ArmNN
 23.08
ClLayerSupport.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "ClLayerSupport.hpp"
7 #include "ClBackendId.hpp"
9 
11 
12 #include <InternalTypes.hpp>
13 #include <LayerSupportCommon.hpp>
14 
17 
18 #if defined(ARMCOMPUTECL_ENABLED)
88 #endif
89 
90 
91 namespace armnn
92 {
93 
94 namespace
95 {
96 
97 template<unsigned int FilterSize>
98 bool IsMatchingSize2d(const TensorInfo& weightInfo)
99 {
100  // Width & Height must match.
101  return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
102 }
103 
/// Base case of the stride-matching helper: true iff the runtime stride
/// equals the single compile-time candidate ValidStride.
template<uint32_t ValidStride>
bool IsMatchingStride(uint32_t actualStride)
{
    return actualStride == ValidStride;
}
109 
/// Recursive case: true if the runtime stride matches any of the
/// compile-time candidates. Peels off FirstStride, then recurses over the
/// remaining pack until the single-candidate base case terminates it.
template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
bool IsMatchingStride(uint32_t actualStride)
{
    if (IsMatchingStride<FirstStride>(actualStride))
    {
        return true;
    }
    return IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
}
115 
/// Reports whether the CL backend is available in this build.
/// With ARMCOMPUTECL_ENABLED defined this is unconditionally true; otherwise it
/// returns false and, when a reason slot was supplied, explains why.
/// The trailing args exist only so call sites can share one signature; they are
/// deliberately ignored.
template<typename ... Args>
bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
{
    IgnoreUnused(reasonIfUnsupported, (args)...);
#if defined(ARMCOMPUTECL_ENABLED)
    return true;
#else
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = "The armnn library has been built without CL support";
    }
    return false;
#endif
}
130 
131 #if defined(ARMCOMPUTECL_ENABLED)
132 #define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
133 #else
134 #define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
135 #endif
136 
137 #if defined(ARMCOMPUTECL_ENABLED)
138 template<class FuncType, class... Args>
139 inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
140 {
141  arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
142  const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
143  if (!supported && reasonIfUnsupported)
144  {
145  reasonIfUnsupported.value() = aclStatus.error_description();
146  }
147  return supported;
148 }
149 
150 #define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
151  return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
152 #else
153 #define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
154  return IsClBackendSupported(reasonIfUnsupported, __VA_ARGS__);
155 #endif
156 
157 template<typename FloatFunc, typename Uint8Func, typename ... Params>
158 bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
159  DataType dataType,
160  FloatFunc floatFuncPtr,
161  Uint8Func uint8FuncPtr,
162  Params&&... params)
163 {
164  return IsClBackendSupported(reasonIfUnsupported) &&
165  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
166  dataType,
167  floatFuncPtr,
168  floatFuncPtr,
169  uint8FuncPtr,
170  &FalseFunc<>,
171  &FalseFunc<>,
172  std::forward<Params>(params)...);
173 }
174 } // anonymous namespace
175 
177  : m_ModelContextPtr(modelContextPtr)
178 {
179 }
180 
182  : m_ModelContextPtr(nullptr)
183 {
184 }
185 
187  const std::vector<TensorInfo>& infos,
188  const BaseDescriptor& descriptor,
189  const Optional<LstmInputParamsInfo>& lstmParamsInfo,
190  const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
191  Optional<std::string&> reasonIfUnsupported) const
192 {
193  switch (type)
194  {
196  return IsActivationSupported(infos[0],
197  infos[1],
198  *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
199  reasonIfUnsupported);
200  case LayerType::Addition:
202  return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
205  return IsArgMinMaxSupported(infos[0],
206  infos[1],
207  *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
208  reasonIfUnsupported);
210  return IsBatchMatMulSupported(infos[0],
211  infos[1],
212  infos[2],
213  *(PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor)),
214  reasonIfUnsupported);
216  return IsBatchNormalizationSupported(infos[0],
217  infos[1],
218  infos[2],
219  infos[3],
220  infos[4],
221  infos[5],
222  *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
223  (&descriptor)),
224  reasonIfUnsupported);
226  return IsBatchToSpaceNdSupported(infos[0],
227  infos[1],
228  *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
229  reasonIfUnsupported);
230  case LayerType::Cast:
231  return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
233  return IsChannelShuffleSupported(infos[0],
234  infos[1],
235  *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
236  reasonIfUnsupported);
238  return IsComparisonSupported(infos[0],
239  infos[1],
240  infos[2],
241  *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
242  reasonIfUnsupported);
243  case LayerType::Concat:
244  {
245  std::vector<const TensorInfo*> inputInfos;
246  for (uint32_t i = 0; i < (infos.size() - 1); i++)
247  {
248  inputInfos.push_back(&infos[i]);
249  }
250  return IsConcatSupported(inputInfos,
251  infos[infos.size() - 1],
252  *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
253  reasonIfUnsupported);
254  }
255  case LayerType::Constant:
256  return IsConstantSupported(infos[0], reasonIfUnsupported);
258  return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
260  return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
262  {
263  if (infos.size() != 4)
264  {
265  throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
266  "TensorInfos should be of format: {input, output, weights, biases}.");
267  }
268 
269  auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
270  if (infos[3] == TensorInfo())
271  {
272  return IsConvolution2dSupported(infos[0],
273  infos[1],
274  desc,
275  infos[2],
276  EmptyOptional(),
277  reasonIfUnsupported);
278  }
279  else
280  {
281  return IsConvolution2dSupported(infos[0],
282  infos[1],
283  desc,
284  infos[2],
285  infos[3],
286  reasonIfUnsupported);
287  }
288  }
290  {
291  if (infos.size() != 4)
292  {
293  throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
294  "TensorInfos should be of format: {input, output, weights, biases}.");
295  }
296 
297  auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
298  if (infos[3] == TensorInfo())
299  {
300  return IsConvolution3dSupported(infos[0],
301  infos[1],
302  desc,
303  infos[2],
304  EmptyOptional(),
305  reasonIfUnsupported);
306  }
307  else
308  {
309  return IsConvolution3dSupported(infos[0],
310  infos[1],
311  desc,
312  infos[2],
313  infos[3],
314  reasonIfUnsupported);
315  }
316  }
318  return IsDepthToSpaceSupported(infos[0],
319  infos[1],
320  *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
321  reasonIfUnsupported);
323  {
324  if (infos.size() != 4)
325  {
326  throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
327  "TensorInfos should be of format: {input, output, weights, biases}.");
328  }
329 
330  auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
331  if (infos[3] == TensorInfo())
332  {
333  return IsDepthwiseConvolutionSupported(infos[0],
334  infos[1],
335  desc,
336  infos[2],
337  EmptyOptional(),
338  reasonIfUnsupported);
339  }
340  else
341  {
342  return IsDepthwiseConvolutionSupported(infos[0],
343  infos[1],
344  desc,
345  infos[2],
346  infos[3],
347  reasonIfUnsupported);
348  }
349  }
351  return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
352  case LayerType::Division:
354  return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
357  {
358  auto desc = *(PolymorphicDowncast<const ElementwiseBinaryDescriptor *>(&descriptor));
359 
360  switch (desc.m_Operation)
361  {
364  reasonIfUnsupported,
365  infos[0],
366  infos[1],
367  infos[2],
368  nullptr);
371  reasonIfUnsupported,
372  infos[0],
373  infos[1],
374  infos[2],
375  nullptr);
378  reasonIfUnsupported,
379  infos[0],
380  infos[1],
381  infos[2]);
384  reasonIfUnsupported,
385  infos[0],
386  infos[1],
387  infos[2]);
390  reasonIfUnsupported,
391  infos[0],
392  infos[1],
393  infos[2],
394  nullptr);
398  reasonIfUnsupported,
399  infos[0],
400  infos[1],
401  infos[2],
402  desc,
403  nullptr);
406  reasonIfUnsupported,
407  infos[0],
408  infos[1],
409  infos[2],
410  nullptr);
411  default:
412  return false;
413  }
414  }
416  return IsElementwiseUnarySupported(infos[0],
417  infos[1],
418  *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
419  reasonIfUnsupported);
420  case LayerType::Fill:
421  return IsFillSupported(infos[0],
422  infos[1],
423  *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
424  reasonIfUnsupported);
425  case LayerType::Floor:
426  return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
428  return IsFullyConnectedSupported(infos[0],
429  infos[1],
430  infos[2],
431  infos[3],
432  *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
433  reasonIfUnsupported);
434  case LayerType::Gather:
435  return IsGatherSupported(infos[0],
436  infos[1],
437  infos[2],
438  *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
439  reasonIfUnsupported);
440  case LayerType::GatherNd:
441  return IsGatherNdSupported(infos[0],
442  infos[1],
443  infos[2],
444  reasonIfUnsupported);
445  case LayerType::Input:
446  return IsInputSupported(infos[0], reasonIfUnsupported);
448  return IsInstanceNormalizationSupported(infos[0],
449  infos[1],
450  *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
451  (&descriptor)),
452  reasonIfUnsupported);
454  return IsL2NormalizationSupported(infos[0],
455  infos[1],
456  *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
457  reasonIfUnsupported);
459  return IsLogicalBinarySupported(infos[0],
460  infos[1],
461  infos[2],
462  *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
463  reasonIfUnsupported);
465  return IsLogSoftmaxSupported(infos[0],
466  infos[1],
467  *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
468  reasonIfUnsupported);
469  case LayerType::Lstm:
470  return IsLstmSupported(infos[0],
471  infos[1],
472  infos[2],
473  infos[3],
474  infos[4],
475  infos[5],
476  infos[6],
477  *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
478  lstmParamsInfo.value(),
479  reasonIfUnsupported);
480  case LayerType::Map:
481  return true;
482  case LayerType::MemCopy:
483  return LayerSupportBase::IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
485  return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
486  case LayerType::Merge:
487  return LayerSupportBase::IsMergeSupported(infos[0],
488  infos[1],
489  infos[2],
490  reasonIfUnsupported);
491  case LayerType::Maximum:
493  return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
495  case LayerType::Mean:
496  return IsMeanSupported(infos[0],
497  infos[1],
498  *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
499  reasonIfUnsupported);
500  case LayerType::Minimum:
502  return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
506  return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
509  return IsNormalizationSupported(infos[0],
510  infos[1],
511  *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
512  reasonIfUnsupported);
513  case LayerType::Output:
514  return IsOutputSupported(infos[0], reasonIfUnsupported);
515  case LayerType::Pad:
516  return IsPadSupported(infos[0],
517  infos[1],
518  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
519  reasonIfUnsupported);
520  case LayerType::Permute:
521  return IsPermuteSupported(infos[0],
522  infos[1],
523  *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
524  reasonIfUnsupported);
526  return IsPooling2dSupported(infos[0],
527  infos[1],
528  *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
529  reasonIfUnsupported);
531  return IsPooling3dSupported(infos[0],
532  infos[1],
533  *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
534  reasonIfUnsupported);
535  case LayerType::Prelu:
536  return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
537  case LayerType::QLstm:
538  return IsQLstmSupported(infos[0],
539  infos[1],
540  infos[2],
541  infos[3],
542  infos[4],
543  infos[5],
544  *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
545  lstmParamsInfo.value(),
546  reasonIfUnsupported);
547  case LayerType::Quantize:
548  return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
550  return IsQuantizedLstmSupported(infos[0],
551  infos[1],
552  infos[2],
553  infos[3],
554  infos[4],
555  quantizedLstmParamsInfo.value(),
556  reasonIfUnsupported);
557  case LayerType::Rank:
558  return true;
559  case LayerType::Reduce:
560  return IsReduceSupported(infos[0],
561  infos[1],
562  *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
563  reasonIfUnsupported);
564  case LayerType::Reshape:
565  return IsReshapeSupported(infos[0],
566  infos[1],
567  *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
568  reasonIfUnsupported);
569  case LayerType::Resize:
570  return IsResizeSupported(infos[0],
571  infos[1],
572  *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
573  reasonIfUnsupported);
574  case LayerType::Shape:
575  return LayerSupportBase::IsShapeSupported(infos[0],
576  infos[1],
577  reasonIfUnsupported);
578  case LayerType::Slice:
579  return IsSliceSupported(infos[0],
580  infos[1],
581  *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
582  reasonIfUnsupported);
583  case LayerType::Softmax:
584  return IsSoftmaxSupported(infos[0],
585  infos[1],
586  *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
587  reasonIfUnsupported);
589  return IsSpaceToBatchNdSupported(infos[0],
590  infos[1],
591  *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
592  reasonIfUnsupported);
594  return IsSpaceToDepthSupported(infos[0],
595  infos[1],
596  *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
597  reasonIfUnsupported);
598  case LayerType::Splitter:
599  {
600  std::vector<TensorInfo> outputInfos;
601  for (uint32_t i = 1; i < infos.size(); i++)
602  {
603  outputInfos.push_back(infos[i]);
604  }
605  return IsSplitterSupported(infos[0],
606  {outputInfos.begin(), outputInfos.end()},
607  *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
608  reasonIfUnsupported);
609  }
610  case LayerType::Stack:
611  {
612  std::vector<const TensorInfo*> inputInfos;
613  for (uint32_t i = 0; i < infos.size() - 1; i++)
614  {
615  inputInfos.push_back(&infos[i]);
616  }
617  return IsStackSupported(inputInfos,
618  infos[infos.size() - 1],
619  *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
620  reasonIfUnsupported);
621  }
623  return IsStridedSliceSupported(infos[0],
624  infos[1],
625  *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
626  reasonIfUnsupported);
629  return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
631  case LayerType::Tile:
632  return IsTileSupported(infos[0],
633  infos[1],
634  *(PolymorphicDowncast<const TileDescriptor*>(&descriptor)),
635  reasonIfUnsupported);
637  return IsTransposeSupported(infos[0],
638  infos[1],
639  *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
640  reasonIfUnsupported);
642  {
643  if (infos.size() != 4)
644  {
645  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
646  "TensorInfos should be of format: {input, output, weights, biases}.");
647  }
648 
649  auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
650  if (infos[3] == TensorInfo())
651  {
652  return IsTransposeConvolution2dSupported(infos[0],
653  infos[1],
654  desc,
655  infos[2],
656  EmptyOptional(),
657  reasonIfUnsupported);
658  }
659  else
660  {
661  return IsTransposeConvolution2dSupported(infos[0],
662  infos[1],
663  desc,
664  infos[2],
665  infos[3],
666  reasonIfUnsupported);
667  }
668  }
671  infos[1],
672  infos[2],
673  infos[3],
674  infos[4],
675  infos[5],
676  *(PolymorphicDowncast<const
677  UnidirectionalSequenceLstmDescriptor*>(&descriptor)),
678  lstmParamsInfo.value(),
679  reasonIfUnsupported);
680  case LayerType::Unmap:
681  return true;
682  default:
683  // layers not supported in cl by default:
684  // debug, detectionpostprocess, fakequantization,
685  // precompiled, standin, switch, pooling3d
686  return false;
687  }
688 }
689 
691  const TensorInfo& output,
692  const ActivationDescriptor& descriptor,
693  Optional<std::string&> reasonIfUnsupported) const
694 {
696  reasonIfUnsupported,
697  input,
698  output,
699  descriptor);
700 }
701 
703  const TensorInfo& input1,
704  const TensorInfo& output,
705  Optional<std::string&> reasonIfUnsupported) const
706 {
708  reasonIfUnsupported,
709  input0,
710  input1,
711  output,
712  nullptr);
713 }
714 
716  const TensorInfo& output,
717  const ArgMinMaxDescriptor& descriptor,
718  Optional<std::string&> reasonIfUnsupported) const
719 {
720 
722  reasonIfUnsupported,
723  input,
724  output,
725  descriptor);
726 }
727 
729  const TensorInfo& inputY,
730  const TensorInfo& output,
731  const BatchMatMulDescriptor& descriptor,
732  Optional<std::string&> reasonIfUnsupported) const
733 {
735  reasonIfUnsupported,
736  inputX,
737  inputY,
738  output,
739  descriptor,
740  nullptr);
741 }
742 
744  const TensorInfo& output,
745  const TensorInfo& mean,
746  const TensorInfo& var,
747  const TensorInfo& beta,
748  const TensorInfo& gamma,
749  const BatchNormalizationDescriptor& descriptor,
750  Optional<std::string&> reasonIfUnsupported) const
751 {
753  reasonIfUnsupported,
754  input,
755  output,
756  mean,
757  var,
758  beta,
759  gamma,
760  descriptor,
761  nullptr);
762 }
763 
765  const TensorInfo& output,
766  const BatchToSpaceNdDescriptor& descriptor,
767  Optional<std::string&> reasonIfUnsupported) const
768 {
770  reasonIfUnsupported,
771  input,
772  output,
773  descriptor);
774 }
775 
777  const TensorInfo& output,
778  Optional<std::string&> reasonIfUnsupported) const
779 {
781  reasonIfUnsupported,
782  input,
783  output);
784 }
785 
787  const TensorInfo& output,
788  const ChannelShuffleDescriptor& descriptor,
789  Optional<std::string&> reasonIfUnsupported) const
790 {
792  reasonIfUnsupported,
793  input,
794  output,
795  descriptor);
796 }
797 
799  const TensorInfo& input1,
800  const TensorInfo& output,
801  const ComparisonDescriptor& descriptor,
802  Optional<std::string&> reasonIfUnsupported) const
803 {
805  reasonIfUnsupported,
806  input0,
807  input1,
808  output,
809  descriptor);
810 }
811 
812 bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
813  const TensorInfo& output,
814  const OriginsDescriptor& descriptor,
815  Optional<std::string&> reasonIfUnsupported) const
816 {
817  if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
818  {
819  SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
820  return false;
821  }
822 
823  unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
824  if(concatInnerAxis < 3) // Width, height, or channels
825  {
827  reasonIfUnsupported,
828  inputs,
829  output,
830  descriptor);
831  }
832  else if (concatInnerAxis == 3)
833  {
834  // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
835  // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
836  for (auto& input : inputs)
837  {
838  if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
839  {
840  SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
841  return false;
842  }
843  }
844  return true; // Sub-tensors support concat along batch
845  }
846  else // > 4 dimensions not supported.
847  {
848  SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
849  return false;
850  }
851 }
852 
854  Optional<std::string&> reasonIfUnsupported) const
855 {
857  reasonIfUnsupported,
858  output);
859 }
860 
862  const TensorInfo& output,
863  Optional<std::string&> reasonIfUnsupported) const
864 {
866  reasonIfUnsupported,
867  input,
868  output);
869 }
870 
872  const TensorInfo& output,
873  Optional<std::string&> reasonIfUnsupported) const
874 {
876  reasonIfUnsupported,
877  input,
878  output);
879 }
880 
882  const TensorInfo& output,
883  const Convolution2dDescriptor& descriptor,
884  const TensorInfo& weights,
885  const Optional<TensorInfo>& biases,
886  Optional<std::string&> reasonIfUnsupported) const
887 {
888  bool isFastMathEnabled = false;
889 #if defined(ARMCOMPUTECL_ENABLED)
890  if (m_ModelContextPtr)
891  {
892  if (m_ModelContextPtr.get() != nullptr)
893  {
894  auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
895  if (modelOptions)
896  {
897  isFastMathEnabled = modelOptions->IsFastMathEnabled();
898  }
899  }
900  }
901 #endif
902 
904  reasonIfUnsupported,
905  input,
906  output,
907  descriptor,
908  weights,
909  biases,
910  isFastMathEnabled,
911  nullptr);
912 }
913 
915  const TensorInfo& output,
916  const Convolution3dDescriptor& descriptor,
917  const TensorInfo& weights,
918  const Optional<TensorInfo>& biases,
919  Optional<std::string&> reasonIfUnsupported) const
920 {
921  bool isFastMathEnabled = false;
922 #if defined(ARMCOMPUTECL_ENABLED)
923  if (m_ModelContextPtr)
924 {
925  if (m_ModelContextPtr.get() != nullptr)
926  {
927  auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
928  if (modelOptions)
929  {
930  isFastMathEnabled = modelOptions->IsFastMathEnabled();
931  }
932  }
933 }
934 #endif
935 
937  reasonIfUnsupported,
938  input,
939  output,
940  descriptor,
941  weights,
942  biases,
943  isFastMathEnabled,
944  nullptr);
945 }
946 
948  const TensorInfo& output,
949  Optional<std::string&> reasonIfUnsupported) const
950 {
952  reasonIfUnsupported,
953  input,
954  output);
955 }
956 
958  const TensorInfo& output,
959  const DepthToSpaceDescriptor& descriptor,
960  Optional<std::string&> reasonIfUnsupported) const
961 {
963  reasonIfUnsupported,
964  input,
965  output,
966  descriptor);
967 }
968 
970  const TensorInfo& output,
971  const DepthwiseConvolution2dDescriptor& descriptor,
972  const TensorInfo& weights,
973  const Optional<TensorInfo>& biases,
974  Optional<std::string&> reasonIfUnsupported) const
975 {
977  reasonIfUnsupported,
978  input,
979  output,
980  descriptor,
981  weights,
982  biases,
983  nullptr);
984 }
985 
987  const TensorInfo& output,
988  const DepthwiseConvolution2dDescriptor& descriptor,
989  const TensorInfo& weights,
990  const Optional<TensorInfo>& biases,
991  Optional<std::string&> reasonIfUnsupported) const
992 {
994  reasonIfUnsupported,
995  input,
996  output,
997  descriptor,
998  weights,
999  biases,
1000  nullptr);
1001 }
1002 
1003 
1005  const TensorInfo& input1,
1006  const TensorInfo& output,
1007  Optional<std::string&> reasonIfUnsupported) const
1008 {
1010  reasonIfUnsupported,
1011  input0,
1012  input1,
1013  output,
1014  nullptr);
1015 }
1016 
1018  const TensorInfo& output,
1019  const ElementwiseUnaryDescriptor& descriptor,
1020  Optional<std::string&> reasonIfUnsupported) const
1021 {
1022  switch(descriptor.m_Operation)
1023  {
1024  case UnaryOperation::Abs:
1026  reasonIfUnsupported,
1027  input,
1028  output);
1029  case UnaryOperation::Exp:
1031  reasonIfUnsupported,
1032  input,
1033  output);
1034  case UnaryOperation::Log:
1036  reasonIfUnsupported,
1037  input,
1038  output);
1041  reasonIfUnsupported,
1042  input,
1043  output);
1044  case UnaryOperation::Neg:
1046  reasonIfUnsupported,
1047  input,
1048  output);
1049  case UnaryOperation::Rsqrt:
1051  reasonIfUnsupported,
1052  input,
1053  output);
1054  case UnaryOperation::Sin:
1056  reasonIfUnsupported,
1057  input,
1058  output);
1059  case UnaryOperation::Sqrt:
1061  reasonIfUnsupported,
1062  input,
1063  output);
1064  default:
1065  return false;
1066  }
1067 }
1068 
1070  const TensorInfo& output,
1071  const FillDescriptor& descriptor,
1072  Optional<std::string&> reasonIfUnsupported) const
1073 {
1074  armnn::IgnoreUnused(input);
1075  armnn::IgnoreUnused(output);
1076  armnn::IgnoreUnused(descriptor);
1077 
1078  return IsClBackendSupported(reasonIfUnsupported);
1079 }
1080 
1082  const TensorInfo& output,
1083  Optional<std::string&> reasonIfUnsupported) const
1084 {
1086  reasonIfUnsupported,
1087  input,
1088  output);
1089 }
1090 
1092  const TensorInfo& output,
1093  const TensorInfo& weights,
1094  const TensorInfo& biases,
1095  const FullyConnectedDescriptor& descriptor,
1096  Optional<std::string&> reasonIfUnsupported) const
1097 {
1099  reasonIfUnsupported,
1100  input,
1101  output,
1102  weights,
1103  biases,
1104  descriptor,
1105  nullptr);
1106 }
1107 
1109  const TensorInfo& input1,
1110  const TensorInfo& output,
1111  const GatherDescriptor& descriptor,
1112  Optional<std::string&> reasonIfUnsupported) const
1113 {
1115  reasonIfUnsupported,
1116  input0,
1117  input1,
1118  output,
1119  descriptor);
1120 }
1121 
1123  const TensorInfo& input1,
1124  const TensorInfo& output,
1125  Optional<std::string&> reasonIfUnsupported) const
1126 {
1128  reasonIfUnsupported,
1129  input0,
1130  input1,
1131  output);
1132 }
1133 
1135  Optional<std::string&> reasonIfUnsupported) const
1136 {
1137  return IsClBackendSupported(reasonIfUnsupported, input);
1138 }
1139 
1141  const TensorInfo& output,
1142  const InstanceNormalizationDescriptor& descriptor,
1143  Optional<std::string&> reasonIfUnsupported) const
1144 {
1146  reasonIfUnsupported,
1147  input,
1148  output,
1149  descriptor);
1150 }
1151 
1153  const TensorInfo& output,
1154  const L2NormalizationDescriptor& descriptor,
1155  Optional<std::string&> reasonIfUnsupported) const
1156 {
1158  reasonIfUnsupported,
1159  input,
1160  output,
1161  descriptor);
1162 }
1163 
1165  const TensorInfo& input1,
1166  const TensorInfo& output,
1167  const LogicalBinaryDescriptor& descriptor,
1168  Optional<std::string&> reasonIfUnsupported) const
1169 {
1170  IgnoreUnused(output);
1171 
1172  switch(descriptor.m_Operation)
1173  {
1176  reasonIfUnsupported,
1177  input0,
1178  input1,
1179  output);
1182  reasonIfUnsupported,
1183  input0,
1184  input1,
1185  output);
1186  default:
1187  return false;
1188  }
1189 }
1190 
1191 
1193  const TensorInfo& output,
1194  const LogSoftmaxDescriptor& descriptor,
1195  Optional<std::string&> reasonIfUnsupported) const
1196 {
1198  reasonIfUnsupported,
1199  input,
1200  output,
1201  descriptor);
1202 }
1203 
1205  const TensorInfo& outputStateIn,
1206  const TensorInfo& cellStateIn,
1207  const TensorInfo& scratchBuffer,
1208  const TensorInfo& outputStateOut,
1209  const TensorInfo& cellStateOut,
1210  const TensorInfo& output,
1211  const LstmDescriptor& descriptor,
1212  const LstmInputParamsInfo& paramsInfo,
1213  Optional<std::string&> reasonIfUnsupported) const
1214 {
1216  reasonIfUnsupported,
1217  input,
1218  outputStateIn,
1219  cellStateIn,
1220  scratchBuffer,
1221  outputStateOut,
1222  cellStateOut,
1223  output,
1224  descriptor,
1225  paramsInfo);
1226 }
1227 
1229  const TensorInfo& input1,
1230  const TensorInfo& output,
1231  Optional<std::string&> reasonIfUnsupported) const
1232 {
1234  reasonIfUnsupported,
1235  input0,
1236  input1,
1237  output);
1238 }
1239 
1241  const TensorInfo& output,
1242  const MeanDescriptor& descriptor,
1243  Optional<std::string&> reasonIfUnsupported) const
1244 {
1246  reasonIfUnsupported,
1247  input,
1248  output,
1249  descriptor);
1250 }
1251 
1253  const TensorInfo& input1,
1254  const TensorInfo& output,
1255  Optional<std::string&> reasonIfUnsupported) const
1256 {
1258  reasonIfUnsupported,
1259  input0,
1260  input1,
1261  output);
1262 }
1263 
1265  const TensorInfo& input1,
1266  const TensorInfo& output,
1267  Optional<std::string&> reasonIfUnsupported) const
1268 {
1270  reasonIfUnsupported,
1271  input0,
1272  input1,
1273  output,
1274  nullptr);
1275 }
1276 
1278  const TensorInfo& output,
1279  const NormalizationDescriptor& descriptor,
1280  Optional<std::string&> reasonIfUnsupported) const
1281 {
1282  FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1283 }
1284 
1286  Optional<std::string&> reasonIfUnsupported) const
1287 {
1288  return IsClBackendSupported(reasonIfUnsupported, output);
1289 }
1290 
1292  const TensorInfo& output,
1293  const PadDescriptor& descriptor,
1294  Optional<std::string&> reasonIfUnsupported) const
1295 {
1297  reasonIfUnsupported,
1298  input,
1299  output,
1300  descriptor);
1301 }
1302 
1304  const TensorInfo& output,
1305  const PermuteDescriptor& descriptor,
1306  Optional<std::string&> reasonIfUnsupported) const
1307 {
1308  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1309 }
1310 
1312  const TensorInfo& output,
1313  const Pooling2dDescriptor& descriptor,
1314  Optional<std::string&> reasonIfUnsupported) const
1315 {
1316  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1317 }
1318 
1320  const TensorInfo& output,
1321  const Pooling3dDescriptor& descriptor,
1322  Optional<std::string&> reasonIfUnsupported) const
1323 {
1324  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling3dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1325 }
1326 
1328  const armnn::TensorInfo &alpha,
1329  const armnn::TensorInfo &output,
1330  armnn::Optional<std::string &> reasonIfUnsupported) const
1331 {
1332  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
1333 }
1334 
1336  const TensorInfo& previousOutputIn,
1337  const TensorInfo& previousCellStateIn,
1338  const TensorInfo& outputStateOut,
1339  const TensorInfo& cellStateOut,
1340  const TensorInfo& output,
1341  const QLstmDescriptor& descriptor,
1342  const LstmInputParamsInfo& paramsInfo,
1343  Optional<std::string&> reasonIfUnsupported) const
1344 {
1345  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
1346  previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
1347  previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
1348  outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
1349  cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
1351  {
1353  reasonIfUnsupported,
1354  input,
1355  previousCellStateIn,
1356  previousOutputIn,
1357  cellStateOut,
1358  outputStateOut,
1359  output,
1360  descriptor,
1361  paramsInfo);
1362  }
1363  else
1364  {
1365  return false;
1366  }
1367 }
1368 
1370  const TensorInfo& previousCellStateIn,
1371  const TensorInfo& previousOutputIn,
1372  const TensorInfo& cellStateOut,
1373  const TensorInfo& output,
1374  const QuantizedLstmInputParamsInfo& paramsInfo,
1375  Optional<std::string&> reasonIfUnsupported) const
1376 {
1378  reasonIfUnsupported,
1379  input,
1380  previousCellStateIn,
1381  previousOutputIn,
1382  cellStateOut,
1383  output,
1384  paramsInfo);
1385 }
1386 
1388  const TensorInfo& output,
1389  Optional<std::string&> reasonIfUnsupported) const
1390 {
1392  reasonIfUnsupported,
1393  input,
1394  output);
1395 }
1396 
1398  const TensorInfo& output,
1399  const ReduceDescriptor& descriptor,
1400  Optional<std::string&> reasonIfUnsupported) const
1401 {
1403  reasonIfUnsupported,
1404  input,
1405  output,
1406  descriptor);
1407 }
1408 
1410  const TensorInfo& output,
1411  const ReshapeDescriptor& descriptor,
1412  Optional<std::string&> reasonIfUnsupported) const
1413 {
1414  IgnoreUnused(descriptor);
1415  FORWARD_WORKLOAD_VALIDATE_FUNC(ClReshapeWorkloadValidate, reasonIfUnsupported, input, output);
1416 }
1417 
1419  const TensorInfo& output,
1420  const ResizeDescriptor& descriptor,
1421  Optional<std::string&> reasonIfUnsupported) const
1422 {
1423  FORWARD_WORKLOAD_VALIDATE_FUNC(ClResizeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1424 }
1425 
1427  const TensorInfo& output,
1428  const SliceDescriptor& descriptor,
1429  Optional<std::string&> reasonIfUnsupported) const
1430 {
1431  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSliceWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1432 }
1433 
1435  const TensorInfo& output,
1436  const SoftmaxDescriptor& descriptor,
1437  Optional<std::string&> reasonIfUnsupported) const
1438 {
1439  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1440 }
1441 
1443  const TensorInfo& output,
1444  const SpaceToBatchNdDescriptor& descriptor,
1445  Optional<std::string&> reasonIfUnsupported) const
1446 {
1448  reasonIfUnsupported,
1449  input,
1450  output,
1451  descriptor);
1452 }
1453 
1455  const TensorInfo& output,
1456  const SpaceToDepthDescriptor& descriptor,
1457  Optional<std::string&> reasonIfUnsupported) const
1458 {
1460  reasonIfUnsupported,
1461  input,
1462  output,
1463  descriptor);
1464 }
1465 
1467  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
1468  const ViewsDescriptor& descriptor,
1469  Optional<std::string&> reasonIfUnsupported) const
1470 {
1471 #if defined(ARMCOMPUTECL_ENABLED)
1472  // Split along the last dimension, cannot use sub-tensors
1473  // as width and height of the sub-tensors do not match
1474  // the width and height of the parent tensor
1475  // in case of input with more than 2D.
1476  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
1477  if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
1478  *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
1479  {
1481  reasonIfUnsupported,
1482  input,
1483  outputs,
1484  *splitAxis.begin());
1485  }
1486 #endif
1487  IgnoreUnused(descriptor);
1488  for (auto output : outputs)
1489  {
1490  if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
1491  {
1492  SetValueChecked(reasonIfUnsupported, "Cl Splitter: Types and quantization parameters must match.");
1493  return false;
1494  }
1495  }
1496  return true;
1497 }
1498 
1499 bool ClLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
1500  const TensorInfo& output,
1501  const StackDescriptor& descriptor,
1502  Optional<std::string&> reasonIfUnsupported) const
1503 {
1505  reasonIfUnsupported,
1506  inputs,
1507  output,
1508  descriptor);
1509 }
1510 
1512  const TensorInfo& output,
1513  const StridedSliceDescriptor& descriptor,
1514  Optional<std::string&> reasonIfUnsupported) const
1515 {
1517  reasonIfUnsupported,
1518  input,
1519  output,
1520  descriptor);
1521 }
1522 
1524  const TensorInfo& input1,
1525  const TensorInfo& output,
1526  Optional<std::string&> reasonIfUnsupported) const
1527 {
1529  reasonIfUnsupported,
1530  input0,
1531  input1,
1532  output,
1533  nullptr);
1534 }
1535 
1537  const TensorInfo& output,
1538  const TileDescriptor& descriptor,
1539  Optional<std::string&> reasonIfUnsupported) const
1540 {
1542  reasonIfUnsupported,
1543  input,
1544  output,
1545  descriptor);
1546 }
1547 
1549  const TensorInfo& output,
1550  const TransposeConvolution2dDescriptor& descriptor,
1551  const TensorInfo& weights,
1552  const Optional<TensorInfo>& biases,
1553  Optional<std::string&> reasonIfUnsupported) const
1554 {
1556  reasonIfUnsupported,
1557  input,
1558  output,
1559  descriptor,
1560  weights,
1561  biases);
1562 }
1563 
1565  const TensorInfo& output,
1566  const TransposeDescriptor& descriptor,
1567  Optional<std::string&> reasonIfUnsupported) const
1568 {
1569  FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1570 }
1571 
1573  const TensorInfo& outputStateIn,
1574  const TensorInfo& cellStateIn,
1575  const TensorInfo& outputStateOut,
1576  const TensorInfo& cellStateOut,
1577  const TensorInfo& output,
1578  const UnidirectionalSequenceLstmDescriptor& descriptor,
1579  const LstmInputParamsInfo& paramsInfo,
1580  Optional<std::string&> reasonIfUnsupported) const
1581 {
1583  reasonIfUnsupported,
1584  input,
1585  outputStateIn,
1586  cellStateIn,
1587  outputStateOut,
1588  cellStateOut,
1589  output,
1590  descriptor,
1591  paramsInfo);
1592 }
1593 
1594 } // namespace armnn
armnn::BatchNormalizationDescriptor
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
Definition: Descriptors.hpp:828
armnn::LayerType::SpaceToDepth
@ SpaceToDepth
armnn::ClLayerSupport::IsPooling2dSupported
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1311
ClPooling3dWorkload.hpp
armnn::ClLayerSupport::IsBatchNormalizationSupported
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:743
armnn::OriginsDescriptor::GetConcatAxis
unsigned int GetConcatAxis() const
Get the concatenation axis value.
Definition: Descriptors.cpp:162
armnn::ClLogWorkloadValidate
arm_compute::Status ClLogWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClLogWorkload.cpp:18
armnn::BinaryOperation::Mul
@ Mul
armnn::ViewsDescriptor
A ViewsDescriptor for the SplitterLayer.
Definition: Descriptors.hpp:244
armnn::ClRsqrtWorkloadValidate
arm_compute::Status ClRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClRsqrtWorkload.cpp:18
armnn::LayerType::Permute
@ Permute
armnn::ActivationDescriptor
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
armnn::FullyConnectedDescriptor
A FullyConnectedDescriptor for the FullyConnectedLayer.
Definition: Descriptors.hpp:507
armnn::BinaryOperation::Add
@ Add
ClConstantWorkload.hpp
armnn::ClLayerSupport::IsMinimumSupported
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1252
ClDepthwiseConvolutionWorkload.hpp
armnn::LayerType::Splitter
@ Splitter
ClArgMinMaxWorkload.hpp
armnn::ClFloorWorkloadValidate
arm_compute::Status ClFloorWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClFloorFloatWorkload.cpp:14
armnn::LayerType::BatchNormalization
@ BatchNormalization
armnn::QLstmDescriptor
A QLstmDescriptor for the QLstmLayer.
Definition: Descriptors.hpp:1359
armnn::ClLayerSupport::IsPreluSupported
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1327
armnn::ClSubtractionValidate
arm_compute::Status ClSubtractionValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: ClSubtractionWorkload.cpp:46
armnn::Optional
Definition: Optional.hpp:270
armnn::ClLayerSupport::IsConvolution2dSupported
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:881
armnn::ClLayerSupport::IsLstmSupported
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1204
ClLogWorkload.hpp
armnn::ClLayerSupport::IsConcatSupported
bool IsConcatSupported(const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:812
armnn::ClQuantizeWorkloadValidate
arm_compute::Status ClQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClQuantizeWorkload.cpp:22
ClElementwiseBinaryWorkload.hpp
armnn::ClTransposeConvolution2dWorkloadValidate
arm_compute::Status ClTransposeConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
Definition: ClTransposeConvolution2dWorkload.cpp:26
ClLogSoftmaxWorkload.hpp
armnn::ClCastValidate
arm_compute::Status ClCastValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClCastWorkload.cpp:20
FORWARD_WORKLOAD_VALIDATE_FUNC
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
Definition: ClLayerSupport.cpp:150
ClDepthToSpaceWorkload.hpp
armnn::ClStridedSliceWorkloadValidate
arm_compute::Status ClStridedSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor)
Definition: ClStridedSliceWorkload.cpp:27
armnn::Pooling3dDescriptor
A Pooling3dDescriptor for the Pooling3dLayer.
Definition: Descriptors.hpp:431
armnn::ClExpWorkloadValidate
arm_compute::Status ClExpWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClExpWorkload.cpp:18
armnn::ResizeDescriptor
A ResizeDescriptor for the ResizeLayer.
Definition: Descriptors.hpp:964
armnn::ArgMinMaxDescriptor
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
armnn::ClMaximumWorkloadValidate
arm_compute::Status ClMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: ClMaximumWorkload.cpp:24
armnn::ClLayerSupport::IsAdditionSupported
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:702
armnn::InstanceNormalizationDescriptor
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
Definition: Descriptors.hpp:847
armnn::ClLayerSupport::IsBatchMatMulSupported
bool IsBatchMatMulSupported(const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:728
armnn::ClMultiplicationWorkloadValidate
arm_compute::Status ClMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: ClMultiplicationWorkload.cpp:18
ClQuantizedLstmWorkload.hpp
armnn::GatherDescriptor
A GatherDescriptor for the GatherLayer.
Definition: Descriptors.hpp:944
armnn::ClGatherNdWorkloadValidate
arm_compute::Status ClGatherNdWorkloadValidate(const TensorInfo &paramsInfo, const TensorInfo &indicesInfo, const TensorInfo &outputInfo)
Definition: ClGatherNdWorkload.cpp:16
armnn::ClSpaceToDepthWorkloadValidate
arm_compute::Status ClSpaceToDepthWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor)
Definition: ClSpaceToDepthWorkload.cpp:54
armnn::LayerType::InstanceNormalization
@ InstanceNormalization
armnn::LayerType::ConvertFp16ToFp32
@ ConvertFp16ToFp32
armnn::ClLayerSupport::IsMaximumSupported
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1228
armnn::ClSplitterWorkloadValidate
arm_compute::Status ClSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, unsigned int splitAxis)
Definition: ClSplitterWorkload.cpp:31
armnn::ClLayerSupport::IsActivationSupported
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:690
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::LayerType::Floor
@ Floor
armnn::L2NormalizationDescriptor
A L2NormalizationDescriptor for the L2NormalizationLayer.
Definition: Descriptors.hpp:809
ClUnidirectionalSequenceLstmFloatWorkload.hpp
ClNormalizationFloatWorkload.hpp
armnn::ClUnidirectionalSequenceLstmFloatWorkloadValidate
arm_compute::Status ClUnidirectionalSequenceLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: ClUnidirectionalSequenceLstmFloatWorkload.cpp:508
armnn::ClBatchToSpaceNdWorkloadValidate
arm_compute::Status ClBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor)
Definition: ClBatchToSpaceNdWorkload.cpp:17
ClLayerSupport.hpp
armnn::OriginsDescriptor::GetNumDimensions
uint32_t GetNumDimensions() const
Get the number of dimensions.
Definition: Descriptors.cpp:192
armnn::BinaryOperation::Sub
@ Sub
armnn::LayerType::Transpose
@ Transpose
armnn::NormalizationDescriptor
A NormalizationDescriptor for the NormalizationLayer.
Definition: Descriptors.hpp:769
armnn::ClLayerSupport::IsOutputSupported
bool IsOutputSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1285
armnn::ClLogicalAndWorkloadValidate
arm_compute::Status ClLogicalAndWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: ClLogicalAndWorkload.cpp:20
armnn::LayerType::Comparison
@ Comparison
armnn::ClLayerSupport::IsDepthwiseConvolutionSupported
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:969
armnn::ClLayerSupport::IsPadSupported
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1291
armnn::ClLayerSupport::IsFullyConnectedSupported
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1091
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::ClLayerSupport::IsLogicalBinarySupported
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
Definition: ClLayerSupport.cpp:1164
armnn::ChannelShuffleDescriptor
A ChannelShuffleDescriptor for the ChannelShuffle operator.
Definition: Descriptors.hpp:1541
ClPooling2dWorkload.hpp
armnn::ClDepthToSpaceWorkloadValidate
arm_compute::Status ClDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor)
Definition: ClDepthToSpaceWorkload.cpp:22
armnn::ClActivationWorkloadValidate
arm_compute::Status ClActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)
Definition: ClActivationWorkload.cpp:17
armnn::ClMeanValidate
arm_compute::Status ClMeanValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor)
Definition: ClMeanWorkload.cpp:17
armnn::ClElementwiseBinaryValidate
arm_compute::Status ClElementwiseBinaryValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ElementwiseBinaryDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: ClElementwiseBinaryWorkload.cpp:64
armnn::LogicalBinaryOperation::LogicalOr
@ LogicalOr
ClReduceWorkload.hpp
ClMeanWorkload.hpp
ARMNN_NO_DEPRECATE_WARN_BEGIN
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
ClSinWorkload.hpp
armnn::ClChannelShuffleValidate
arm_compute::Status ClChannelShuffleValidate(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor)
Definition: ClChannelShuffleWorkload.cpp:20
armnn::LayerType::Tile
@ Tile
ClResizeWorkload.hpp
ClComparisonWorkload.hpp
ClPreluWorkload.hpp
armnn::ClLayerSupport::IsNormalizationSupported
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1277
armnn::ClLayerSupport::IsReduceSupported
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1397
armnn::LayerType::Stack
@ Stack
BackendRegistry.hpp
armnn::ClFullyConnectedWorkloadValidate
arm_compute::Status ClFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const Optional< TensorInfo > &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: ClFullyConnectedWorkload.cpp:19
armnn::StackDescriptor
A StackDescriptor for the StackLayer.
Definition: Descriptors.hpp:1230
IgnoreUnused.hpp
armnn::ClConvolution2dWorkloadValidate
arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: ClConvolution2dWorkload.cpp:23
ClCastWorkload.hpp
armnn::LayerType::Normalization
@ Normalization
armnn::ClLayerSupport::IsSliceSupported
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1426
armnn::LayerType::QuantizedLstm
@ QuantizedLstm
armnn::UnaryOperation::Neg
@ Neg
armnn::LayerType::Reduce
@ Reduce
armnn::ClLayerSupport::ClLayerSupport
ClLayerSupport()
Definition: ClLayerSupport.cpp:181
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
armnn::ComputeSplitAxis
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
Definition: ArmComputeUtils.hpp:244
armnn::DataType::QSymmS16
@ QSymmS16
armnn::ClLayerSupport::IsSoftmaxSupported
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1434
ClBatchToSpaceNdWorkload.hpp
armnn::LayerType::GatherNd
@ GatherNd
armnn::ClLayerSupport::IsMeanSupported
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1240
armnn::LayerType::ElementwiseBinary
@ ElementwiseBinary
armnn::ClL2NormalizationWorkloadValidate
arm_compute::Status ClL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)
Definition: ClL2NormalizationFloatWorkload.cpp:17
armnn::ClLayerSupport::IsChannelShuffleSupported
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:786
armnn::ClLayerSupport::IsConvertFp16ToFp32Supported
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:861
armnn::ClLayerSupport::IsFloorSupported
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1081
armnn::QuantizedLstmInputParamsInfo
Definition: QuantizedLstmParams.hpp:119
armnn::LayerType::ConvertFp32ToFp16
@ ConvertFp32ToFp16
ClDivisionWorkload.hpp
ClGatherNdWorkload.hpp
armnn::ClAdditionValidate
arm_compute::Status ClAdditionValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: ClAdditionWorkload.cpp:45
armnn::ClNegWorkloadValidate
arm_compute::Status ClNegWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClNegWorkload.cpp:18
ClBackendModelContext.hpp
LayerSupportCommon.hpp
ClGatherWorkload.hpp
armnn::ClLayerSupport::IsQuantizeSupported
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1387
armnn::ClQLstmWorkloadValidate
arm_compute::Status ClQLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: ClQLstmWorkload.cpp:247
armnn::ClReduceWorkloadValidate
arm_compute::Status ClReduceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor)
Definition: ClReduceWorkload.cpp:18
armnn::LayerType::Slice
@ Slice
ClExpWorkload.hpp
armnn::ClLayerSupport::IsMultiplicationSupported
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1264
armnn::ClBackendModelContext
The ClBackendModelContext is used to pass in CL specific backend ModelOptions.
Definition: ClBackendModelContext.hpp:28
armnn::ClLayerSupport::IsTransposeConvolution2dSupported
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1548
ClInstanceNormalizationWorkload.hpp
armnn::ClConstantWorkloadValidate
arm_compute::Status ClConstantWorkloadValidate(const TensorInfo &output)
Definition: ClConstantWorkload.cpp:18
armnn::BinaryOperation::Maximum
@ Maximum
armnn::ClPermuteWorkloadValidate
arm_compute::Status ClPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)
Definition: ClPermuteWorkload.cpp:17
armnn::ClLayerSupport::IsInstanceNormalizationSupported
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1140
armnn::LayerType::ChannelShuffle
@ ChannelShuffle
ClActivationWorkload.hpp
ClTransposeConvolution2dWorkload.hpp
ClDequantizeWorkload.hpp
armnn::BinaryOperation::SqDiff
@ SqDiff
armnn::UnaryOperation::Rsqrt
@ Rsqrt
armnn::ClLayerSupport::IsSpaceToDepthSupported
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1454
armnn::ClLayerSupport::IsReshapeSupported
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1409
ClSpaceToBatchNdWorkload.hpp
armnn::UnaryOperation::Sqrt
@ Sqrt
armnn::UnaryOperation::LogicalNot
@ LogicalNot
armnn::LayerType::Subtraction
@ Subtraction
armnn::LayerType::Prelu
@ Prelu
armnn::ClLayerSupport::IsLayerSupported
bool IsLayerSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &quantizedLstmParamsInfo, Optional< std::string & > reasonIfUnsupported) const
Default implementation of the ILayerSupport interface, Backends should implement this as a switch sta...
Definition: ClLayerSupport.cpp:186
armnn::ClResizeWorkloadValidate
arm_compute::Status ClResizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor)
Definition: ClResizeWorkload.cpp:22
armnn::LayerType::LogicalBinary
@ LogicalBinary
armnn::LogicalBinaryDescriptor::m_Operation
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
Definition: Descriptors.hpp:1513
armnn::LayerType::Concat
@ Concat
armnn::PadDescriptor
A PadDescriptor for the PadLayer.
Definition: Descriptors.hpp:1175
armnn::UnaryOperation::Exp
@ Exp
armnn::ClPooling3dWorkloadValidate
arm_compute::Status ClPooling3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor)
Definition: ClPooling3dWorkload.cpp:18
armnn::ClStackWorkloadValidate
arm_compute::Status ClStackWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor)
Definition: ClStackWorkload.cpp:29
armnn::TransposeDescriptor
A TransposeDescriptor for the TransposeLayer.
Definition: Descriptors.hpp:1469
ClSliceWorkload.hpp
ClTransposeWorkload.hpp
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
armnn::LayerType::Merge
@ Merge
PolymorphicDowncast.hpp
armnn::EmptyOptional
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
ClStridedSliceWorkload.hpp
armnn::SliceDescriptor
A SliceDescriptor for the SliceLayer.
Definition: Descriptors.hpp:1207
armnn::DataType
DataType
Definition: Types.hpp:48
armnn::LayerType::Softmax
@ Softmax
ClL2NormalizationFloatWorkload.hpp
armnn::PolymorphicDowncast
DestType PolymorphicDowncast(SourceType *value)
Polymorphic downcast for build in pointers only.
Definition: PolymorphicDowncast.hpp:74
armnn::TensorInfo::IsTypeSpaceMatch
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same.
Definition: Tensor.cpp:432
armnn::ClLayerSupport::IsUnidirectionalSequenceLstmSupported
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported) const
Definition: ClLayerSupport.cpp:1572
armnn::ClComparisonWorkloadValidate
arm_compute::Status ClComparisonWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor)
Definition: ClComparisonWorkload.cpp:24
armnn::ClTileWorkloadValidate
arm_compute::Status ClTileWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TileDescriptor &descriptor)
Definition: ClTileWorkload.cpp:16
armnn::ReshapeDescriptor
A ReshapeDescriptor for the ReshapeLayer.
Definition: Descriptors.hpp:1002
armnn::LayerSupportBase::IsMergeSupported
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:112
armnn::ClLayerSupport::IsComparisonSupported
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &ouput, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:798
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn::UnaryOperation::Sin
@ Sin
armnn::LayerType::Quantize
@ Quantize
ClConcatWorkload.hpp
armnn::ClLayerSupport::IsPooling3dSupported
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1319
armnn::LayerSupportBase::IsMemImportSupported
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:105
ClConvertFp16ToFp32Workload.hpp
ClFullyConnectedWorkload.hpp
armnn::ClConvertFp32ToFp16WorkloadValidate
arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClConvertFp32ToFp16Workload.cpp:44
armnn::ClTransposeWorkloadValidate
arm_compute::Status ClTransposeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor)
Definition: ClTransposeWorkload.cpp:17
armnn::LayerType::Multiplication
@ Multiplication
armnn::PermuteDescriptor
A PermuteDescriptor for the PermuteLayer.
Definition: Descriptors.hpp:149
armnn::BatchMatMulDescriptor
A BatchMatMulDescriptor for the BatchMatMul operator.
Definition: Descriptors.hpp:1563
ClSpaceToDepthWorkload.hpp
armnn::ClSoftmaxWorkloadValidate
arm_compute::Status ClSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
Definition: ClSoftmaxWorkload.cpp:17
armnn::LayerType::Addition
@ Addition
armnn::ClPadValidate
arm_compute::Status ClPadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
Definition: ClPadWorkload.cpp:62
ClNegWorkload.hpp
armnn::ClLayerSupport::IsTransposeSupported
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1564
armnn::ClLayerSupport::IsTileSupported
bool IsTileSupported(const TensorInfo &input, const TensorInfo &output, const TileDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1536
ArmComputeUtils.hpp
armnn::ClInstanceNormalizationWorkloadValidate
arm_compute::Status ClInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)
Definition: ClInstanceNormalizationWorkload.cpp:18
armnn::ClGatherWorkloadValidate
arm_compute::Status ClGatherWorkloadValidate(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &output, const GatherDescriptor &descriptor)
Definition: ClGatherWorkload.cpp:15
armnn::SpaceToBatchNdDescriptor
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Definition: Descriptors.hpp:1022
ClLogicalNotWorkload.hpp
ClLstmFloatWorkload.hpp
armnn::ClSliceWorkloadValidate
arm_compute::Status ClSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor)
Definition: ClSliceWorkload.cpp:18
armnn::Convolution3dDescriptor
A Convolution3dDescriptor for the Convolution3dLayer.
Definition: Descriptors.hpp:588
armnn::LayerType::DepthToSpace
@ DepthToSpace
armnn::ClBatchMatMulValidate
arm_compute::Status ClBatchMatMulValidate(const TensorInfo &inputInfoX, const TensorInfo &inputInfoY, const TensorInfo &outputInfo, const BatchMatMulDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: ClBatchMatMulWorkload.cpp:24
armnn::BaseDescriptor
Base class for all descriptors.
Definition: Descriptors.hpp:22
armnn::ClLstmFloatWorkloadValidate
arm_compute::Status ClLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: ClLstmFloatWorkload.cpp:244
armnn::BinaryOperation::Power
@ Power
armnn::ClLayerSupport::IsSpaceToBatchNdSupported
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1442
armnn::LayerType::MemImport
@ MemImport
armnn::LayerType::Pooling2d
@ Pooling2d
ClQuantizeWorkload.hpp
armnn::ClLayerSupport::IsL2NormalizationSupported
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1152
armnn::ClLayerSupport::IsCastSupported
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:776
ClBatchNormalizationFloatWorkload.hpp
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:198
armnn::ClSqrtWorkloadValidate
arm_compute::Status ClSqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClSqrtWorkload.cpp:19
armnn::LayerType::Division
@ Division
armnn::LayerType::Shape
@ Shape
armnn::ClNormalizationWorkloadValidate
arm_compute::Status ClNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
Definition: ClNormalizationFloatWorkload.cpp:19
armnn::ClLayerSupport::IsDivisionSupported
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1004
ClLogicalAndWorkload.hpp
armnn::BatchToSpaceNdDescriptor
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
Definition: Descriptors.hpp:875
armnn::Convolution2dDescriptor
A Convolution2dDescriptor for the Convolution2dLayer.
Definition: Descriptors.hpp:534
armnn::ComparisonDescriptor
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
armnn::FillDescriptor
A FillDescriptor for the FillLayer.
Definition: Descriptors.hpp:925
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::ClLayerSupport::IsInputSupported
bool IsInputSupported(const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1134
armnn::ClLayerSupport::IsDepthToSpaceSupported
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:957
armnn::ElementwiseUnaryDescriptor::m_Operation
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
Definition: Descriptors.hpp:145
ClPadWorkload.hpp
ClConvolution2dWorkload.hpp
ClRsqrtWorkload.hpp
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::LayerType::Gather
@ Gather
armnn::ClLayerSupport::IsArgMinMaxSupported
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:715
armnn::LayerType::Pooling3d
@ Pooling3d
armnn::ClLayerSupport::IsBatchToSpaceNdSupported
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:764
ClConvertFp32ToFp16Workload.hpp
ClBackendId.hpp
armnn::UnaryOperation::Log
@ Log
armnn::LayerType::LogSoftmax
@ LogSoftmax
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::ClLayerSupport::IsElementwiseUnarySupported
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1017
ClAdditionWorkload.hpp
armnn::LogicalBinaryOperation::LogicalAnd
@ LogicalAnd
ClConvolution3dWorkload.hpp
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::LayerType::Cast
@ Cast
armnn::ClLayerSupport::IsResizeSupported
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1418
armnn::LayerSupportBase::IsShapeSupported
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:131
armnn::LayerType::BatchToSpaceNd
@ BatchToSpaceNd
armnn::ClPooling2dWorkloadValidate
arm_compute::Status ClPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)
Definition: ClPooling2dWorkload.cpp:18
armnn::ClLayerSupport::IsFillSupported
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1069
armnn::ClLayerSupport::IsConvolution3dSupported
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:914
armnn::LstmDescriptor
An LstmDescriptor for the LstmLayer.
Definition: Descriptors.hpp:1081
armnn::StridedSliceDescriptor
A StridedSliceDescriptor for the StridedSliceLayer.
Definition: Descriptors.hpp:1282
armnn::ClMinimumWorkloadValidate
arm_compute::Status ClMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: ClMinimumWorkload.cpp:24
armnn::ClSpaceToBatchNdWorkloadValidate
arm_compute::Status ClSpaceToBatchNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor)
Definition: ClSpaceToBatchNdWorkload.cpp:16
armnn::Status
Status
Definition: Types.hpp:42
armnn::ClLayerSupport::IsQuantizedLstmSupported
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1369
ClMinimumWorkload.hpp
armnn::LogicalBinaryDescriptor
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
Definition: Descriptors.hpp:1497
armnn::ClLayerSupport::IsSubtractionSupported
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1523
ClStackWorkload.hpp
armnn::LayerType::Reshape
@ Reshape
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
ARMNN_NO_DEPRECATE_WARN_END
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
armnn::ClLayerSupport::IsPermuteSupported
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1303
armnn::LayerType::SpaceToBatchNd
@ SpaceToBatchNd
armnn::ClConvolution3dWorkloadValidate
arm_compute::Status ClConvolution3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: ClConvolution3dWorkload.cpp:23
armnn::ClLayerSupport::IsGatherNdSupported
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const
Definition: ClLayerSupport.cpp:1122
armnn::LayerType::Fill
@ Fill
armnn::ClBatchNormalizationValidate
arm_compute::Status ClBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: ClBatchNormalizationFloatWorkload.cpp:19
armnn::LayerType::L2Normalization
@ L2Normalization
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::ViewsDescriptor::GetNumDimensions
uint32_t GetNumDimensions() const
Get the number of dimensions.
Definition: Descriptors.cpp:300
armnn::ClQuantizedLstmWorkloadValidate
arm_compute::Status ClQuantizedLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo)
Definition: ClQuantizedLstmWorkload.cpp:18
armnn::ClLogSoftmaxWorkloadValidate
arm_compute::Status ClLogSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor)
Definition: ClLogSoftmaxWorkload.cpp:17
armnn::LayerType::Minimum
@ Minimum
ClSplitterWorkload.hpp
armnn::ClDivisionWorkloadValidate
arm_compute::Status ClDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: ClDivisionWorkload.cpp:18
ClBatchMatMulWorkload.hpp
armnn::ClConcatWorkloadValidate
arm_compute::Status ClConcatWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
Definition: ClConcatWorkload.cpp:27
armnn::IsSupportedForDataTypeGeneric
bool IsSupportedForDataTypeGeneric(Optional< std::string & > reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
Definition: LayerSupportCommon.hpp:27
armnn::ClLayerSupport::IsSplitterSupported
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1466
ClMaximumWorkload.hpp
armnn::LayerType::UnidirectionalSequenceLstm
@ UnidirectionalSequenceLstm
armnn::BinaryOperation::Minimum
@ Minimum
armnn::LayerType::Map
@ Map
armnn::OriginsDescriptor
An OriginsDescriptor for the ConcatLayer.
Definition: Descriptors.hpp:201
ClSubtractionWorkload.hpp
armnn::ClLayerSupport::IsDequantizeSupported
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:947
armnn::LayerType::MemCopy
@ MemCopy
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::ElementwiseUnaryDescriptor
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:129
armnn::TransposeConvolution2dDescriptor
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
Definition: Descriptors.hpp:1419
armnn::LayerType::ArgMinMax
@ ArgMinMax
armnn::ClPreluWorkloadValidate
arm_compute::Status ClPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)
Definition: ClPreluWorkload.cpp:16
armnn::ClLayerSupport::IsGatherSupported
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
Definition: ClLayerSupport.cpp:1108
armnn::LayerType::Pad
@ Pad
armnn::LstmInputParamsInfo
Definition: LstmParams.hpp:63
armnn::ClSinWorkloadValidate
arm_compute::Status ClSinWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClSinWorkload.cpp:18
armnn::LayerType::Rank
@ Rank
armnn::ClAbsWorkloadValidate
arm_compute::Status ClAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClAbsWorkload.cpp:19
armnn::LayerType::Mean
@ Mean
ArmComputeTensorUtils.hpp
armnn::UnaryOperation::Abs
@ Abs
InternalTypes.hpp
armnn::ClLayerSupport::IsStridedSliceSupported
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1511
ClMultiplicationWorkload.hpp
armnn::ClLayerSupport::IsStackSupported
bool IsStackSupported(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1499
ClAbsWorkload.hpp
ClTileWorkload.hpp
armnn::LayerType::Input
@ Input
ClReshapeWorkload.hpp
armnn::ClConvertFp16ToFp32WorkloadValidate
arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClConvertFp16ToFp32Workload.cpp:44
armnn::LayerType::Resize
@ Resize
armnn::ClLayerSupport::IsLogSoftmaxSupported
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1192
armnn::SetValueChecked
void SetValueChecked(Optional< T & > optionalRef, V &&val)
Definition: LayerSupportCommon.hpp:17
ClSoftmaxWorkload.hpp
ClFillWorkload.hpp
ClFloorFloatWorkload.hpp
armnn::BinaryOperation::Div
@ Div
armnn::ClLayerSupport::IsConstantSupported
bool IsConstantSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:853
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::Pooling2dDescriptor
A Pooling2dDescriptor for the Pooling2dLayer.
Definition: Descriptors.hpp:371
armnn::LayerType::Maximum
@ Maximum
armnn::LayerType::Activation
@ Activation
armnn::DepthwiseConvolution2dDescriptor
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
Definition: Descriptors.hpp:659
armnn::ClLogicalOrWorkloadValidate
arm_compute::Status ClLogicalOrWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: ClLogicalOrWorkload.cpp:20
armnn::ClDequantizeWorkloadValidate
arm_compute::Status ClDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClDequantizeWorkload.cpp:22
armnn::LayerType::Lstm
@ Lstm
armnn::LayerSupportBase::IsMemCopySupported
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:98
ClLogicalOrWorkload.hpp
armnn::LayerType::Dequantize
@ Dequantize
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::ReduceDescriptor
A ReduceDescriptor for the REDUCE operators.
Definition: Descriptors.hpp:1517
armnn::ClReshapeWorkloadValidate
arm_compute::Status ClReshapeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClReshapeWorkload.cpp:15
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:483
armnn::ClDepthwiseConvolutionWorkloadValidate
arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
Definition: ClDepthwiseConvolutionWorkload.cpp:26
armnn::LayerType::Unmap
@ Unmap
ClPermuteWorkload.hpp
armnn::MeanDescriptor
A MeanDescriptor for the MeanLayer.
Definition: Descriptors.hpp:1151
armnn::LayerType::QLstm
@ QLstm
ClSqrtWorkload.hpp
armnn::OptionalReferenceSwitch< std::is_reference< T >::value, T >::value
const T & value() const
Definition: Optional.hpp:146
armnn::TileDescriptor
Definition: Descriptors.hpp:1619
armnn::ClArgMinMaxWorkloadValidate
arm_compute::Status ClArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
Definition: ClArgMinMaxWorkload.cpp:31
ClQLstmWorkload.hpp
armnn::SoftmaxDescriptor
A SoftmaxDescriptor for the SoftmaxLayer.
Definition: Descriptors.hpp:177
armnn::ClLayerSupport::IsDilatedDepthwiseConvolutionSupported
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reason=EmptyOptional()) const
Definition: ClLayerSupport.cpp:986
armnn::ClLogicalNotWorkloadValidate
arm_compute::Status ClLogicalNotWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClLogicalNotWorkload.cpp:20
armnn::IBackendInternal::IBackendSpecificModelContextPtr
std::shared_ptr< IBackendModelContext > IBackendSpecificModelContextPtr
Definition: IBackendInternal.hpp:96
armnn::SpaceToDepthDescriptor
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
Definition: Descriptors.hpp:1054
armnn::LayerType::Output
@ Output
armnn::LayerType::Constant
@ Constant
armnn::ClLayerSupport::IsConvertFp32ToFp16Supported
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:871
armnn::ClBackendModelContext::IsFastMathEnabled
bool IsFastMathEnabled() const
Definition: ClBackendModelContext.cpp:66
armnn::ClLayerSupport::IsQLstmSupported
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1335
ClChannelShuffleWorkload.hpp