ArmNN
 24.02
ClLayerSupport.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "ClLayerSupport.hpp"
7 #include "ClBackendId.hpp"
9 
11 
12 #include <InternalTypes.hpp>
13 #include <LayerSupportCommon.hpp>
14 
17 
18 #if defined(ARMCOMPUTECL_ENABLED)
89 #endif
90 
91 
92 namespace armnn
93 {
94 
95 namespace
96 {
97 
98 template<unsigned int FilterSize>
99 bool IsMatchingSize2d(const TensorInfo& weightInfo)
100 {
101  // Width & Height must match.
102  return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
103 }
104 
105 template<uint32_t ValidStride>
106 bool IsMatchingStride(uint32_t actualStride)
107 {
108  return ValidStride == actualStride;
109 }
110 
111 template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
112 bool IsMatchingStride(uint32_t actualStride)
113 {
114  return IsMatchingStride<FirstStride>(actualStride) || IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
115 }
116 
117 template<typename ... Args>
118 bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
119 {
120  IgnoreUnused(reasonIfUnsupported, (args)...);
121 #if defined(ARMCOMPUTECL_ENABLED)
122  return true;
123 #else
124  if (reasonIfUnsupported)
125  {
126  reasonIfUnsupported.value() = "The armnn library has been built without CL support";
127  }
128  return false;
129 #endif
130 }
131 
132 #if defined(ARMCOMPUTECL_ENABLED)
// With CL available, forward the wrapped expression unchanged.
133 #define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
134 #else
// Without CL, drop the expression and report "built without CL support".
// NOTE: the expansion relies on a `reasonIfUnsupported` variable being in
// scope at every call site of this macro.
135 #define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
136 #endif
137 
#if defined(ARMCOMPUTECL_ENABLED)
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    // Ask the Arm Compute Library validation function whether this workload
    // configuration is acceptable.
    const arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    if (aclStatus.error_code() == arm_compute::ErrorCode::OK)
    {
        return true;
    }
    // Surface ACL's error description to the caller when one was requested.
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return false;
}

// Expands to a call of the given ACL validate function, returning its result
// as a bool and capturing any failure reason.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
// Without ACL, validation degenerates to reporting that CL is unavailable.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsClBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
157 
158 template<typename FloatFunc, typename Uint8Func, typename ... Params>
159 bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
160  DataType dataType,
161  FloatFunc floatFuncPtr,
162  Uint8Func uint8FuncPtr,
163  Params&&... params)
164 {
165  return IsClBackendSupported(reasonIfUnsupported) &&
166  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
167  dataType,
168  floatFuncPtr,
169  floatFuncPtr,
170  uint8FuncPtr,
171  &FalseFunc<>,
172  &FalseFunc<>,
173  std::forward<Params>(params)...);
174 }
175 } // anonymous namespace
176 
178  : m_ModelContextPtr(modelContextPtr)
179 {
180 }
181 
183  : m_ModelContextPtr(nullptr)
184 {
185 }
186 
188  const std::vector<TensorInfo>& infos,
189  const BaseDescriptor& descriptor,
190  const Optional<LstmInputParamsInfo>& lstmParamsInfo,
191  const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
192  Optional<std::string&> reasonIfUnsupported) const
193 {
194  switch (type)
195  {
197  return IsActivationSupported(infos[0],
198  infos[1],
199  *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
200  reasonIfUnsupported);
201  case LayerType::Addition:
203  return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
206  return IsArgMinMaxSupported(infos[0],
207  infos[1],
208  *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
209  reasonIfUnsupported);
211  return IsBatchMatMulSupported(infos[0],
212  infos[1],
213  infos[2],
214  *(PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor)),
215  reasonIfUnsupported);
217  return IsBatchNormalizationSupported(infos[0],
218  infos[1],
219  infos[2],
220  infos[3],
221  infos[4],
222  infos[5],
223  *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
224  (&descriptor)),
225  reasonIfUnsupported);
227  return IsBatchToSpaceNdSupported(infos[0],
228  infos[1],
229  *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
230  reasonIfUnsupported);
231  case LayerType::Cast:
232  return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
234  return IsChannelShuffleSupported(infos[0],
235  infos[1],
236  *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
237  reasonIfUnsupported);
239  return IsComparisonSupported(infos[0],
240  infos[1],
241  infos[2],
242  *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
243  reasonIfUnsupported);
244  case LayerType::Concat:
245  {
246  std::vector<const TensorInfo*> inputInfos;
247  for (uint32_t i = 0; i < (infos.size() - 1); i++)
248  {
249  inputInfos.push_back(&infos[i]);
250  }
251  return IsConcatSupported(inputInfos,
252  infos[infos.size() - 1],
253  *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
254  reasonIfUnsupported);
255  }
256  case LayerType::Constant:
257  return IsConstantSupported(infos[0], reasonIfUnsupported);
259  return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
261  return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
263  {
264  if (infos.size() != 4)
265  {
266  throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
267  "TensorInfos should be of format: {input, output, weights, biases}.");
268  }
269 
270  auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
271  if (infos[3] == TensorInfo())
272  {
273  return IsConvolution2dSupported(infos[0],
274  infos[1],
275  desc,
276  infos[2],
277  EmptyOptional(),
278  reasonIfUnsupported);
279  }
280  else
281  {
282  return IsConvolution2dSupported(infos[0],
283  infos[1],
284  desc,
285  infos[2],
286  infos[3],
287  reasonIfUnsupported);
288  }
289  }
291  {
292  if (infos.size() != 4)
293  {
294  throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
295  "TensorInfos should be of format: {input, output, weights, biases}.");
296  }
297 
298  auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
299  if (infos[3] == TensorInfo())
300  {
301  return IsConvolution3dSupported(infos[0],
302  infos[1],
303  desc,
304  infos[2],
305  EmptyOptional(),
306  reasonIfUnsupported);
307  }
308  else
309  {
310  return IsConvolution3dSupported(infos[0],
311  infos[1],
312  desc,
313  infos[2],
314  infos[3],
315  reasonIfUnsupported);
316  }
317  }
319  return IsDepthToSpaceSupported(infos[0],
320  infos[1],
321  *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
322  reasonIfUnsupported);
324  {
325  if (infos.size() != 4)
326  {
327  throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
328  "TensorInfos should be of format: {input, output, weights, biases}.");
329  }
330 
331  auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
332  if (infos[3] == TensorInfo())
333  {
334  return IsDepthwiseConvolutionSupported(infos[0],
335  infos[1],
336  desc,
337  infos[2],
338  EmptyOptional(),
339  reasonIfUnsupported);
340  }
341  else
342  {
343  return IsDepthwiseConvolutionSupported(infos[0],
344  infos[1],
345  desc,
346  infos[2],
347  infos[3],
348  reasonIfUnsupported);
349  }
350  }
352  return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
353  case LayerType::Division:
355  return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
358  {
359  auto desc = *(PolymorphicDowncast<const ElementwiseBinaryDescriptor *>(&descriptor));
360 
361  switch (desc.m_Operation)
362  {
365  reasonIfUnsupported,
366  infos[0],
367  infos[1],
368  infos[2],
369  nullptr);
372  reasonIfUnsupported,
373  infos[0],
374  infos[1],
375  infos[2],
376  nullptr);
379  reasonIfUnsupported,
380  infos[0],
381  infos[1],
382  infos[2]);
385  reasonIfUnsupported,
386  infos[0],
387  infos[1],
388  infos[2]);
391  reasonIfUnsupported,
392  infos[0],
393  infos[1],
394  infos[2],
395  nullptr);
399  reasonIfUnsupported,
400  infos[0],
401  infos[1],
402  infos[2],
403  desc,
404  nullptr);
407  reasonIfUnsupported,
408  infos[0],
409  infos[1],
410  infos[2],
411  nullptr);
412  default:
413  return false;
414  }
415  }
417  return IsElementwiseUnarySupported(infos[0],
418  infos[1],
419  *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
420  reasonIfUnsupported);
421  case LayerType::Fill:
422  return IsFillSupported(infos[0],
423  infos[1],
424  *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
425  reasonIfUnsupported);
426  case LayerType::Floor:
427  return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
429  return IsFullyConnectedSupported(infos[0],
430  infos[1],
431  infos[2],
432  infos[3],
433  *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
434  reasonIfUnsupported);
435  case LayerType::Gather:
436  return IsGatherSupported(infos[0],
437  infos[1],
438  infos[2],
439  *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
440  reasonIfUnsupported);
441  case LayerType::GatherNd:
442  return IsGatherNdSupported(infos[0],
443  infos[1],
444  infos[2],
445  reasonIfUnsupported);
446  case LayerType::Input:
447  return IsInputSupported(infos[0], reasonIfUnsupported);
449  return IsInstanceNormalizationSupported(infos[0],
450  infos[1],
451  *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
452  (&descriptor)),
453  reasonIfUnsupported);
455  return IsL2NormalizationSupported(infos[0],
456  infos[1],
457  *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
458  reasonIfUnsupported);
460  return IsLogicalBinarySupported(infos[0],
461  infos[1],
462  infos[2],
463  *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
464  reasonIfUnsupported);
466  return IsLogSoftmaxSupported(infos[0],
467  infos[1],
468  *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
469  reasonIfUnsupported);
470  case LayerType::Lstm:
471  return IsLstmSupported(infos[0],
472  infos[1],
473  infos[2],
474  infos[3],
475  infos[4],
476  infos[5],
477  infos[6],
478  *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
479  lstmParamsInfo.value(),
480  reasonIfUnsupported);
481  case LayerType::Map:
482  return true;
483  case LayerType::MemCopy:
484  return LayerSupportBase::IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
486  return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
487  case LayerType::Merge:
488  return LayerSupportBase::IsMergeSupported(infos[0],
489  infos[1],
490  infos[2],
491  reasonIfUnsupported);
492  case LayerType::Maximum:
494  return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
496  case LayerType::Mean:
497  return IsMeanSupported(infos[0],
498  infos[1],
499  *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
500  reasonIfUnsupported);
501  case LayerType::Minimum:
503  return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
507  return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
510  return IsNormalizationSupported(infos[0],
511  infos[1],
512  *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
513  reasonIfUnsupported);
514  case LayerType::Output:
515  return IsOutputSupported(infos[0], reasonIfUnsupported);
516  case LayerType::Pad:
517  return IsPadSupported(infos[0],
518  infos[1],
519  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
520  reasonIfUnsupported);
521  case LayerType::Permute:
522  return IsPermuteSupported(infos[0],
523  infos[1],
524  *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
525  reasonIfUnsupported);
527  return IsPooling2dSupported(infos[0],
528  infos[1],
529  *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
530  reasonIfUnsupported);
532  return IsPooling3dSupported(infos[0],
533  infos[1],
534  *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
535  reasonIfUnsupported);
536  case LayerType::Prelu:
537  return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
538  case LayerType::QLstm:
539  return IsQLstmSupported(infos[0],
540  infos[1],
541  infos[2],
542  infos[3],
543  infos[4],
544  infos[5],
545  *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
546  lstmParamsInfo.value(),
547  reasonIfUnsupported);
548  case LayerType::Quantize:
549  return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
551  return IsQuantizedLstmSupported(infos[0],
552  infos[1],
553  infos[2],
554  infos[3],
555  infos[4],
556  quantizedLstmParamsInfo.value(),
557  reasonIfUnsupported);
558  case LayerType::Rank:
559  return true;
560  case LayerType::Reduce:
561  return IsReduceSupported(infos[0],
562  infos[1],
563  *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
564  reasonIfUnsupported);
565  case LayerType::Reshape:
566  return IsReshapeSupported(infos[0],
567  infos[1],
568  *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
569  reasonIfUnsupported);
570  case LayerType::Resize:
571  return IsResizeSupported(infos[0],
572  infos[1],
573  *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
574  reasonIfUnsupported);
576  return IsReverseV2Supported(infos[0],
577  infos[1],
578  infos[2],
579  reasonIfUnsupported);
580  case LayerType::Shape:
581  return LayerSupportBase::IsShapeSupported(infos[0],
582  infos[1],
583  reasonIfUnsupported);
584  case LayerType::Slice:
585  return IsSliceSupported(infos[0],
586  infos[1],
587  *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
588  reasonIfUnsupported);
589  case LayerType::Softmax:
590  return IsSoftmaxSupported(infos[0],
591  infos[1],
592  *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
593  reasonIfUnsupported);
595  return IsSpaceToBatchNdSupported(infos[0],
596  infos[1],
597  *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
598  reasonIfUnsupported);
600  return IsSpaceToDepthSupported(infos[0],
601  infos[1],
602  *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
603  reasonIfUnsupported);
604  case LayerType::Splitter:
605  {
606  std::vector<TensorInfo> outputInfos;
607  for (uint32_t i = 1; i < infos.size(); i++)
608  {
609  outputInfos.push_back(infos[i]);
610  }
611  return IsSplitterSupported(infos[0],
612  {outputInfos.begin(), outputInfos.end()},
613  *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
614  reasonIfUnsupported);
615  }
616  case LayerType::Stack:
617  {
618  std::vector<const TensorInfo*> inputInfos;
619  for (uint32_t i = 0; i < infos.size() - 1; i++)
620  {
621  inputInfos.push_back(&infos[i]);
622  }
623  return IsStackSupported(inputInfos,
624  infos[infos.size() - 1],
625  *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
626  reasonIfUnsupported);
627  }
629  return IsStridedSliceSupported(infos[0],
630  infos[1],
631  *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
632  reasonIfUnsupported);
635  return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
637  case LayerType::Tile:
638  return IsTileSupported(infos[0],
639  infos[1],
640  *(PolymorphicDowncast<const TileDescriptor*>(&descriptor)),
641  reasonIfUnsupported);
643  return IsTransposeSupported(infos[0],
644  infos[1],
645  *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
646  reasonIfUnsupported);
648  {
649  if (infos.size() != 4)
650  {
651  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
652  "TensorInfos should be of format: {input, output, weights, biases}.");
653  }
654 
655  auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
656  if (infos[3] == TensorInfo())
657  {
658  return IsTransposeConvolution2dSupported(infos[0],
659  infos[1],
660  desc,
661  infos[2],
662  EmptyOptional(),
663  reasonIfUnsupported);
664  }
665  else
666  {
667  return IsTransposeConvolution2dSupported(infos[0],
668  infos[1],
669  desc,
670  infos[2],
671  infos[3],
672  reasonIfUnsupported);
673  }
674  }
677  infos[1],
678  infos[2],
679  infos[3],
680  infos[4],
681  infos[5],
682  *(PolymorphicDowncast<const
683  UnidirectionalSequenceLstmDescriptor*>(&descriptor)),
684  lstmParamsInfo.value(),
685  reasonIfUnsupported);
686  case LayerType::Unmap:
687  return true;
688  default:
689  // layers not supported in cl by default:
690  // debug, detectionpostprocess, fakequantization,
691  // precompiled, standin, switch, pooling3d, fused
692  return false;
693  }
694 }
695 
697  const TensorInfo& output,
698  const ActivationDescriptor& descriptor,
699  Optional<std::string&> reasonIfUnsupported) const
700 {
702  reasonIfUnsupported,
703  input,
704  output,
705  descriptor);
706 }
707 
709  const TensorInfo& input1,
710  const TensorInfo& output,
711  Optional<std::string&> reasonIfUnsupported) const
712 {
714  reasonIfUnsupported,
715  input0,
716  input1,
717  output,
718  nullptr);
719 }
720 
722  const TensorInfo& output,
723  const ArgMinMaxDescriptor& descriptor,
724  Optional<std::string&> reasonIfUnsupported) const
725 {
726 
728  reasonIfUnsupported,
729  input,
730  output,
731  descriptor);
732 }
733 
735  const TensorInfo& inputY,
736  const TensorInfo& output,
737  const BatchMatMulDescriptor& descriptor,
738  Optional<std::string&> reasonIfUnsupported) const
739 {
741  reasonIfUnsupported,
742  inputX,
743  inputY,
744  output,
745  descriptor,
746  nullptr);
747 }
748 
750  const TensorInfo& output,
751  const TensorInfo& mean,
752  const TensorInfo& var,
753  const TensorInfo& beta,
754  const TensorInfo& gamma,
755  const BatchNormalizationDescriptor& descriptor,
756  Optional<std::string&> reasonIfUnsupported) const
757 {
759  reasonIfUnsupported,
760  input,
761  output,
762  mean,
763  var,
764  beta,
765  gamma,
766  descriptor,
767  nullptr);
768 }
769 
771  const TensorInfo& output,
772  const BatchToSpaceNdDescriptor& descriptor,
773  Optional<std::string&> reasonIfUnsupported) const
774 {
776  reasonIfUnsupported,
777  input,
778  output,
779  descriptor);
780 }
781 
783  const TensorInfo& output,
784  Optional<std::string&> reasonIfUnsupported) const
785 {
787  reasonIfUnsupported,
788  input,
789  output);
790 }
791 
793  const TensorInfo& output,
794  const ChannelShuffleDescriptor& descriptor,
795  Optional<std::string&> reasonIfUnsupported) const
796 {
798  reasonIfUnsupported,
799  input,
800  output,
801  descriptor);
802 }
803 
805  const TensorInfo& input1,
806  const TensorInfo& output,
807  const ComparisonDescriptor& descriptor,
808  Optional<std::string&> reasonIfUnsupported) const
809 {
811  reasonIfUnsupported,
812  input0,
813  input1,
814  output,
815  descriptor);
816 }
817 
818 bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
819  const TensorInfo& output,
820  const OriginsDescriptor& descriptor,
821  Optional<std::string&> reasonIfUnsupported) const
822 {
823  if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
824  {
825  SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
826  return false;
827  }
828 
829  unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
830  if(concatInnerAxis < 3) // Width, height, or channels
831  {
833  reasonIfUnsupported,
834  inputs,
835  output,
836  descriptor);
837  }
838  else if (concatInnerAxis == 3)
839  {
840  // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
841  // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
842  for (auto& input : inputs)
843  {
844  if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
845  {
846  SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
847  return false;
848  }
849  }
850  return true; // Sub-tensors support concat along batch
851  }
852  else // > 4 dimensions not supported.
853  {
854  SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
855  return false;
856  }
857 }
858 
860  Optional<std::string&> reasonIfUnsupported) const
861 {
863  reasonIfUnsupported,
864  output);
865 }
866 
868  const TensorInfo& output,
869  Optional<std::string&> reasonIfUnsupported) const
870 {
872  reasonIfUnsupported,
873  input,
874  output);
875 }
876 
878  const TensorInfo& output,
879  Optional<std::string&> reasonIfUnsupported) const
880 {
882  reasonIfUnsupported,
883  input,
884  output);
885 }
886 
888  const TensorInfo& output,
889  const Convolution2dDescriptor& descriptor,
890  const TensorInfo& weights,
891  const Optional<TensorInfo>& biases,
892  Optional<std::string&> reasonIfUnsupported) const
893 {
894  bool isFastMathEnabled = false;
895 #if defined(ARMCOMPUTECL_ENABLED)
896  if (m_ModelContextPtr)
897  {
898  if (m_ModelContextPtr.get() != nullptr)
899  {
900  auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
901  if (modelOptions)
902  {
903  isFastMathEnabled = modelOptions->IsFastMathEnabled();
904  }
905  }
906  }
907 #endif
908 
910  reasonIfUnsupported,
911  input,
912  output,
913  descriptor,
914  weights,
915  biases,
916  isFastMathEnabled,
917  nullptr);
918 }
919 
921  const TensorInfo& output,
922  const Convolution3dDescriptor& descriptor,
923  const TensorInfo& weights,
924  const Optional<TensorInfo>& biases,
925  Optional<std::string&> reasonIfUnsupported) const
926 {
927  bool isFastMathEnabled = false;
928 #if defined(ARMCOMPUTECL_ENABLED)
929  if (m_ModelContextPtr)
930 {
931  if (m_ModelContextPtr.get() != nullptr)
932  {
933  auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
934  if (modelOptions)
935  {
936  isFastMathEnabled = modelOptions->IsFastMathEnabled();
937  }
938  }
939 }
940 #endif
941 
943  reasonIfUnsupported,
944  input,
945  output,
946  descriptor,
947  weights,
948  biases,
949  isFastMathEnabled,
950  nullptr);
951 }
952 
954  const TensorInfo& output,
955  Optional<std::string&> reasonIfUnsupported) const
956 {
958  reasonIfUnsupported,
959  input,
960  output);
961 }
962 
964  const TensorInfo& output,
965  const DepthToSpaceDescriptor& descriptor,
966  Optional<std::string&> reasonIfUnsupported) const
967 {
969  reasonIfUnsupported,
970  input,
971  output,
972  descriptor);
973 }
974 
976  const TensorInfo& output,
977  const DepthwiseConvolution2dDescriptor& descriptor,
978  const TensorInfo& weights,
979  const Optional<TensorInfo>& biases,
980  Optional<std::string&> reasonIfUnsupported) const
981 {
983  reasonIfUnsupported,
984  input,
985  output,
986  descriptor,
987  weights,
988  biases,
989  nullptr);
990 }
991 
993  const TensorInfo& output,
994  const DepthwiseConvolution2dDescriptor& descriptor,
995  const TensorInfo& weights,
996  const Optional<TensorInfo>& biases,
997  Optional<std::string&> reasonIfUnsupported) const
998 {
1000  reasonIfUnsupported,
1001  input,
1002  output,
1003  descriptor,
1004  weights,
1005  biases,
1006  nullptr);
1007 }
1008 
1009 
1011  const TensorInfo& input1,
1012  const TensorInfo& output,
1013  Optional<std::string&> reasonIfUnsupported) const
1014 {
1016  reasonIfUnsupported,
1017  input0,
1018  input1,
1019  output,
1020  nullptr);
1021 }
1022 
1024  const TensorInfo& output,
1025  const ElementwiseUnaryDescriptor& descriptor,
1026  Optional<std::string&> reasonIfUnsupported) const
1027 {
1028  switch(descriptor.m_Operation)
1029  {
1030  case UnaryOperation::Abs:
1032  reasonIfUnsupported,
1033  input,
1034  output);
1035  case UnaryOperation::Exp:
1037  reasonIfUnsupported,
1038  input,
1039  output);
1040  case UnaryOperation::Log:
1042  reasonIfUnsupported,
1043  input,
1044  output);
1047  reasonIfUnsupported,
1048  input,
1049  output);
1050  case UnaryOperation::Neg:
1052  reasonIfUnsupported,
1053  input,
1054  output);
1055  case UnaryOperation::Rsqrt:
1057  reasonIfUnsupported,
1058  input,
1059  output);
1060  case UnaryOperation::Sin:
1062  reasonIfUnsupported,
1063  input,
1064  output);
1065  case UnaryOperation::Sqrt:
1067  reasonIfUnsupported,
1068  input,
1069  output);
1070  default:
1071  return false;
1072  }
1073 }
1074 
1076  const TensorInfo& output,
1077  const FillDescriptor& descriptor,
1078  Optional<std::string&> reasonIfUnsupported) const
1079 {
1080  armnn::IgnoreUnused(input);
1081  armnn::IgnoreUnused(output);
1082  armnn::IgnoreUnused(descriptor);
1083 
1084  return IsClBackendSupported(reasonIfUnsupported);
1085 }
1086 
1088  const TensorInfo& output,
1089  Optional<std::string&> reasonIfUnsupported) const
1090 {
1092  reasonIfUnsupported,
1093  input,
1094  output);
1095 }
1096 
1098  const TensorInfo& output,
1099  const TensorInfo& weights,
1100  const TensorInfo& biases,
1101  const FullyConnectedDescriptor& descriptor,
1102  Optional<std::string&> reasonIfUnsupported) const
1103 {
1105  reasonIfUnsupported,
1106  input,
1107  output,
1108  weights,
1109  biases,
1110  descriptor,
1111  nullptr);
1112 }
1113 
1115  const TensorInfo& input1,
1116  const TensorInfo& output,
1117  const GatherDescriptor& descriptor,
1118  Optional<std::string&> reasonIfUnsupported) const
1119 {
1121  reasonIfUnsupported,
1122  input0,
1123  input1,
1124  output,
1125  descriptor);
1126 }
1127 
1129  const TensorInfo& input1,
1130  const TensorInfo& output,
1131  Optional<std::string&> reasonIfUnsupported) const
1132 {
1134  reasonIfUnsupported,
1135  input0,
1136  input1,
1137  output);
1138 }
1139 
1141  Optional<std::string&> reasonIfUnsupported) const
1142 {
1143  return IsClBackendSupported(reasonIfUnsupported, input);
1144 }
1145 
1147  const TensorInfo& output,
1148  const InstanceNormalizationDescriptor& descriptor,
1149  Optional<std::string&> reasonIfUnsupported) const
1150 {
1152  reasonIfUnsupported,
1153  input,
1154  output,
1155  descriptor);
1156 }
1157 
1159  const TensorInfo& output,
1160  const L2NormalizationDescriptor& descriptor,
1161  Optional<std::string&> reasonIfUnsupported) const
1162 {
1164  reasonIfUnsupported,
1165  input,
1166  output,
1167  descriptor);
1168 }
1169 
1171  const TensorInfo& input1,
1172  const TensorInfo& output,
1173  const LogicalBinaryDescriptor& descriptor,
1174  Optional<std::string&> reasonIfUnsupported) const
1175 {
1176  IgnoreUnused(output);
1177 
1178  switch(descriptor.m_Operation)
1179  {
1182  reasonIfUnsupported,
1183  input0,
1184  input1,
1185  output);
1188  reasonIfUnsupported,
1189  input0,
1190  input1,
1191  output);
1192  default:
1193  return false;
1194  }
1195 }
1196 
1197 
1199  const TensorInfo& output,
1200  const LogSoftmaxDescriptor& descriptor,
1201  Optional<std::string&> reasonIfUnsupported) const
1202 {
1204  reasonIfUnsupported,
1205  input,
1206  output,
1207  descriptor);
1208 }
1209 
1211  const TensorInfo& outputStateIn,
1212  const TensorInfo& cellStateIn,
1213  const TensorInfo& scratchBuffer,
1214  const TensorInfo& outputStateOut,
1215  const TensorInfo& cellStateOut,
1216  const TensorInfo& output,
1217  const LstmDescriptor& descriptor,
1218  const LstmInputParamsInfo& paramsInfo,
1219  Optional<std::string&> reasonIfUnsupported) const
1220 {
1222  reasonIfUnsupported,
1223  input,
1224  outputStateIn,
1225  cellStateIn,
1226  scratchBuffer,
1227  outputStateOut,
1228  cellStateOut,
1229  output,
1230  descriptor,
1231  paramsInfo);
1232 }
1233 
1235  const TensorInfo& input1,
1236  const TensorInfo& output,
1237  Optional<std::string&> reasonIfUnsupported) const
1238 {
1240  reasonIfUnsupported,
1241  input0,
1242  input1,
1243  output);
1244 }
1245 
1247  const TensorInfo& output,
1248  const MeanDescriptor& descriptor,
1249  Optional<std::string&> reasonIfUnsupported) const
1250 {
1252  reasonIfUnsupported,
1253  input,
1254  output,
1255  descriptor);
1256 }
1257 
1259  const TensorInfo& input1,
1260  const TensorInfo& output,
1261  Optional<std::string&> reasonIfUnsupported) const
1262 {
1264  reasonIfUnsupported,
1265  input0,
1266  input1,
1267  output);
1268 }
1269 
1271  const TensorInfo& input1,
1272  const TensorInfo& output,
1273  Optional<std::string&> reasonIfUnsupported) const
1274 {
1276  reasonIfUnsupported,
1277  input0,
1278  input1,
1279  output,
1280  nullptr);
1281 }
1282 
1284  const TensorInfo& output,
1285  const NormalizationDescriptor& descriptor,
1286  Optional<std::string&> reasonIfUnsupported) const
1287 {
1288  FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1289 }
1290 
1292  Optional<std::string&> reasonIfUnsupported) const
1293 {
1294  return IsClBackendSupported(reasonIfUnsupported, output);
1295 }
1296 
1298  const TensorInfo& output,
1299  const PadDescriptor& descriptor,
1300  Optional<std::string&> reasonIfUnsupported) const
1301 {
1303  reasonIfUnsupported,
1304  input,
1305  output,
1306  descriptor);
1307 }
1308 
1310  const TensorInfo& output,
1311  const PermuteDescriptor& descriptor,
1312  Optional<std::string&> reasonIfUnsupported) const
1313 {
1314  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1315 }
1316 
1318  const TensorInfo& output,
1319  const Pooling2dDescriptor& descriptor,
1320  Optional<std::string&> reasonIfUnsupported) const
1321 {
1322  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1323 }
1324 
1326  const TensorInfo& output,
1327  const Pooling3dDescriptor& descriptor,
1328  Optional<std::string&> reasonIfUnsupported) const
1329 {
1330  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling3dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1331 }
1332 
1334  const armnn::TensorInfo &alpha,
1335  const armnn::TensorInfo &output,
1336  armnn::Optional<std::string &> reasonIfUnsupported) const
1337 {
1338  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
1339 }
1340 
1342  const TensorInfo& previousOutputIn,
1343  const TensorInfo& previousCellStateIn,
1344  const TensorInfo& outputStateOut,
1345  const TensorInfo& cellStateOut,
1346  const TensorInfo& output,
1347  const QLstmDescriptor& descriptor,
1348  const LstmInputParamsInfo& paramsInfo,
1349  Optional<std::string&> reasonIfUnsupported) const
1350 {
1351  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
1352  previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
1353  previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
1354  outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
1355  cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
1357  {
1359  reasonIfUnsupported,
1360  input,
1361  previousCellStateIn,
1362  previousOutputIn,
1363  cellStateOut,
1364  outputStateOut,
1365  output,
1366  descriptor,
1367  paramsInfo);
1368  }
1369  else
1370  {
1371  return false;
1372  }
1373 }
1374 
1376  const TensorInfo& previousCellStateIn,
1377  const TensorInfo& previousOutputIn,
1378  const TensorInfo& cellStateOut,
1379  const TensorInfo& output,
1380  const QuantizedLstmInputParamsInfo& paramsInfo,
1381  Optional<std::string&> reasonIfUnsupported) const
1382 {
1384  reasonIfUnsupported,
1385  input,
1386  previousCellStateIn,
1387  previousOutputIn,
1388  cellStateOut,
1389  output,
1390  paramsInfo);
1391 }
1392 
1394  const TensorInfo& output,
1395  Optional<std::string&> reasonIfUnsupported) const
1396 {
1398  reasonIfUnsupported,
1399  input,
1400  output);
1401 }
1402 
1404  const TensorInfo& output,
1405  const ReduceDescriptor& descriptor,
1406  Optional<std::string&> reasonIfUnsupported) const
1407 {
1409  reasonIfUnsupported,
1410  input,
1411  output,
1412  descriptor);
1413 }
1414 
1416  const TensorInfo& output,
1417  const ReshapeDescriptor& descriptor,
1418  Optional<std::string&> reasonIfUnsupported) const
1419 {
1420  IgnoreUnused(descriptor);
1421  FORWARD_WORKLOAD_VALIDATE_FUNC(ClReshapeWorkloadValidate, reasonIfUnsupported, input, output);
1422 }
1423 
1425  const TensorInfo& output,
1426  const ResizeDescriptor& descriptor,
1427  Optional<std::string&> reasonIfUnsupported) const
1428 {
1429  FORWARD_WORKLOAD_VALIDATE_FUNC(ClResizeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1430 }
1431 
1433  const TensorInfo& axis,
1434  const TensorInfo& output,
1435  Optional<std::string&> reasonIfUnsupported) const
1436 {
1438  reasonIfUnsupported,
1439  input,
1440  axis,
1441  output);
1442 }
1443 
1445  const TensorInfo& output,
1446  const SliceDescriptor& descriptor,
1447  Optional<std::string&> reasonIfUnsupported) const
1448 {
1449  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSliceWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1450 }
1451 
1453  const TensorInfo& output,
1454  const SoftmaxDescriptor& descriptor,
1455  Optional<std::string&> reasonIfUnsupported) const
1456 {
1457  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1458 }
1459 
1461  const TensorInfo& output,
1462  const SpaceToBatchNdDescriptor& descriptor,
1463  Optional<std::string&> reasonIfUnsupported) const
1464 {
1466  reasonIfUnsupported,
1467  input,
1468  output,
1469  descriptor);
1470 }
1471 
1473  const TensorInfo& output,
1474  const SpaceToDepthDescriptor& descriptor,
1475  Optional<std::string&> reasonIfUnsupported) const
1476 {
1478  reasonIfUnsupported,
1479  input,
1480  output,
1481  descriptor);
1482 }
1483 
1485  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
1486  const ViewsDescriptor& descriptor,
1487  Optional<std::string&> reasonIfUnsupported) const
1488 {
1489 #if defined(ARMCOMPUTECL_ENABLED)
1490  // Split along the last dimension, cannot use sub-tensors
1491  // as width and height of the sub-tensors do not match
1492  // the width and height of the parent tensor
1493  // in case of input with more than 2D.
1494  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
1495  if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
1496  *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
1497  {
1499  reasonIfUnsupported,
1500  input,
1501  outputs,
1502  *splitAxis.begin());
1503  }
1504 #endif
1505  IgnoreUnused(descriptor);
1506  for (auto output : outputs)
1507  {
1508  if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
1509  {
1510  SetValueChecked(reasonIfUnsupported, "Cl Splitter: Types and quantization parameters must match.");
1511  return false;
1512  }
1513  }
1514  return true;
1515 }
1516 
1517 bool ClLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
1518  const TensorInfo& output,
1519  const StackDescriptor& descriptor,
1520  Optional<std::string&> reasonIfUnsupported) const
1521 {
1523  reasonIfUnsupported,
1524  inputs,
1525  output,
1526  descriptor);
1527 }
1528 
1530  const TensorInfo& output,
1531  const StridedSliceDescriptor& descriptor,
1532  Optional<std::string&> reasonIfUnsupported) const
1533 {
1535  reasonIfUnsupported,
1536  input,
1537  output,
1538  descriptor);
1539 }
1540 
1542  const TensorInfo& input1,
1543  const TensorInfo& output,
1544  Optional<std::string&> reasonIfUnsupported) const
1545 {
1547  reasonIfUnsupported,
1548  input0,
1549  input1,
1550  output,
1551  nullptr);
1552 }
1553 
1555  const TensorInfo& output,
1556  const TileDescriptor& descriptor,
1557  Optional<std::string&> reasonIfUnsupported) const
1558 {
1560  reasonIfUnsupported,
1561  input,
1562  output,
1563  descriptor);
1564 }
1565 
1567  const TensorInfo& output,
1568  const TransposeConvolution2dDescriptor& descriptor,
1569  const TensorInfo& weights,
1570  const Optional<TensorInfo>& biases,
1571  Optional<std::string&> reasonIfUnsupported) const
1572 {
1574  reasonIfUnsupported,
1575  input,
1576  output,
1577  descriptor,
1578  weights,
1579  biases);
1580 }
1581 
1583  const TensorInfo& output,
1584  const TransposeDescriptor& descriptor,
1585  Optional<std::string&> reasonIfUnsupported) const
1586 {
1587  FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1588 }
1589 
1591  const TensorInfo& outputStateIn,
1592  const TensorInfo& cellStateIn,
1593  const TensorInfo& outputStateOut,
1594  const TensorInfo& cellStateOut,
1595  const TensorInfo& output,
1596  const UnidirectionalSequenceLstmDescriptor& descriptor,
1597  const LstmInputParamsInfo& paramsInfo,
1598  Optional<std::string&> reasonIfUnsupported) const
1599 {
1601  reasonIfUnsupported,
1602  input,
1603  outputStateIn,
1604  cellStateIn,
1605  outputStateOut,
1606  cellStateOut,
1607  output,
1608  descriptor,
1609  paramsInfo);
1610 }
1611 
1612 } // namespace armnn
armnn::BatchNormalizationDescriptor
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
Definition: Descriptors.hpp:828
armnn::LayerType::SpaceToDepth
@ SpaceToDepth
armnn::ClLayerSupport::IsPooling2dSupported
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1317
ClPooling3dWorkload.hpp
armnn::ClLayerSupport::IsBatchNormalizationSupported
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:749
armnn::OriginsDescriptor::GetConcatAxis
unsigned int GetConcatAxis() const
Get the concatenation axis value.
Definition: Descriptors.cpp:162
armnn::ClLogWorkloadValidate
arm_compute::Status ClLogWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClLogWorkload.cpp:18
armnn::BinaryOperation::Mul
@ Mul
armnn::ViewsDescriptor
A ViewsDescriptor for the SplitterLayer.
Definition: Descriptors.hpp:244
armnn::ClRsqrtWorkloadValidate
arm_compute::Status ClRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClRsqrtWorkload.cpp:18
armnn::LayerType::Permute
@ Permute
armnn::ActivationDescriptor
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
armnn::FullyConnectedDescriptor
A FullyConnectedDescriptor for the FullyConnectedLayer.
Definition: Descriptors.hpp:507
armnn::BinaryOperation::Add
@ Add
ClConstantWorkload.hpp
armnn::ClLayerSupport::IsMinimumSupported
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1258
ClDepthwiseConvolutionWorkload.hpp
armnn::LayerType::Splitter
@ Splitter
ClArgMinMaxWorkload.hpp
armnn::ClFloorWorkloadValidate
arm_compute::Status ClFloorWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClFloorFloatWorkload.cpp:14
armnn::LayerType::BatchNormalization
@ BatchNormalization
armnn::QLstmDescriptor
A QLstmDescriptor for the QLstmLayer.
Definition: Descriptors.hpp:1380
armnn::ClLayerSupport::IsPreluSupported
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1333
armnn::ClSubtractionValidate
arm_compute::Status ClSubtractionValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: ClSubtractionWorkload.cpp:46
armnn::Optional
Definition: Optional.hpp:270
armnn::ClLayerSupport::IsConvolution2dSupported
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:887
armnn::ClLayerSupport::IsLstmSupported
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1210
ClLogWorkload.hpp
armnn::ClLayerSupport::IsConcatSupported
bool IsConcatSupported(const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:818
armnn::ClQuantizeWorkloadValidate
arm_compute::Status ClQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClQuantizeWorkload.cpp:22
ClElementwiseBinaryWorkload.hpp
armnn::ClTransposeConvolution2dWorkloadValidate
arm_compute::Status ClTransposeConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
Definition: ClTransposeConvolution2dWorkload.cpp:26
ClLogSoftmaxWorkload.hpp
armnn::ClCastValidate
arm_compute::Status ClCastValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClCastWorkload.cpp:20
FORWARD_WORKLOAD_VALIDATE_FUNC
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
Definition: ClLayerSupport.cpp:151
ClDepthToSpaceWorkload.hpp
armnn::ClStridedSliceWorkloadValidate
arm_compute::Status ClStridedSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor)
Definition: ClStridedSliceWorkload.cpp:27
armnn::Pooling3dDescriptor
A Pooling3dDescriptor for the Pooling3dLayer.
Definition: Descriptors.hpp:431
armnn::ClExpWorkloadValidate
arm_compute::Status ClExpWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClExpWorkload.cpp:18
armnn::ResizeDescriptor
A ResizeDescriptor for the ResizeLayer.
Definition: Descriptors.hpp:985
armnn::ArgMinMaxDescriptor
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
armnn::ClMaximumWorkloadValidate
arm_compute::Status ClMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: ClMaximumWorkload.cpp:24
armnn::ClLayerSupport::IsAdditionSupported
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:708
armnn::InstanceNormalizationDescriptor
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
Definition: Descriptors.hpp:847
armnn::ClLayerSupport::IsBatchMatMulSupported
bool IsBatchMatMulSupported(const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:734
armnn::ClMultiplicationWorkloadValidate
arm_compute::Status ClMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: ClMultiplicationWorkload.cpp:18
ClQuantizedLstmWorkload.hpp
armnn::GatherDescriptor
A GatherDescriptor for the GatherLayer.
Definition: Descriptors.hpp:965
armnn::ClGatherNdWorkloadValidate
arm_compute::Status ClGatherNdWorkloadValidate(const TensorInfo &paramsInfo, const TensorInfo &indicesInfo, const TensorInfo &outputInfo)
Definition: ClGatherNdWorkload.cpp:16
armnn::ClSpaceToDepthWorkloadValidate
arm_compute::Status ClSpaceToDepthWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor)
Definition: ClSpaceToDepthWorkload.cpp:54
armnn::LayerType::InstanceNormalization
@ InstanceNormalization
armnn::LayerType::ConvertFp16ToFp32
@ ConvertFp16ToFp32
armnn::ClLayerSupport::IsMaximumSupported
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1234
armnn::ClSplitterWorkloadValidate
arm_compute::Status ClSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, unsigned int splitAxis)
Definition: ClSplitterWorkload.cpp:31
armnn::ClLayerSupport::IsActivationSupported
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:696
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::LayerType::Floor
@ Floor
armnn::L2NormalizationDescriptor
A L2NormalizationDescriptor for the L2NormalizationLayer.
Definition: Descriptors.hpp:809
ClUnidirectionalSequenceLstmFloatWorkload.hpp
ClNormalizationFloatWorkload.hpp
armnn::ClUnidirectionalSequenceLstmFloatWorkloadValidate
arm_compute::Status ClUnidirectionalSequenceLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: ClUnidirectionalSequenceLstmFloatWorkload.cpp:508
armnn::ClBatchToSpaceNdWorkloadValidate
arm_compute::Status ClBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor)
Definition: ClBatchToSpaceNdWorkload.cpp:17
ClLayerSupport.hpp
armnn::OriginsDescriptor::GetNumDimensions
uint32_t GetNumDimensions() const
Get the number of dimensions.
Definition: Descriptors.cpp:192
armnn::BinaryOperation::Sub
@ Sub
ClReverseV2Workload.hpp
armnn::LayerType::Transpose
@ Transpose
armnn::NormalizationDescriptor
A NormalizationDescriptor for the NormalizationLayer.
Definition: Descriptors.hpp:769
armnn::ClLayerSupport::IsOutputSupported
bool IsOutputSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1291
armnn::ClLogicalAndWorkloadValidate
arm_compute::Status ClLogicalAndWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: ClLogicalAndWorkload.cpp:20
armnn::LayerType::Comparison
@ Comparison
armnn::ClLayerSupport::IsDepthwiseConvolutionSupported
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:975
armnn::ClLayerSupport::IsPadSupported
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1297
armnn::ClLayerSupport::IsFullyConnectedSupported
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1097
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::ClLayerSupport::IsLogicalBinarySupported
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
Definition: ClLayerSupport.cpp:1170
armnn::ChannelShuffleDescriptor
A ChannelShuffleDescriptor for the ChannelShuffle operator.
Definition: Descriptors.hpp:1562
ClPooling2dWorkload.hpp
armnn::ClDepthToSpaceWorkloadValidate
arm_compute::Status ClDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor)
Definition: ClDepthToSpaceWorkload.cpp:22
armnn::ClActivationWorkloadValidate
arm_compute::Status ClActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)
Definition: ClActivationWorkload.cpp:17
armnn::ClMeanValidate
arm_compute::Status ClMeanValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor)
Definition: ClMeanWorkload.cpp:17
armnn::ClElementwiseBinaryValidate
arm_compute::Status ClElementwiseBinaryValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ElementwiseBinaryDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: ClElementwiseBinaryWorkload.cpp:64
armnn::LogicalBinaryOperation::LogicalOr
@ LogicalOr
ClReduceWorkload.hpp
ClMeanWorkload.hpp
ARMNN_NO_DEPRECATE_WARN_BEGIN
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
ClSinWorkload.hpp
armnn::ClChannelShuffleValidate
arm_compute::Status ClChannelShuffleValidate(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor)
Definition: ClChannelShuffleWorkload.cpp:20
armnn::LayerType::Tile
@ Tile
ClResizeWorkload.hpp
ClComparisonWorkload.hpp
ClPreluWorkload.hpp
armnn::ClLayerSupport::IsNormalizationSupported
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1283
armnn::ClLayerSupport::IsReduceSupported
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1403
armnn::LayerType::Stack
@ Stack
BackendRegistry.hpp
armnn::ClFullyConnectedWorkloadValidate
arm_compute::Status ClFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const Optional< TensorInfo > &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: ClFullyConnectedWorkload.cpp:19
armnn::StackDescriptor
A StackDescriptor for the StackLayer.
Definition: Descriptors.hpp:1251
IgnoreUnused.hpp
armnn::ClConvolution2dWorkloadValidate
arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: ClConvolution2dWorkload.cpp:23
ClCastWorkload.hpp
armnn::LayerType::Normalization
@ Normalization
armnn::ClLayerSupport::IsSliceSupported
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1444
armnn::LayerType::QuantizedLstm
@ QuantizedLstm
armnn::UnaryOperation::Neg
@ Neg
armnn::LayerType::Reduce
@ Reduce
armnn::ClLayerSupport::ClLayerSupport
ClLayerSupport()
Definition: ClLayerSupport.cpp:182
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
armnn::ComputeSplitAxis
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
Definition: ArmComputeUtils.hpp:246
armnn::DataType::QSymmS16
@ QSymmS16
armnn::ClLayerSupport::IsSoftmaxSupported
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1452
ClBatchToSpaceNdWorkload.hpp
armnn::LayerType::GatherNd
@ GatherNd
armnn::ClLayerSupport::IsMeanSupported
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1246
armnn::LayerType::ElementwiseBinary
@ ElementwiseBinary
armnn::ClL2NormalizationWorkloadValidate
arm_compute::Status ClL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)
Definition: ClL2NormalizationFloatWorkload.cpp:17
armnn::ClLayerSupport::IsChannelShuffleSupported
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:792
armnn::ClLayerSupport::IsConvertFp16ToFp32Supported
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:867
armnn::ClLayerSupport::IsFloorSupported
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1087
armnn::QuantizedLstmInputParamsInfo
Definition: QuantizedLstmParams.hpp:119
armnn::LayerType::ConvertFp32ToFp16
@ ConvertFp32ToFp16
ClDivisionWorkload.hpp
ClGatherNdWorkload.hpp
armnn::ClAdditionValidate
arm_compute::Status ClAdditionValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: ClAdditionWorkload.cpp:45
armnn::ClNegWorkloadValidate
arm_compute::Status ClNegWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClNegWorkload.cpp:18
ClBackendModelContext.hpp
LayerSupportCommon.hpp
ClGatherWorkload.hpp
armnn::ClLayerSupport::IsQuantizeSupported
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1393
armnn::ClQLstmWorkloadValidate
arm_compute::Status ClQLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: ClQLstmWorkload.cpp:247
armnn::ClReduceWorkloadValidate
arm_compute::Status ClReduceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor)
Definition: ClReduceWorkload.cpp:18
armnn::LayerType::Slice
@ Slice
ClExpWorkload.hpp
armnn::ClLayerSupport::IsMultiplicationSupported
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1270
armnn::ClBackendModelContext
The ClBackendModelContext is used to pass in CL specific backend ModelOptions.
Definition: ClBackendModelContext.hpp:28
armnn::ClLayerSupport::IsTransposeConvolution2dSupported
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1566
ClInstanceNormalizationWorkload.hpp
armnn::ClConstantWorkloadValidate
arm_compute::Status ClConstantWorkloadValidate(const TensorInfo &output)
Definition: ClConstantWorkload.cpp:18
armnn::BinaryOperation::Maximum
@ Maximum
armnn::ClPermuteWorkloadValidate
arm_compute::Status ClPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)
Definition: ClPermuteWorkload.cpp:17
armnn::ClLayerSupport::IsInstanceNormalizationSupported
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1146
armnn::LayerType::ChannelShuffle
@ ChannelShuffle
ClActivationWorkload.hpp
ClTransposeConvolution2dWorkload.hpp
ClDequantizeWorkload.hpp
armnn::BinaryOperation::SqDiff
@ SqDiff
armnn::UnaryOperation::Rsqrt
@ Rsqrt
armnn::ClLayerSupport::IsSpaceToDepthSupported
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1472
armnn::ClLayerSupport::IsReshapeSupported
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1415
ClSpaceToBatchNdWorkload.hpp
armnn::UnaryOperation::Sqrt
@ Sqrt
armnn::UnaryOperation::LogicalNot
@ LogicalNot
armnn::LayerType::Subtraction
@ Subtraction
armnn::LayerType::Prelu
@ Prelu
armnn::ClLayerSupport::IsLayerSupported
bool IsLayerSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &quantizedLstmParamsInfo, Optional< std::string & > reasonIfUnsupported) const
Default implementation of the ILayerSupport interface; backends should implement this as a switch statement.
Definition: ClLayerSupport.cpp:187
armnn::ClResizeWorkloadValidate
arm_compute::Status ClResizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor)
Definition: ClResizeWorkload.cpp:22
armnn::LayerType::LogicalBinary
@ LogicalBinary
armnn::LogicalBinaryDescriptor::m_Operation
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
Definition: Descriptors.hpp:1534
armnn::LayerType::Concat
@ Concat
armnn::PadDescriptor
A PadDescriptor for the PadLayer.
Definition: Descriptors.hpp:1196
armnn::UnaryOperation::Exp
@ Exp
armnn::ClPooling3dWorkloadValidate
arm_compute::Status ClPooling3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor)
Definition: ClPooling3dWorkload.cpp:18
armnn::ClStackWorkloadValidate
arm_compute::Status ClStackWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor)
Definition: ClStackWorkload.cpp:29
armnn::TransposeDescriptor
A TransposeDescriptor for the TransposeLayer.
Definition: Descriptors.hpp:1490
ClSliceWorkload.hpp
ClTransposeWorkload.hpp
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
armnn::LayerType::Merge
@ Merge
PolymorphicDowncast.hpp
armnn::EmptyOptional
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
ClStridedSliceWorkload.hpp
armnn::SliceDescriptor
A SliceDescriptor for the SliceLayer.
Definition: Descriptors.hpp:1228
armnn::DataType
DataType
Definition: Types.hpp:48
armnn::LayerType::Softmax
@ Softmax
ClL2NormalizationFloatWorkload.hpp
armnn::PolymorphicDowncast
DestType PolymorphicDowncast(SourceType *value)
Polymorphic downcast for build in pointers only.
Definition: PolymorphicDowncast.hpp:74
armnn::TensorInfo::IsTypeSpaceMatch
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same.
Definition: Tensor.cpp:432
armnn::ClLayerSupport::IsUnidirectionalSequenceLstmSupported
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported) const
Definition: ClLayerSupport.cpp:1590
armnn::ClComparisonWorkloadValidate
arm_compute::Status ClComparisonWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor)
Definition: ClComparisonWorkload.cpp:24
armnn::ClTileWorkloadValidate
arm_compute::Status ClTileWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TileDescriptor &descriptor)
Definition: ClTileWorkload.cpp:16
armnn::ReshapeDescriptor
A ReshapeDescriptor for the ReshapeLayer.
Definition: Descriptors.hpp:1023
armnn::LayerSupportBase::IsMergeSupported
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:112
armnn::ClLayerSupport::IsComparisonSupported
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:804
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn::UnaryOperation::Sin
@ Sin
armnn::LayerType::Quantize
@ Quantize
ClConcatWorkload.hpp
armnn::ClLayerSupport::IsPooling3dSupported
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1325
armnn::LayerSupportBase::IsMemImportSupported
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:105
ClConvertFp16ToFp32Workload.hpp
ClFullyConnectedWorkload.hpp
armnn::ClConvertFp32ToFp16WorkloadValidate
arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClConvertFp32ToFp16Workload.cpp:44
armnn::ClTransposeWorkloadValidate
arm_compute::Status ClTransposeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor)
Definition: ClTransposeWorkload.cpp:17
armnn::LayerType::Multiplication
@ Multiplication
armnn::PermuteDescriptor
A PermuteDescriptor for the PermuteLayer.
Definition: Descriptors.hpp:149
armnn::BatchMatMulDescriptor
A BatchMatMulDescriptor for the BatchMatMul operator.
Definition: Descriptors.hpp:1584
ClSpaceToDepthWorkload.hpp
armnn::ClSoftmaxWorkloadValidate
arm_compute::Status ClSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
Definition: ClSoftmaxWorkload.cpp:17
armnn::ClReverseV2WorkloadValidate
arm_compute::Status ClReverseV2WorkloadValidate(const TensorInfo &input, const TensorInfo &axis, const TensorInfo &output)
Definition: ClReverseV2Workload.cpp:16
armnn::LayerType::Addition
@ Addition
armnn::ClPadValidate
arm_compute::Status ClPadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
Definition: ClPadWorkload.cpp:62
ClNegWorkload.hpp
armnn::ClLayerSupport::IsTransposeSupported
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1582
armnn::ClLayerSupport::IsTileSupported
bool IsTileSupported(const TensorInfo &input, const TensorInfo &output, const TileDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1554
ArmComputeUtils.hpp
armnn::ClInstanceNormalizationWorkloadValidate
arm_compute::Status ClInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)
Definition: ClInstanceNormalizationWorkload.cpp:18
armnn::ClGatherWorkloadValidate
arm_compute::Status ClGatherWorkloadValidate(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &output, const GatherDescriptor &descriptor)
Definition: ClGatherWorkload.cpp:15
armnn::SpaceToBatchNdDescriptor
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Definition: Descriptors.hpp:1043
ClLogicalNotWorkload.hpp
ClLstmFloatWorkload.hpp
armnn::ClSliceWorkloadValidate
arm_compute::Status ClSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor)
Definition: ClSliceWorkload.cpp:18
armnn::Convolution3dDescriptor
A Convolution3dDescriptor for the Convolution3dLayer.
Definition: Descriptors.hpp:588
armnn::LayerType::DepthToSpace
@ DepthToSpace
armnn::ClBatchMatMulValidate
arm_compute::Status ClBatchMatMulValidate(const TensorInfo &inputInfoX, const TensorInfo &inputInfoY, const TensorInfo &outputInfo, const BatchMatMulDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: ClBatchMatMulWorkload.cpp:24
armnn::BaseDescriptor
Base class for all descriptors.
Definition: Descriptors.hpp:22
armnn::ClLstmFloatWorkloadValidate
arm_compute::Status ClLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: ClLstmFloatWorkload.cpp:244
armnn::BinaryOperation::Power
@ Power
armnn::ClLayerSupport::IsSpaceToBatchNdSupported
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1460
armnn::LayerType::MemImport
@ MemImport
armnn::LayerType::Pooling2d
@ Pooling2d
ClQuantizeWorkload.hpp
armnn::ClLayerSupport::IsL2NormalizationSupported
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1158
armnn::ClLayerSupport::IsCastSupported
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:782
ClBatchNormalizationFloatWorkload.hpp
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:200
armnn::ClSqrtWorkloadValidate
arm_compute::Status ClSqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClSqrtWorkload.cpp:19
armnn::LayerType::Division
@ Division
armnn::LayerType::Shape
@ Shape
armnn::ClNormalizationWorkloadValidate
arm_compute::Status ClNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
Definition: ClNormalizationFloatWorkload.cpp:19
armnn::ClLayerSupport::IsDivisionSupported
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1010
ClLogicalAndWorkload.hpp
armnn::BatchToSpaceNdDescriptor
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
Definition: Descriptors.hpp:875
armnn::Convolution2dDescriptor
A Convolution2dDescriptor for the Convolution2dLayer.
Definition: Descriptors.hpp:534
armnn::ComparisonDescriptor
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
armnn::FillDescriptor
A FillDescriptor for the FillLayer.
Definition: Descriptors.hpp:925
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::ClLayerSupport::IsInputSupported
bool IsInputSupported(const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1140
armnn::ClLayerSupport::IsDepthToSpaceSupported
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:963
armnn::ElementwiseUnaryDescriptor::m_Operation
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
Definition: Descriptors.hpp:145
ClPadWorkload.hpp
ClConvolution2dWorkload.hpp
ClRsqrtWorkload.hpp
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::LayerType::Gather
@ Gather
armnn::ClLayerSupport::IsArgMinMaxSupported
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:721
armnn::LayerType::Pooling3d
@ Pooling3d
armnn::ClLayerSupport::IsBatchToSpaceNdSupported
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:770
ClConvertFp32ToFp16Workload.hpp
ClBackendId.hpp
armnn::UnaryOperation::Log
@ Log
armnn::LayerType::LogSoftmax
@ LogSoftmax
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::ClLayerSupport::IsElementwiseUnarySupported
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1023
ClAdditionWorkload.hpp
armnn::LogicalBinaryOperation::LogicalAnd
@ LogicalAnd
ClConvolution3dWorkload.hpp
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::LayerType::Cast
@ Cast
armnn::ClLayerSupport::IsResizeSupported
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1424
armnn::LayerSupportBase::IsShapeSupported
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:131
armnn::LayerType::BatchToSpaceNd
@ BatchToSpaceNd
armnn::ClPooling2dWorkloadValidate
arm_compute::Status ClPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)
Definition: ClPooling2dWorkload.cpp:18
armnn::ClLayerSupport::IsFillSupported
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1075
armnn::ClLayerSupport::IsConvolution3dSupported
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:920
armnn::LstmDescriptor
An LstmDescriptor for the LstmLayer.
Definition: Descriptors.hpp:1102
armnn::StridedSliceDescriptor
A StridedSliceDescriptor for the StridedSliceLayer.
Definition: Descriptors.hpp:1303
armnn::ClMinimumWorkloadValidate
arm_compute::Status ClMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: ClMinimumWorkload.cpp:24
armnn::ClSpaceToBatchNdWorkloadValidate
arm_compute::Status ClSpaceToBatchNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor)
Definition: ClSpaceToBatchNdWorkload.cpp:16
armnn::Status
Status
Definition: Types.hpp:42
armnn::ClLayerSupport::IsQuantizedLstmSupported
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1375
ClMinimumWorkload.hpp
armnn::LogicalBinaryDescriptor
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
Definition: Descriptors.hpp:1518
armnn::ClLayerSupport::IsSubtractionSupported
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1541
ClStackWorkload.hpp
armnn::LayerType::Reshape
@ Reshape
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:193
ARMNN_NO_DEPRECATE_WARN_END
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
armnn::ClLayerSupport::IsPermuteSupported
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1309
armnn::LayerType::SpaceToBatchNd
@ SpaceToBatchNd
armnn::ClConvolution3dWorkloadValidate
arm_compute::Status ClConvolution3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: ClConvolution3dWorkload.cpp:23
armnn::ClLayerSupport::IsGatherNdSupported
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const
Definition: ClLayerSupport.cpp:1128
armnn::LayerType::Fill
@ Fill
armnn::ClBatchNormalizationValidate
arm_compute::Status ClBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: ClBatchNormalizationFloatWorkload.cpp:19
armnn::LayerType::L2Normalization
@ L2Normalization
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::ViewsDescriptor::GetNumDimensions
uint32_t GetNumDimensions() const
Get the number of dimensions.
Definition: Descriptors.cpp:306
armnn::ClQuantizedLstmWorkloadValidate
arm_compute::Status ClQuantizedLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo)
Definition: ClQuantizedLstmWorkload.cpp:18
armnn::ClLogSoftmaxWorkloadValidate
arm_compute::Status ClLogSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor)
Definition: ClLogSoftmaxWorkload.cpp:17
armnn::LayerType::Minimum
@ Minimum
ClSplitterWorkload.hpp
armnn::ClDivisionWorkloadValidate
arm_compute::Status ClDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: ClDivisionWorkload.cpp:18
ClBatchMatMulWorkload.hpp
armnn::ClConcatWorkloadValidate
arm_compute::Status ClConcatWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
Definition: ClConcatWorkload.cpp:27
armnn::IsSupportedForDataTypeGeneric
bool IsSupportedForDataTypeGeneric(Optional< std::string & > reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
Definition: LayerSupportCommon.hpp:27
armnn::ClLayerSupport::IsSplitterSupported
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1484
ClMaximumWorkload.hpp
armnn::LayerType::UnidirectionalSequenceLstm
@ UnidirectionalSequenceLstm
armnn::BinaryOperation::Minimum
@ Minimum
armnn::LayerType::Map
@ Map
armnn::LayerType::ReverseV2
@ ReverseV2
armnn::OriginsDescriptor
An OriginsDescriptor for the ConcatLayer.
Definition: Descriptors.hpp:201
ClSubtractionWorkload.hpp
armnn::ClLayerSupport::IsDequantizeSupported
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:953
armnn::LayerType::MemCopy
@ MemCopy
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::ElementwiseUnaryDescriptor
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:129
armnn::TransposeConvolution2dDescriptor
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
Definition: Descriptors.hpp:1440
armnn::LayerType::ArgMinMax
@ ArgMinMax
armnn::ClPreluWorkloadValidate
arm_compute::Status ClPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)
Definition: ClPreluWorkload.cpp:16
armnn::ClLayerSupport::IsGatherSupported
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
Definition: ClLayerSupport.cpp:1114
armnn::LayerType::Pad
@ Pad
armnn::LstmInputParamsInfo
Definition: LstmParams.hpp:63
armnn::ClSinWorkloadValidate
arm_compute::Status ClSinWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClSinWorkload.cpp:18
armnn::LayerType::Rank
@ Rank
armnn::ClAbsWorkloadValidate
arm_compute::Status ClAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClAbsWorkload.cpp:19
armnn::LayerType::Mean
@ Mean
ArmComputeTensorUtils.hpp
armnn::UnaryOperation::Abs
@ Abs
InternalTypes.hpp
armnn::ClLayerSupport::IsStridedSliceSupported
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1529
ClMultiplicationWorkload.hpp
armnn::ClLayerSupport::IsStackSupported
bool IsStackSupported(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1517
ClAbsWorkload.hpp
ClTileWorkload.hpp
armnn::LayerType::Input
@ Input
ClReshapeWorkload.hpp
armnn::ClConvertFp16ToFp32WorkloadValidate
arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClConvertFp16ToFp32Workload.cpp:44
armnn::LayerType::Resize
@ Resize
armnn::ClLayerSupport::IsLogSoftmaxSupported
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1198
armnn::SetValueChecked
void SetValueChecked(Optional< T & > optionalRef, V &&val)
Definition: LayerSupportCommon.hpp:17
ClSoftmaxWorkload.hpp
ClFillWorkload.hpp
ClFloorFloatWorkload.hpp
armnn::BinaryOperation::Div
@ Div
armnn::ClLayerSupport::IsConstantSupported
bool IsConstantSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:859
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::Pooling2dDescriptor
A Pooling2dDescriptor for the Pooling2dLayer.
Definition: Descriptors.hpp:371
armnn::LayerType::Maximum
@ Maximum
armnn::LayerType::Activation
@ Activation
armnn::DepthwiseConvolution2dDescriptor
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
Definition: Descriptors.hpp:659
armnn::ClLogicalOrWorkloadValidate
arm_compute::Status ClLogicalOrWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: ClLogicalOrWorkload.cpp:20
armnn::ClDequantizeWorkloadValidate
arm_compute::Status ClDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClDequantizeWorkload.cpp:22
armnn::LayerType::Lstm
@ Lstm
armnn::LayerSupportBase::IsMemCopySupported
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:98
ClLogicalOrWorkload.hpp
armnn::LayerType::Dequantize
@ Dequantize
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::ReduceDescriptor
A ReduceDescriptor for the REDUCE operators.
Definition: Descriptors.hpp:1538
armnn::ClReshapeWorkloadValidate
arm_compute::Status ClReshapeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClReshapeWorkload.cpp:15
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:491
armnn::ClDepthwiseConvolutionWorkloadValidate
arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
Definition: ClDepthwiseConvolutionWorkload.cpp:26
armnn::LayerType::Unmap
@ Unmap
ClPermuteWorkload.hpp
armnn::MeanDescriptor
A MeanDescriptor for the MeanLayer.
Definition: Descriptors.hpp:1172
armnn::LayerType::QLstm
@ QLstm
ClSqrtWorkload.hpp
armnn::OptionalReferenceSwitch< std::is_reference< T >::value, T >::value
const T & value() const
Definition: Optional.hpp:146
armnn::TileDescriptor
Definition: Descriptors.hpp:1640
armnn::ClArgMinMaxWorkloadValidate
arm_compute::Status ClArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
Definition: ClArgMinMaxWorkload.cpp:31
ClQLstmWorkload.hpp
armnn::SoftmaxDescriptor
A SoftmaxDescriptor for the SoftmaxLayer.
Definition: Descriptors.hpp:177
armnn::ClLayerSupport::IsReverseV2Supported
bool IsReverseV2Supported(const TensorInfo &input, const TensorInfo &axis, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const
Definition: ClLayerSupport.cpp:1432
armnn::ClLayerSupport::IsDilatedDepthwiseConvolutionSupported
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reason=EmptyOptional()) const
Definition: ClLayerSupport.cpp:992
armnn::ClLogicalNotWorkloadValidate
arm_compute::Status ClLogicalNotWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClLogicalNotWorkload.cpp:20
armnn::IBackendInternal::IBackendSpecificModelContextPtr
std::shared_ptr< IBackendModelContext > IBackendSpecificModelContextPtr
Definition: IBackendInternal.hpp:96
armnn::SpaceToDepthDescriptor
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
Definition: Descriptors.hpp:1075
armnn::LayerType::Output
@ Output
armnn::LayerType::Constant
@ Constant
armnn::ClLayerSupport::IsConvertFp32ToFp16Supported
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:877
armnn::ClBackendModelContext::IsFastMathEnabled
bool IsFastMathEnabled() const
Definition: ClBackendModelContext.cpp:66
armnn::ClLayerSupport::IsQLstmSupported
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1341
ClChannelShuffleWorkload.hpp