ArmNN 23.02 — source listing of RefLayerSupport.cpp (Doxygen-generated documentation page; some original lines are elided from this listing).
1 //
2 // Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "RefLayerSupport.hpp"
7 
8 #include <armnn/TypesUtils.hpp>
9 #include <armnn/Types.hpp>
13 
14 #include <LayerSupportCommon.hpp>
16 
17 #include <vector>
18 #include <array>
19 
20 namespace armnn
21 {
22 
23 namespace
24 {
25 
27 // Answers a reference-backend support query for the given data type:
27 // Float32 queries are routed to floatFuncPtr and QAsymmU8 queries to
27 // uint8FuncPtr, while every remaining data-type slot of
27 // IsSupportedForDataTypeGeneric is filled with FalseFunc, i.e. rejected.
27 template<typename Float32Func, typename Uint8Func, typename ... Params>
27 bool IsSupportedForDataTypeRef(Optional<std::string&> reasonIfUnsupported,
28                                DataType dataType,
29                                Float32Func floatFuncPtr,
30                                Uint8Func uint8FuncPtr,
31                                Params&&... params)
32 {
33     // One shared "always unsupported" predicate for the other type slots.
34     auto notSupported = &FalseFunc<Params...>;
35     return IsSupportedForDataTypeGeneric(reasonIfUnsupported,
36                                          dataType,
37                                          notSupported,
38                                          floatFuncPtr,
39                                          uint8FuncPtr,
40                                          notSupported,
41                                          notSupported,
41                                          std::forward<Params>(params)...);
41 }
42 
43 } // anonymous namespace
44 
45 namespace
46 {
47 
48 std::string CreateIncorrectDimensionsErrorMsg(unsigned int expected,
49  unsigned int actual,
50  std::string& layerStr,
51  std::string& tensorName)
52 {
53  std::string errorMsg = "Reference " + layerStr + ": Expected " + std::to_string(expected) + " dimensions but got" +
54  " " + std::to_string(actual) + " dimensions instead, for the '" + tensorName + "' tensor.";
55 
56  return errorMsg;
57 }
58 
59 } // anonymous namespace
60 
62  const std::vector<TensorInfo>& infos,
63  const BaseDescriptor& descriptor,
64  const Optional<LstmInputParamsInfo>& lstmParamsInfo,
65  const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmInputParamsInfo,
66  Optional<std::string&> reasonIfUnsupported) const
67 {
68  switch (type)
69  {
71  return IsActivationSupported(infos[0],
72  infos[1],
73  *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
76  return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
78  return IsArgMinMaxSupported(infos[0],
79  infos[1],
80  *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
83  return IsBatchMatMulSupported(infos[0],
84  infos[1],
85  infos[2],
86  *(PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor)),
89  return IsBatchNormalizationSupported(infos[0],
90  infos[1],
91  infos[2],
92  infos[3],
93  infos[4],
94  infos[5],
95  *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
96  (&descriptor)),
99  return IsBatchToSpaceNdSupported(infos[0],
100  infos[1],
101  *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
104  return IsComparisonSupported(infos[0],
105  infos[1],
106  infos[2],
107  *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
109  case LayerType::Concat:
110  {
111  std::vector<const TensorInfo*> inputInfos;
112  for (uint32_t i = 0; i < (infos.size() - 1); i++)
113  {
114  inputInfos.push_back(&infos[i]);
115  }
116  return IsConcatSupported(inputInfos,
117  infos[infos.size() - 1],
118  *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
120  }
121  case LayerType::Constant:
122  return IsConstantSupported(infos[0], reasonIfUnsupported);
124  return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
126  return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
128  {
129  if (infos.size() != 4)
130  {
131  throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
132  "TensorInfos should be of format: {input, output, weights, biases}.");
133  }
134 
135  auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
136  if (infos[3] == TensorInfo())
137  {
138  return IsConvolution2dSupported(infos[0],
139  infos[1],
140  desc,
141  infos[2],
142  EmptyOptional(),
144  }
145  else
146  {
147  return IsConvolution2dSupported(infos[0],
148  infos[1],
149  desc,
150  infos[2],
151  infos[3],
153  }
154  }
156  return IsDepthToSpaceSupported(infos[0],
157  infos[1],
158  *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
161  {
162  if (infos.size() != 4)
163  {
164  throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
165  "TensorInfos should be of format: {input, output, weights, biases}.");
166  }
167 
168  auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
169  if (infos[3] == TensorInfo())
170  {
171  return IsDepthwiseConvolutionSupported(infos[0],
172  infos[1],
173  desc,
174  infos[2],
175  EmptyOptional(),
177  }
178  else
179  {
180  return IsDepthwiseConvolutionSupported(infos[0],
181  infos[1],
182  desc,
183  infos[2],
184  infos[3],
186  }
187  }
189  return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
190  case LayerType::Division:
191  return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
193  return IsElementwiseUnarySupported(infos[0],
194  infos[1],
195  *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
197  case LayerType::Fill:
198  return IsFillSupported(infos[0],
199  infos[1],
200  *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
202  case LayerType::Floor:
203  return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
205  return IsFullyConnectedSupported(infos[0],
206  infos[1],
207  infos[2],
208  infos[3],
209  *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
211  case LayerType::Gather:
212  return IsGatherSupported(infos[0],
213  infos[1],
214  infos[2],
215  *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
217  case LayerType::GatherNd:
218  return IsGatherNdSupported(infos[0],
219  infos[1],
220  infos[2],
222  case LayerType::Input:
223  return IsInputSupported(infos[0], reasonIfUnsupported);
225  return IsInstanceNormalizationSupported(infos[0],
226  infos[1],
227  *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
228  (&descriptor)),
231  return IsL2NormalizationSupported(infos[0],
232  infos[1],
233  *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
236  return IsLogicalBinarySupported(infos[0],
237  infos[1],
238  infos[2],
239  *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
242  return IsLogSoftmaxSupported(infos[0],
243  infos[1],
244  *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
246  case LayerType::Lstm:
247  return IsLstmSupported(infos[0],
248  infos[1],
249  infos[2],
250  infos[3],
251  infos[4],
252  infos[5],
253  infos[6],
254  *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
255  lstmParamsInfo.value(),
257  case LayerType::QLstm:
258  return IsQLstmSupported(infos[0],
259  infos[1],
260  infos[2],
261  infos[3],
262  infos[4],
263  infos[5],
264  *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
265  lstmParamsInfo.value(),
267  case LayerType::Maximum:
268  return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
269  case LayerType::Mean:
270  return IsMeanSupported(infos[0],
271  infos[1],
272  *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
274  case LayerType::Minimum:
275  return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
277  return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
279  return IsNormalizationSupported(infos[0],
280  infos[1],
281  *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
283  case LayerType::Output:
284  return IsOutputSupported(infos[0], reasonIfUnsupported);
285  case LayerType::Pad:
286  return IsPadSupported(infos[0],
287  infos[1],
288  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
290  case LayerType::Permute:
291  return IsPermuteSupported(infos[0],
292  infos[1],
293  *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
296  return IsPooling2dSupported(infos[0],
297  infos[1],
298  *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
300  case LayerType::Prelu:
301  return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
302  case LayerType::Quantize:
303  return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
304  case LayerType::Reshape:
305  return IsReshapeSupported(infos[0],
306  infos[1],
307  *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
309  case LayerType::Resize:
310  return IsResizeSupported(infos[0],
311  infos[1],
312  *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
314  case LayerType::Reduce:
315  return IsReduceSupported(infos[0],
316  infos[1],
317  *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
319  case LayerType::Slice:
320  return IsSliceSupported(infos[0],
321  infos[1],
322  *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
324  case LayerType::Softmax:
325  return IsSoftmaxSupported(infos[0],
326  infos[1],
327  *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
330  return IsSpaceToBatchNdSupported(infos[0],
331  infos[1],
332  *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
335  return IsSpaceToDepthSupported(infos[0],
336  infos[1],
337  *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
339  case LayerType::Splitter:
340  {
341  std::vector<TensorInfo> outputInfos;
342  for (uint32_t i = 1; i < infos.size(); i++)
343  {
344  outputInfos.push_back(infos[i]);
345  }
346  return IsSplitterSupported(infos[0],
347  {outputInfos.begin(), outputInfos.end()},
348  *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
350  }
351  case LayerType::Stack:
352  {
353  std::vector<const TensorInfo*> inputInfos;
354  for (uint32_t i = 0; i < infos.size() - 1; i++)
355  {
356  inputInfos.push_back(&infos[i]);
357  }
358  return IsStackSupported(inputInfos,
359  infos[infos.size() - 1],
360  *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
362  }
364  return IsStridedSliceSupported(infos[0],
365  infos[1],
366  *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
369  return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
371  return IsTransposeSupported(infos[0],
372  infos[1],
373  *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
376  {
377  if (infos.size() != 4)
378  {
379  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
380  "TensorInfos should be of format: {input, output, weights, biases}.");
381  }
382 
383  auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
384  if (infos[3] == TensorInfo())
385  {
386  return IsTransposeConvolution2dSupported(infos[0],
387  infos[1],
388  desc,
389  infos[2],
390  EmptyOptional(),
392  }
393  else
394  {
395  return IsTransposeConvolution2dSupported(infos[0],
396  infos[1],
397  desc,
398  infos[2],
399  infos[3],
401  }
402  }
403  case LayerType::Cast:
404  return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
406  return IsChannelShuffleSupported(infos[0],
407  infos[1],
408  *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
411  {
412  if (infos.size() != 4)
413  {
414  throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
415  "TensorInfos should be of format: {input, output, weights, biases}.");
416  }
417 
418  auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
419  if (infos[3] == TensorInfo())
420  {
421  return IsConvolution3dSupported(infos[0],
422  infos[1],
423  desc,
424  infos[2],
425  EmptyOptional(),
427  }
428  else
429  {
430  return IsConvolution3dSupported(infos[0],
431  infos[1],
432  desc,
433  infos[2],
434  infos[3],
436  }
437  }
438  case LayerType::Debug:
439  return IsDebugSupported(infos[0], infos[1], reasonIfUnsupported);
441  return IsDetectionPostProcessSupported(infos[0],
442  infos[1],
443  infos[2],
444  infos[3],
445  infos[4],
446  infos[5],
447  infos[6],
448  *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>
449  (&descriptor)),
452  return IsFakeQuantizationSupported(infos[0],
453  *(PolymorphicDowncast<const FakeQuantizationDescriptor*>(&descriptor)),
455  case LayerType::MemCopy:
456  return IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
457  case LayerType::Rank:
458  return IsRankSupported(infos[0], infos[1], reasonIfUnsupported);
459  case LayerType::Shape:
460  return IsShapeSupported(infos[0], infos[1], reasonIfUnsupported);
462  {
463  if (infos.size() != 6)
464  {
465  throw InvalidArgumentException("Invalid number of UnidirectionalSequenceLstm TensorInfos. TensorInfos "
466  "should be of format: {input, outputStateIn, cellStateIn, "
467  "hiddenStateOutputVal, cellStateOutputVal, output}");
468  }
469  auto desc = *(PolymorphicDowncast<const UnidirectionalSequenceLstmDescriptor*>(&descriptor));
471  infos[1],
472  infos[2],
473  infos[3],
474  infos[4],
475  infos[5],
476  desc,
477  lstmParamsInfo.value(),
479  }
481  return IsPooling3dSupported(infos[0],
482  infos[1],
483  *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
485  case LayerType::Map:
486  return true;
487  case LayerType::Unmap:
488  return true;
491  case LayerType::Merge:
492  return LayerSupportBase::IsMergeSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
495  infos[1],
496  infos[2],
497  infos[3],
498  infos[4],
499  quantizedLstmInputParamsInfo.value(),
501  default:
502  // layers not supported in neon by default:
503  // precompiled, standin, switch
504  return false;
505  }
506 }
507 
509  const TensorInfo& output,
510  const ActivationDescriptor& descriptor,
511  Optional<std::string&> reasonIfUnsupported) const
512 {
513  bool supported = true;
514 
515  // Define supported types.
516  std::array<DataType,6> supportedTypes = {
522  };
523 
524  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
525  "Reference activation: input type not supported.");
526 
527  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
528  "Reference activation: output type not supported.");
529 
531  "Reference activation: input and output types mismatched.");
532 
534  "Reference activation: input and output shapes are of different rank.");
535 
536 
537  struct ActivationFunctionSupported : public Rule
538  {
539  ActivationFunctionSupported(const ActivationDescriptor& desc)
540  {
541  switch(desc.m_Function)
542  {
555  {
556  m_Res = true;
557  break;
558  }
559  default:
560  {
561  m_Res = false;
562  break;
563  }
564  }
565  }
566  };
567 
568  // Function is supported
569  supported &= CheckSupportRule(ActivationFunctionSupported(descriptor), reasonIfUnsupported,
570  "Reference activation: function not supported.");
571 
572  return supported;
573 }
574 
576  const TensorInfo& input1,
577  const TensorInfo& output,
578  Optional<std::string&> reasonIfUnsupported) const
579 {
580  bool supported = true;
581 
582  std::array<DataType,7> supportedTypes = {
589  };
590 
591  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
592  "Reference addition: input 0 is not a supported type.");
593 
594  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
595  "Reference addition: input 1 is not a supported type.");
596 
597  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
598  "Reference addition: output is not a supported type.");
599 
601  "Reference addition: input 0 and Input 1 types are mismatched");
602 
604  "Reference addition: input and output types are mismatched");
605 
607  "Reference addition: shapes are not suitable for implicit broadcast.");
608 
609  return supported;
610 }
611 
613  const armnn::ArgMinMaxDescriptor &descriptor,
614  armnn::Optional<std::string &> reasonIfUnsupported) const
615 {
617 
618  std::array<DataType, 8> supportedInputTypes =
619  {
627  };
628 
629  std::array<DataType,2> supportedOutputTypes = {
632  };
633 
634  bool supported = true;
635 
636  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
637  "Reference ArgMinMax: input is not a supported type.");
638  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
639  "Reference ArgMinMax: output type not supported");
640 
641  return supported;
642 }
643 
645  const TensorInfo& inputY,
646  const TensorInfo& output,
647  const BatchMatMulDescriptor& descriptor,
648  Optional<std::string &> reasonIfUnsupported) const
649 {
651 
652  std::array<DataType, 6> supportedTypes =
653  {
659  };
660 
661  bool supported = true;
662 
663  supported &= CheckSupportRule(TypeAnyOf(inputX, supportedTypes), reasonIfUnsupported,
664  "Reference batch matrix multiplication: input X is not a supported type");
665 
666  supported &= CheckSupportRule(TypeAnyOf(inputY, supportedTypes), reasonIfUnsupported,
667  "Reference batch matrix multiplication: input Y is not a supported type");
668 
669  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
670  "Reference batch matrix multiplication: output is not a supported type");
671 
672  supported &= CheckSupportRule(TypesAreEqual(inputX, inputY), reasonIfUnsupported,
673  "Reference batch matrix multiplication: input X and input Y types are mismatched");
674 
676  "Reference batch matrix multiplication: inputs and output types are mismatched");
677 
680  "Reference batch matrix multiplication: input X is not of rank 2 or greater");
681 
684  "Reference batch matrix multiplication: input Y is not of rank 2 or greater");
685 
686  return supported;
687 }
688 
690  const TensorInfo& output,
691  const TensorInfo& mean,
692  const TensorInfo& variance,
693  const TensorInfo& beta,
694  const TensorInfo& gamma,
695  const BatchNormalizationDescriptor& descriptor,
696  Optional<std::string&> reasonIfUnsupported) const
697 {
699 
700  std::array<DataType, 6> supportedTypes =
701  {
707  };
708 
709  bool supported = true;
710 
711  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
712  "Reference batch normalization: input is not a supported type.");
713 
714  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
715  "Reference batch normalization: output is not a supported type.");
716 
718  "Reference batch normalization: input and output types are mismatched");
719 
720  supported &= CheckSupportRule(TypeAnyOf(mean, supportedTypes), reasonIfUnsupported,
721  "Reference batch normalization: mean is not a supported type.");
722 
723  supported &= CheckSupportRule(TypeAnyOf(variance, supportedTypes), reasonIfUnsupported,
724  "Reference batch normalization: variance is not a supported type.");
725 
726  supported &= CheckSupportRule(TypeAnyOf(beta, supportedTypes), reasonIfUnsupported,
727  "Reference batch normalization: beta is not a supported type.");
728 
729  supported &= CheckSupportRule(TypeAnyOf(gamma, supportedTypes), reasonIfUnsupported,
730  "Reference batch normalization: gamma is not a supported type.");
731 
732  return supported;
733 }
734 
736  const TensorInfo& output,
737  const BatchToSpaceNdDescriptor& descriptor,
738  Optional<std::string&> reasonIfUnsupported) const
739 {
741 
742  bool supported = true;
743 
744  std::string batchToSpaceNdLayerStr = "batchToSpaceNd";
745  std::string inputTensorStr = "input";
746  std::string outputTensorStr = "output";
747 
748  // Define supported types.
749  std::array<DataType,6> supportedTypes =
750  {
756  };
757 
758  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
759  "Reference BatchToSpaceNd: input type not supported.");
760 
761  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
762  "Reference BatchToSpaceNd: output type not supported.");
763 
765  "Reference BatchToSpaceNd: input and output types mismatched.");
766 
769  CreateIncorrectDimensionsErrorMsg(4,
771  batchToSpaceNdLayerStr,
772  outputTensorStr).data());
773 
774  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(input, 4),
776  CreateIncorrectDimensionsErrorMsg(4,
777  input.GetNumDimensions(),
778  batchToSpaceNdLayerStr,
779  inputTensorStr).data());
780 
781  return supported;
782 }
783 
785  const TensorInfo& output,
786  Optional<std::string&> reasonIfUnsupported) const
787 {
788  std::array<DataType, 9> supportedInputTypes =
789  {
797  };
798 
799  bool supported = true;
800  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
801  "Reference cast: input is not a supported type");
802 
803 
804  supported &= CheckSupportRule(TypeAnyOf(output, supportedInputTypes), reasonIfUnsupported,
805  "Reference cast: output is not a supported type");
806 
808  "Reference cast: input and output shapes have different number of total elements");
809 
810  return supported;
811 }
812 
814  const TensorInfo& output,
815  const ChannelShuffleDescriptor& descriptor,
816  Optional<std::string&> reasonIfUnsupported) const
817 {
819  bool supported = true;
820 
821  // Define supported output and inputs types.
822  std::array<DataType, 7> supportedTypes =
823  {
830  };
831 
832  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
833  "Reference ChannelShuffle: input is not a supported type.");
834 
835  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
836  "Reference ChannelShuffle: output is not a supported type.");
837 
839  "Reference ChannelShuffle: input and output types are mismatched.");
840 
841  return supported;
842 }
843 
844 
846  const TensorInfo& input1,
847  const TensorInfo& output,
848  const ComparisonDescriptor& descriptor,
849  Optional<std::string&> reasonIfUnsupported) const
850 {
852  std::array<DataType, 8> supportedInputTypes =
853  {
861  };
862 
863  bool supported = true;
864  supported &= CheckSupportRule(TypeAnyOf(input0, supportedInputTypes), reasonIfUnsupported,
865  "Reference comparison: input 0 is not a supported type");
866 
868  "Reference comparison: input 0 and Input 1 types are mismatched");
869 
871  "Reference comparison: output is not of type Boolean");
872 
873  return supported;
874 }
875 
876 bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
877  const TensorInfo& output,
878  const OriginsDescriptor& descriptor,
879  Optional<std::string&> reasonIfUnsupported) const
880 {
882 
883  bool supported = true;
884  std::array<DataType,7> supportedTypes =
885  {
892  };
893 
894  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
895  "Reference concatenation: output type not supported");
896  for (const TensorInfo* input : inputs)
897  {
898  ARMNN_ASSERT(input != nullptr);
899  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
900  "Reference concatenation: input type not supported");
901 
903  "Reference concatenation: input and output types mismatched.");
904  }
905 
906  return supported;
907 }
908 
910  Optional<std::string&> reasonIfUnsupported) const
911 {
912  std::array<DataType,8> supportedTypes =
913  {
921  };
922 
923  return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
924  "Reference constant: output is not a supported type.");
925 }
926 
928  const TensorInfo& output,
929  Optional<std::string&> reasonIfUnsupported) const
930 {
932  input.GetDataType(),
933  &TrueFunc<>,
934  &FalseInputFuncF32<>,
935  &FalseFuncU8<>,
936  &FalseFuncI32<>,
937  &FalseFuncU8<>) &&
940  &FalseOutputFuncF16<>,
941  &TrueFunc<>,
942  &FalseFuncU8<>,
943  &FalseFuncI32<>,
944  &FalseFuncU8<>));
945 }
946 
948  const TensorInfo& output,
949  Optional<std::string&> reasonIfUnsupported) const
950 {
952  input.GetDataType(),
953  &FalseInputFuncF16<>,
954  &TrueFunc<>,
955  &FalseFuncU8<>,
956  &FalseFuncI32<>,
957  &FalseFuncU8<>) &&
960  &TrueFunc<>,
961  &FalseOutputFuncF32<>,
962  &FalseFuncU8<>,
963  &FalseFuncI32<>,
964  &FalseFuncU8<>));
965 }
966 
968  const TensorInfo& output,
969  const Convolution2dDescriptor& descriptor,
970  const TensorInfo& weights,
971  const Optional<TensorInfo>& biases,
972  Optional<std::string&> reasonIfUnsupported) const
973 {
974  bool supported = true;
975 
976  // Define supported types.
977  std::array<DataType,7> supportedTypes =
978  {
985  };
986 
987  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
988  "Reference Convolution2d: input is not a supported type.");
989 
990  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
991  "Reference Convolution2d: output is not a supported type.");
992 
994  "Reference Convolution2d: input and output types mismatched.");
995 
996 
997  const DataType inputType = input.GetDataType();
998  if (IsQuantized8BitType(inputType))
999  {
1000  std::array<DataType, 3> supportedWeightTypes =
1001  {
1005  };
1006 
1007  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1008  "Reference Convolution2d: weights type not supported for quantized input.");
1009  }
1010  else
1011  {
1012  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1013  "Reference Convolution2d: weights is not a supported type.");
1014 
1016  "Reference Convolution2d: input and weights types mismatched.");
1017  }
1018 
1019  if (biases.has_value())
1020  {
1021  std::array<DataType,4> biasesSupportedTypes =
1022  {
1026  };
1027 
1028  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1029  "Reference Convolution2d: biases is not a supported type.");
1030  }
1032 
1033  return supported;
1034 }
1035 
1037  const TensorInfo& output,
1038  const Convolution3dDescriptor& descriptor,
1039  const TensorInfo& weights,
1040  const Optional<TensorInfo>& biases,
1041  Optional<std::string&> reasonIfUnsupported) const
1042 {
1043  bool supported = true;
1044 
1045  // Define supported types.
1046  std::array<DataType,7> supportedTypes =
1047  {
1054  };
1055 
1056  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1057  "Reference Convolution3d: input is not a supported type.");
1058 
1059  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1060  "Reference Convolution3d: output is not a supported type.");
1061 
1063  "Reference Convolution3d: input and output types mismatched.");
1064 
1065  const DataType inputType = input.GetDataType();
1066  if (IsQuantized8BitType(inputType))
1067  {
1068  std::array<DataType, 3> supportedWeightTypes =
1069  {
1073  };
1074 
1075  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1076  "Reference Convolution3d: weights type not supported for quantized input.");
1077  }
1078  else
1079  {
1080  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1081  "Reference Convolution3d: weights is not a supported type.");
1082 
1084  "Reference Convolution3d: input and weights types mismatched.");
1085  }
1086 
1087  if (biases.has_value())
1088  {
1089  std::array<DataType,4> biasesSupportedTypes =
1090  {
1094  };
1095 
1096  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1097  "Reference Convolution3d: biases is not a supported type.");
1098  }
1100 
1101  return supported;
1102 }
1103 
1105  const TensorInfo& output,
1106  Optional<std::string&> reasonIfUnsupported) const
1107 {
1108  bool supported = true;
1109 
1110  std::array<DataType, 8> supportedTypes =
1111  {
1120  };
1121 
1122  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1123  "Reference for Debug layer: input type not supported");
1124 
1125  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1126  "Reference for Debug layer: output type not supported");
1127 
1129  "Reference for Debug layer: input and output types are mismatched");
1130 
1131  return supported;
1132 }
1133 
1135  const TensorInfo& output,
1136  const DepthToSpaceDescriptor& descriptor,
1137  Optional<std::string&> reasonIfUnsupported) const
1138 {
1140  bool supported = true;
1141 
1142  std::array<DataType,6> supportedTypes =
1143  {
1149  };
1150 
1151  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1152  "Reference DepthToSpace: input type not supported");
1153 
1154  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1155  "Reference DepthToSpace: output type not supported");
1156 
1158  "Reference DepthToSpace: input and output types are mismatched");
1159 
1160  return supported;
1161 }
1162 
1164  const TensorInfo& output,
1165  const DepthwiseConvolution2dDescriptor& descriptor,
1166  const TensorInfo& weights,
1167  const Optional<TensorInfo>& biases,
1168  Optional<std::string&> reasonIfUnsupported) const
1169 {
1171  bool supported = true;
1172 
1173  // Define supported types.
1174  std::array<DataType,7> supportedTypes =
1175  {
1182  };
1183 
1184  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1185  "Reference DepthwiseConvolution2d: input is not a supported type.");
1186 
1187  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1188  "Reference DepthwiseConvolution2d: output is not a supported type.");
1189 
1191  "Reference DepthwiseConvolution2d: input and output types mismatched.");
1192 
1193  const DataType inputType = input.GetDataType();
1194  if (IsQuantized8BitType(inputType))
1195  {
1196  std::array<DataType, 3> supportedWeightTypes =
1197  {
1201  };
1202 
1203  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1204  "Reference DepthwiseConvolution2d: weights type not supported for "
1205  "quantized input.");
1206  }
1207  else
1208  {
1209  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1210  "Reference DepthwiseConvolution2d: weights is not a supported type.");
1211 
1213  "Reference DepthwiseConvolution2d: input and weights types mismatched.");
1214  }
1215 
1216  if (biases.has_value())
1217  {
1218  std::array<DataType,4> biasesSupportedTypes =
1219  {
1223  };
1224  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1225  "Reference DepthwiseConvolution2d: biases is not a supported type.");
1226  }
1227 
1228  return supported;
1229 
1230 }
1231 
1233  const TensorInfo& output,
1234  Optional<std::string&> reasonIfUnsupported) const
1235 {
1236  bool supported = true;
1237 
1238  std::array<DataType,5> supportedInputTypes = {
1244  };
1245 
1246  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
1247  "Reference for Dequantize layer: input type not supported.");
1248 
1250  "Reference for Dequantize layer: per-axis quantized input not supported.");
1251 
1252  std::array<DataType,3> supportedOutputTypes = {
1255  };
1256 
1257  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
1258  "Reference for Dequantize layer: output type not supported.");
1259 
1261  "Reference for Dequantize layer: input/output shapes have different num total "
1262  "elements.");
1263 
1264  return supported;
1265 }
1266 
1268  const TensorInfo& scores,
1269  const TensorInfo& anchors,
1270  const TensorInfo& detectionBoxes,
1271  const TensorInfo& detectionClasses,
1272  const TensorInfo& detectionScores,
1273  const TensorInfo& numDetections,
1274  const DetectionPostProcessDescriptor& descriptor,
1275  Optional<std::string&> reasonIfUnsupported) const
1276 {
1278 
1279  bool supported = true;
1280 
1281  std::array<DataType,6> supportedInputTypes =
1282  {
1288  };
1289 
1290  supported &= CheckSupportRule(TypeAnyOf(boxEncodings, supportedInputTypes), reasonIfUnsupported,
1291  "Reference DetectionPostProcess: input 0 is not a supported type.");
1292 
1293  supported &= CheckSupportRule(TypeAnyOf(scores, supportedInputTypes), reasonIfUnsupported,
1294  "Reference DetectionPostProcess: input 1 is not a supported type.");
1295 
1296  return supported;
1297 }
1298 
1300  const TensorInfo& output,
1301  const DepthwiseConvolution2dDescriptor& descriptor,
1302  const TensorInfo& weights,
1303  const Optional<TensorInfo>& biases,
1304  Optional<std::string&> reasonIfUnsupported) const
1305 {
1307 }
1308 
1310  const TensorInfo& input1,
1311  const TensorInfo& output,
1312  Optional<std::string&> reasonIfUnsupported) const
1313 {
1314  bool supported = true;
1315 
1316  std::array<DataType,7> supportedTypes = {
1323  };
1324 
1325  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1326  "Reference division: input 0 is not a supported type.");
1327 
1328  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1329  "Reference division: input 1 is not a supported type.");
1330 
1331  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1332  "Reference division: output is not a supported type.");
1333 
1335  "Reference division: input 0 and Input 1 types are mismatched");
1336 
1338  "Reference division: input and output types are mismatched");
1339 
1341  "Reference division: shapes are not suitable for implicit broadcast.");
1342 
1343  return supported;
1344 }
1345 
1347  const TensorInfo& output,
1348  const ElementwiseUnaryDescriptor& descriptor,
1349  Optional<std::string&> reasonIfUnsupported) const
1350 {
1352 
1353  std::array<DataType, 7> supportedTypes =
1354  {
1361  };
1362 
1363  std::array<DataType, 1> logicalSupportedTypes =
1364  {
1366  };
1367 
1368  bool supported = true;
1369 
1370  if (descriptor.m_Operation == UnaryOperation::LogicalNot)
1371  {
1372  supported &= CheckSupportRule(TypeAnyOf(input, logicalSupportedTypes), reasonIfUnsupported,
1373  "Reference elementwise unary: input type not supported");
1374 
1375  supported &= CheckSupportRule(TypeAnyOf(output, logicalSupportedTypes), reasonIfUnsupported,
1376  "Reference elementwise unary: output type not supported");
1377  }
1378  else
1379  {
1380  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1381  "Reference elementwise unary: input type not supported");
1382 
1383  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1384  "Reference elementwise unary: output type not supported");
1385  }
1386 
1388  "Reference elementwise unary: input and output types not matching");
1389 
1391  "Reference elementwise unary: input and output shapes"
1392  "have different number of total elements");
1393 
1394  return supported;
1395 }
1396 
1398  const FakeQuantizationDescriptor& descriptor,
1399  Optional<std::string&> reasonIfUnsupported) const
1400 {
1402  bool supported = true;
1403 
1404  std::array<DataType,1> supportedTypes =
1405  {
1407  };
1408 
1409  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1410  "Reference fake quantization: input type not supported.");
1411 
1412  return supported;
1413 }
1414 
1416  const TensorInfo& output,
1417  const FillDescriptor& descriptor,
1418  Optional<std::string&> reasonIfUnsupported) const
1419 {
1422 
1423  bool supported = true;
1424 
1425  std::array<DataType,3> supportedTypes =
1426  {
1430  };
1431 
1433  "Reference Fill: input type not supported.");
1434 
1435  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1436  "Reference Fill: output type not supported.");
1437  return supported;
1438 }
1439 
1441  const TensorInfo& output,
1442  Optional<std::string&> reasonIfUnsupported) const
1443 {
1445  bool supported = true;
1446 
1447  std::array<DataType,3> supportedTypes =
1448  {
1451  };
1452 
1453  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1454  "Reference Floor: input type not supported.");
1455 
1456  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1457  "Reference Floor: output type not supported.");
1458 
1459  return supported;
1460 }
1461 
1463  const TensorInfo& output,
1464  const TensorInfo& weights,
1465  const TensorInfo& biases,
1466  const FullyConnectedDescriptor& descriptor,
1467  Optional<std::string&> reasonIfUnsupported) const
1468 {
1469  bool supported = true;
1470 
1471  // Define supported types.
1472  std::array<DataType,6> supportedTypes =
1473  {
1479  };
1480 
1481  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1482  "Reference Fully Connected: input type not supported.");
1483 
1484  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1485  "Reference Fully Connected: output type not supported.");
1486 
1487  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1488  "Reference Fully Connected: weights type not supported.");
1489 
1491  "Reference Fully Connected: input and output types mismatched.");
1492 
1493  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1494  "Reference Fully Connected: weights is not a supported type.");
1495 
1497  "Reference Fully Connected: input and weights types mismatched.");
1498 
1499  if (descriptor.m_BiasEnabled)
1500  {
1501  // Defined supported types for bias
1502  std::array<DataType, 5>
1503  supportedBiasTypes =
1504  {
1509  };
1510 
1511  supported &= CheckSupportRule(TypeAnyOf(biases, supportedBiasTypes), reasonIfUnsupported,
1512  "Reference Fully Connected: bias type not supported.");
1513 
1515  "Reference Fully Connected: bias and weight types mismatch.");
1516 
1518  "Reference Fully Connected: bias type inferred from weights is incompatible.");
1519 
1521  "Reference Fully Connected: bias must have 1 dimension.");
1522 
1523  }
1524 
1525  return supported;
1526 }
1527 
1529  const armnn::TensorInfo& input1,
1530  const armnn::TensorInfo& output,
1531  armnn::Optional<std::string&> reasonIfUnsupported) const
1532 {
1533  bool supported = true;
1534  std::array<DataType,7> supportedTypes =
1535  {
1542  };
1543 
1544  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1545  "Reference GatherNd: input type not supported");
1546 
1547  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1548  "Reference GatherNd: output type not supported");
1549 
1551  "Reference GatherNd: indices (input1) type not supported");
1552 
1554  "Reference GatherNd: input and output types not matching");
1555 
1556  return supported;
1557 }
1558 
1560  const armnn::TensorInfo& input1,
1561  const armnn::TensorInfo& output,
1562  const GatherDescriptor& descriptor,
1563  armnn::Optional<std::string&> reasonIfUnsupported) const
1564 {
1565  bool supported = true;
1566  std::array<DataType,7> supportedTypes =
1567  {
1574  };
1575 
1577  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1578  "Reference Gather: input type not supported");
1579 
1580  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1581  "Reference Gather: output type not supported");
1582 
1584  "Reference Gather: indices (input1) type not supported");
1585 
1587  "Reference Gather: input and output types not matching");
1588 
1589  return supported;
1590 }
1591 
1593  Optional<std::string&> /*reasonIfUnsupported*/) const
1594 {
1595  return true;
1596 }
1597 
1599  const TensorInfo& output,
1600  const InstanceNormalizationDescriptor& descriptor,
1601  Optional<std::string&> reasonIfUnsupported) const
1602 {
1604  // Define supported types
1605  std::array<DataType, 3> supportedTypes =
1606  {
1609  };
1610 
1611  bool supported = true;
1612 
1613  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1614  "Reference Instance Normalization: input type not supported.");
1615 
1616  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1617  "Reference Instance Normalization: output type not supported.");
1618 
1620  "Reference Instance Normalization: input and output types mismatched.");
1621 
1623  "Reference Instance Normalization: input and output shapes have different "
1624  "num total elements.");
1625 
1626  return supported;
1627 }
1628 
1630  const TensorInfo& output,
1631  const L2NormalizationDescriptor& descriptor,
1632  Optional<std::string&> reasonIfUnsupported) const
1633 {
1635  // Define supported types
1636  std::array<DataType, 6> supportedTypes =
1637  {
1643  };
1644 
1645  bool supported = true;
1646 
1647  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1648  "Reference L2normalization: input type not supported.");
1649 
1650  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1651  "Reference L2normalization: output type not supported.");
1652 
1654  "Reference L2normalization: input and output types mismatched.");
1655 
1657  "Reference L2normalization: input and output shapes have different "
1658  "num total elements.");
1659 
1660  return supported;
1661 }
1662 
1664  const TensorInfo& input1,
1665  const TensorInfo& output,
1666  const LogicalBinaryDescriptor& descriptor,
1667  Optional<std::string&> reasonIfUnsupported) const
1668 {
1670 
1671  std::array<DataType, 1> supportedTypes =
1672  {
1674  };
1675 
1676  bool supported = true;
1677  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1678  "Reference LogicalBinary: input 0 type not supported");
1679  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1680  "Reference LogicalBinary: input 1 type not supported");
1681 
1683  "Reference LogicalBinary: input and output types do not match");
1684 
1685  return supported;
1686 }
1687 
1689  const TensorInfo& output,
1690  const LogSoftmaxDescriptor& descriptor,
1691  Optional<std::string&> reasonIfUnsupported) const
1692 {
1694 
1695  std::array<DataType, 3> supportedTypes =
1696  {
1699  };
1700 
1701  bool supported = true;
1702  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1703  "Reference LogSoftmax: input type not supported");
1704 
1705  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1706  "Reference LogSoftmax: output type not supported");
1707 
1709  "Reference LogSoftmax: input and output types do not match");
1710 
1711  return supported;
1712 }
1713 
1715  const TensorInfo& outputStateIn,
1716  const TensorInfo& cellStateIn,
1717  const TensorInfo& scratchBuffer,
1718  const TensorInfo& outputStateOut,
1719  const TensorInfo& cellStateOut,
1720  const TensorInfo& output,
1721  const LstmDescriptor& descriptor,
1722  const LstmInputParamsInfo& paramsInfo,
1723  Optional<std::string&> reasonIfUnsupported) const
1724 {
1727 
1728  bool supported = true;
1729 
1730  std::array<DataType,3> supportedTypes = {
1733  };
1734 
1735  // check inputs and outputs
1736  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1737  "Reference Lstm: input is not a supported type.");
1739  "Reference Lstm: input and outputStateIn types are mismatched");
1741  "Reference Lstm: input and cellStateIn types are mismatched");
1743  "Reference Lstm: input and scratchBuffer types are mismatched");
1745  "Reference Lstm: input and outputStateOut types are mismatched");
1747  "Reference Lstm: input and cellStateOut types are mismatched");
1748 
1750  "Reference Lstm: input and output types are mismatched");
1751  // check layer parameters
1752  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToForgetWeights()), reasonIfUnsupported,
1753  "Reference Lstm: input and InputToForgetWeights types are mismatched");
1754  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToCellWeights()), reasonIfUnsupported,
1755  "Reference Lstm: input and InputToCellWeights types are mismatched");
1756  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToOutputWeights()), reasonIfUnsupported,
1757  "Reference Lstm: input and InputToOutputWeights types are mismatched");
1758  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToForgetWeights()), reasonIfUnsupported,
1759  "Reference Lstm: input and RecurrentToForgetWeights types are mismatched");
1760  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToCellWeights()), reasonIfUnsupported,
1761  "Reference Lstm: input and RecurrentToCellWeights types are mismatched");
1762  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToOutputWeights()), reasonIfUnsupported,
1763  "Reference Lstm: input and RecurrentToOutputWeights types are mismatched");
1764  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetGateBias()), reasonIfUnsupported,
1765  "Reference Lstm: input and ForgetGateBias types are mismatched");
1766  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellBias()), reasonIfUnsupported,
1767  "Reference Lstm: input and CellBias types are mismatched");
1768  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputGateBias()), reasonIfUnsupported,
1769  "Reference Lstm: input and OutputGateBias types are mismatched");
1770  if (!descriptor.m_CifgEnabled)
1771  {
1772  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToInputWeights()), reasonIfUnsupported,
1773  "Reference Lstm: input and InputToInputWeights types are mismatched");
1774  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToInputWeights()),
1776  "Reference Lstm: input and RecurrentToInputWeights types are mismatched");
1777  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputGateBias()), reasonIfUnsupported,
1778  "Reference Lstm: input and InputGateBias types are mismatched");
1779  if (descriptor.m_PeepholeEnabled)
1780  {
1781  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToInputWeights()),
1783  "Reference Lstm: input and CellToInputWeights types are mismatched");
1784  }
1785  }
1786  if (descriptor.m_PeepholeEnabled)
1787  {
1788  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToForgetWeights()), reasonIfUnsupported,
1789  "Reference Lstm: input and CellToForgetWeights types are mismatched");
1790  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToOutputWeights()), reasonIfUnsupported,
1791  "Reference Lstm: input and CellToOutputWeights types are mismatched");
1792  }
1793  if (descriptor.m_ProjectionEnabled)
1794  {
1795  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionWeights()), reasonIfUnsupported,
1796  "Reference Lstm: input and mProjectionWeights types are mismatched");
1797  if (paramsInfo.m_ProjectionBias != nullptr)
1798  {
1799  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
1800  "Reference Lstm: input and ProjectionBias types are mismatched");
1801  }
1802  }
1803  if (descriptor.m_LayerNormEnabled)
1804  {
1805  if (!descriptor.m_CifgEnabled)
1806  {
1807  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputLayerNormWeights()),
1809  "Reference Lstm: input and InputLayerNormWeights types are mismatched");
1810  }
1811  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetLayerNormWeights()),
1813  "Reference Lstm: input and ForgetLayerNormWeights types are mismatched");
1814  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellLayerNormWeights()),
1816  "Reference Lstm: input and CellLayerNormWeights types are mismatched");
1817  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputLayerNormWeights()),
1819  "Reference Lstm: input and OutputLayerNormWeights types are mismatched");
1820  }
1821 
1822  return supported;
1823 }
1824 
1826  const TensorInfo& input1,
1827  const TensorInfo& output,
1828  Optional<std::string&> reasonIfUnsupported) const
1829 {
1830  bool supported = true;
1831 
1832  std::array<DataType,7> supportedTypes = {
1839  };
1840 
1841  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1842  "Reference maximum: input 0 is not a supported type.");
1843 
1844  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1845  "Reference maximum: input 1 is not a supported type.");
1846 
1847  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1848  "Reference maximum: output is not a supported type.");
1849 
1851  "Reference maximum: input 0 and Input 1 types are mismatched");
1852 
1854  "Reference maximum: input and output types are mismatched");
1855 
1857  "Reference maximum: shapes are not suitable for implicit broadcast.");
1858 
1859  return supported;
1860 }
1861 
1863  const TensorInfo& output,
1864  const MeanDescriptor& descriptor,
1865  Optional<std::string&> reasonIfUnsupported) const
1866 {
1867  bool supported = true;
1868  std::string meanLayerStr = "Mean";
1869  std::string outputTensorStr = "output";
1870 
1871  std::array<DataType,6> supportedTypes =
1872  {
1878  };
1879 
1880  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1881  "Reference Mean: input type not supported.");
1882 
1884  "Reference Mean: input and output types are mismatched");
1885 
1886  if (descriptor.m_KeepDims)
1887  {
1890  CreateIncorrectDimensionsErrorMsg(input.GetNumDimensions(),
1892  meanLayerStr, outputTensorStr).data());
1893  }
1894  else if (descriptor.m_Axis.empty())
1895  {
1898  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1899  meanLayerStr, outputTensorStr).data());
1900  }
1901  else
1902  {
1903  auto outputDim = input.GetNumDimensions() - armnn::numeric_cast<unsigned int>(descriptor.m_Axis.size());
1904 
1905  if (outputDim > 0)
1906  {
1907  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, outputDim),
1909  CreateIncorrectDimensionsErrorMsg(outputDim, output.GetNumDimensions(),
1910  meanLayerStr, outputTensorStr).data());
1911  }
1912  else
1913  {
1916  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1917  meanLayerStr, outputTensorStr).data());
1918  }
1919  }
1920 
1921  return supported;
1922 }
1923 
1925  const TensorInfo &output,
1926  Optional<std::string &> reasonIfUnsupported) const
1927 {
1928  bool supported = true;
1929 
1930  std::array<DataType,7> supportedTypes =
1931  {
1939  };
1940 
1941  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1942  "Reference MemCopy: input type not supported");
1943 
1944  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1945  "Reference MemCopy: output type not supported");
1946 
1948  "Reference MemCopy: input and output types are mismatched");
1949 
1950  return supported;
1951 }
1952 
1954  const TensorInfo& input1,
1955  const TensorInfo& output,
1956  Optional<std::string&> reasonIfUnsupported) const
1957 {
1958  bool supported = true;
1959 
1960  std::array<DataType,7> supportedTypes = {
1967  };
1968 
1969  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1970  "Reference minimum: input 0 is not a supported type.");
1971 
1972  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1973  "Reference minimum: input 1 is not a supported type.");
1974 
1975  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1976  "Reference minimum: output is not a supported type.");
1977 
1979  "Reference minimum: input 0 and Input 1 types are mismatched");
1980 
1982  "Reference minimum: input and output types are mismatched");
1983 
1985  "Reference minimum: shapes are not suitable for implicit broadcast.");
1986 
1987  return supported;
1988 }
1989 
1991  const TensorInfo& input1,
1992  const TensorInfo& output,
1993  Optional<std::string&> reasonIfUnsupported) const
1994 {
1995  bool supported = true;
1996 
1997  std::array<DataType,7> supportedTypes = {
2004  };
2005 
2006  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2007  "Reference multiplication: input 0 is not a supported type.");
2008 
2009  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2010  "Reference multiplication: input 1 is not a supported type.");
2011 
2012  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2013  "Reference multiplication: output is not a supported type.");
2014 
2016  "Reference multiplication: input 0 and Input 1 types are mismatched");
2017 
2019  "Reference multiplication: input and output types are mismatched");
2020 
2022  "Reference multiplication: shapes are not suitable for implicit broadcast.");
2023 
2024  return supported;
2025 }
2026 
2028  const TensorInfo& output,
2029  const NormalizationDescriptor& descriptor,
2030  Optional<std::string&> reasonIfUnsupported) const
2031 {
2033 
2034  // Define supported types
2035  std::array<DataType, 6> supportedTypes =
2036  {
2042  };
2043 
2044  bool supported = true;
2045 
2046  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2047  "Reference normalization: input type not supported.");
2048 
2049  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2050  "Reference normalization: output type not supported.");
2051 
2053  "Reference normalization: input and output shapes have different "
2054  "num total elements.");
2055 
2056  return supported;
2057 }
2058 
2060  Optional<std::string&> /*reasonIfUnsupported*/) const
2061 {
2062  return true;
2063 }
2064 
2066  const TensorInfo& output,
2067  const PadDescriptor& descriptor,
2068  Optional<std::string&> reasonIfUnsupported) const
2069 {
2071  bool supported = true;
2072 
2073  // Define supported output and inputs types.
2074  std::array<DataType,6> supportedTypes =
2075  {
2081  };
2082 
2083  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2084  "Reference pad: input is not a supported type.");
2085 
2086  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2087  "Reference pad: output is not a supported type.");
2088 
2090  "Reference pad: input and output types are mismatched.");
2091 
2092  return supported;
2093 }
2094 
2096  const TensorInfo& output,
2097  const PermuteDescriptor& descriptor,
2098  Optional<std::string&> reasonIfUnsupported) const
2099 {
2101  bool supported = true;
2102 
2103  // Define supported output and inputs types.
2104  std::array<DataType, 6> supportedTypes =
2105  {
2112  };
2113 
2114  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2115  "Reference permute: input is not a supported type.");
2116 
2117  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2118  "Reference permute: output is not a supported type.");
2119 
2121  "Reference permute: input and output types are mismatched.");
2122 
2123  return supported;
2124 }
2125 
2127  const TensorInfo& output,
2128  const Pooling2dDescriptor& descriptor,
2129  Optional<std::string&> reasonIfUnsupported) const
2130 {
2132  bool supported = true;
2133 
2134  // Define supported output and inputs types.
2135  std::array<DataType,6> supportedTypes =
2136  {
2142  };
2143 
2144  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2145  "Reference poolind2d: input is not a supported type.");
2146 
2147  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2148  "Reference poolind2d: output is not a supported type.");
2149 
2151  "Reference poolind2d: input and output types are mismatched.");
2152 
2153  return supported;
2154 }
2155 
2157  const TensorInfo& output,
2158  const Pooling3dDescriptor& descriptor,
2159  Optional<std::string&> reasonIfUnsupported) const
2160 {
2162  bool supported = true;
2163 
2164  // Define supported output and inputs types.
2165  std::array<DataType,6> supportedTypes =
2166  {
2172  };
2173 
2174  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2175  "Reference poolind3d: input is not a supported type.");
2176 
2177  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2178  "Reference poolind3d: output is not a supported type.");
2179 
2181  "Reference poolind3d: input and output types are mismatched.");
2182 
2183  return supported;
2184 }
2185 
2186 
2188  const TensorInfo& previousOutputIn,
2189  const TensorInfo& previousCellStateIn,
2190  const TensorInfo& outputStateOut,
2191  const TensorInfo& cellStateOut,
2192  const TensorInfo& output,
2193  const QLstmDescriptor& descriptor,
2194  const LstmInputParamsInfo& paramsInfo,
2195  Optional<std::string&> reasonIfUnsupported) const
2196 {
2197  IgnoreUnused(input);
2205 
2207 
2208  return true;
2209 }
2210 
2212  const TensorInfo& output,
2213  Optional<std::string&> reasonIfUnsupported) const
2214 {
2215  bool supported = true;
2216 
2217  // Define supported input types.
2218  std::array<DataType,7> supportedInputTypes = {
2225  };
2226 
2227  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
2228  "Reference quantize: input type not supported.");
2229 
2230  // Define supported output types.
2231  std::array<DataType,4> supportedOutputTypes = {
2236  };
2237  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
2238  "Reference quantize: output type not supported.");
2239 
2241  "Reference quantize: input and output shapes have different num total elements.");
2242 
2243  return supported;
2244 }
2245 
2247  const TensorInfo& output,
2248  Optional<std::string&> reasonIfUnsupported) const
2249 {
2250  IgnoreUnused(input);
2251  // Define supported output types.
2252  std::array<DataType,1> supportedOutputTypes =
2253  {
2255  };
2256 
2257  return CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
2258  "Reference rank: input type not supported.");
2259 }
2260 
2262  const TensorInfo& output,
2263  const ReduceDescriptor& descriptor,
2264  Optional<std::string&> reasonIfUnsupported) const
2265 {
2267  bool supported = true;
2268  std::array<DataType,7> supportedTypes =
2269  {
2276  };
2277 
2278  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2279  "Reference Reduce: input type not supported");
2280 
2281  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2282  "Reference Reduce: output type not supported");
2283 
2285  "Reference Reduce: input and output types not matching");
2286 
2287  return supported;
2288 }
2289 
2291  const TensorInfo& output,
2292  const ReshapeDescriptor& descriptor,
2293  Optional<std::string&> reasonIfUnsupported) const
2294 {
2297  // Define supported output types.
2298  std::array<DataType,8> supportedOutputTypes =
2299  {
2308  };
2309 
2310  return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
2311  "Reference reshape: input type not supported.");
2312 }
2313 
2315  const TensorInfo& output,
2316  const ResizeDescriptor& descriptor,
2317  Optional<std::string&> reasonIfUnsupported) const
2318 {
2320  bool supported = true;
2321  std::array<DataType,6> supportedTypes =
2322  {
2329  };
2330 
2331  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2332  "Reference Resize: input type not supported");
2333 
2334  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2335  "Reference Resize: output type not supported");
2336 
2338  "Reference Resize: input and output types not matching");
2339 
2340  return supported;
2341 }
2342 
2344  const TensorInfo& output,
2345  Optional<std::string&> reasonIfUnsupported) const
2346 {
2347  IgnoreUnused(input);
2348  bool supported = true;
2349 
2350  std::array<DataType, 1> supportedTypes =
2351  {
2353  };
2354 
2355  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2356  "Reference Shape: output type not supported");
2357 
2358  return supported;
2359 }
2360 
2362  const TensorInfo& output,
2363  const SliceDescriptor& descriptor,
2364  Optional<std::string&> reasonIfUnsupported) const
2365 {
2367  bool supported = true;
2368 
2369  std::array<DataType, 5> supportedTypes =
2370  {
2375  };
2376 
2377  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2378  "Reference Slice: input type not supported");
2379 
2380  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2381  "Reference Slice: output type not supported");
2382 
2384  "Reference Slice: input and output types are mismatched");
2385 
2386  return supported;
2387 }
2388 
2390  const TensorInfo& output,
2391  const SoftmaxDescriptor& descriptor,
2392  Optional<std::string&> reasonIfUnsupported) const
2393 {
2395  bool supported = true;
2396  std::array<DataType,7> supportedTypes =
2397  {
2404  };
2405 
2406  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2407  "Reference Softmax: output type not supported");
2408 
2409  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2410  "Reference Softmax: input type not supported");
2411 
2413  "Reference Softmax: input type not supported");
2414 
2415  return supported;
2416 }
2417 
2419  const TensorInfo& output,
2420  const SpaceToBatchNdDescriptor& descriptor,
2421  Optional<std::string&> reasonIfUnsupported) const
2422 {
2424  bool supported = true;
2425  std::array<DataType,6> supportedTypes =
2426  {
2432  };
2433 
2434  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2435  "Reference SpaceToBatchNd: input type not supported");
2436 
2437  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2438  "Reference SpaceToBatchNd: output type not supported");
2439 
2441  "Reference SpaceToBatchNd: input and output types are mismatched");
2442 
2443  return supported;
2444 }
2445 
2447  const TensorInfo& output,
2448  const SpaceToDepthDescriptor& descriptor,
2449  Optional<std::string&> reasonIfUnsupported) const
2450 {
2451 
2453  bool supported = true;
2454 
2455  std::array<DataType,6> supportedTypes =
2456  {
2462  };
2463 
2464  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2465  "Reference SpaceToDepth: input type not supported");
2466 
2467  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2468  "Reference SpaceToDepth: output type not supported");
2469 
2471  "Reference SpaceToDepth: input and output types are mismatched");
2472 
2473  return supported;
2474 }
2475 
// RefLayerSupport::IsSplitterSupported (signature line 2476 missing from this listing):
// checks one input tensor against N output views produced by a Splitter layer.
// NOTE(review): line 2496 checks TypeAnyOf(input, ...) inside the per-output loop while
// the message says "input type not supported" — the upstream source checks each 'output'
// here; this looks like an extraction/transcription artifact. Verify against the repo
// before relying on this listing. The first check's message ("output type not supported")
// paired with an 'input' rule is likewise suspicious — confirm upstream.
2477  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
2478  const ViewsDescriptor& descriptor,
2479  Optional<std::string&> reasonIfUnsupported) const
2480 {
2482  bool supported = true;
2483  std::array<DataType,6> supportedTypes =
2484  {
2490  };
2491 
2492  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2493  "Reference splitter: output type not supported");
// Every output view is validated; failures accumulate rather than early-return.
2494  for (const TensorInfo& output : outputs)
2495  {
2496  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2497  "Reference splitter: input type not supported");
2498 
// (rule expression on the missing line 2499 — presumably TypesAreEqual(input, output))
2500  "Reference splitter: input and output types mismatched.");
2501  }
2502 
2503  return supported;
2504 }
2505 
// Returns true iff the reference backend supports a Stack layer over these tensors.
// Validates the output type, then each non-null input pointer, accumulating all failure
// reasons via &=. The StackDescriptor is not inspected in the visible code.
// NOTE(review): the seven DataType entries (lines 2515-2521) and the TypesAreEqual rule
// line (2532) were hyperlinks and are missing from this Doxygen listing.
2506 bool RefLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
2507  const TensorInfo& output,
2508  const StackDescriptor& descriptor,
2509  Optional<std::string&> reasonIfUnsupported) const
2510 {
2512 
2513  bool supported = true;
2514  std::array<DataType,7> supportedTypes =
2515  {
2522  };
2523 
2524  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2525  "Reference stack: output type not supported");
2526  for (const TensorInfo* input : inputs)
2527  {
// Null inputs are a programming error, not an "unsupported" case — hence assert.
2528  ARMNN_ASSERT(input != nullptr);
2529  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
2530  "Reference stack: input type not supported");
2531 
// (rule expression on the missing line 2532 — presumably TypesAreEqual(*input, output))
2533  "Reference stack: input and output types mismatched.");
2534  }
2535 
2536  return supported;
2537 }
2538 
// RefLayerSupport::IsStridedSliceSupported (signature line 2539 missing from this
// listing): type-checks input and output for a StridedSlice layer; the descriptor's
// begin/end/stride fields are not validated in the visible code.
// NOTE(review): the five DataType entries (lines 2548-2552) and the rule expression on
// line 2561 were dropped by the HTML extraction.
2540  const TensorInfo& output,
2541  const StridedSliceDescriptor& descriptor,
2542  Optional<std::string&> reasonIfUnsupported) const
2543 {
2545  bool supported = true;
2546 
2547  std::array<DataType,5> supportedTypes =
2548  {
2553  };
2554 
2555  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2556  "Reference StridedSlice: input type not supported");
2557 
2558  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2559  "Reference StridedSlice: output type not supported");
2560 
// (rule expression on the missing line 2561 — presumably TypesAreEqual(input, output))
2562  "Reference StridedSlice: input and output types are mismatched");
2563 
2564  return supported;
2565 }
2566 
// RefLayerSupport::IsSubtractionSupported (signature line 2567 missing from this
// listing): elementwise binary check — both inputs and the output must be supported
// types; the visible messages also indicate input/input, input/output type-equality and
// broadcast-compatibility rules whose expression lines (2592, 2595, 2598) were dropped.
2568  const TensorInfo& input1,
2569  const TensorInfo& output,
2570  Optional<std::string&> reasonIfUnsupported) const
2571 {
2572  bool supported = true;
2573 
2574  std::array<DataType,7> supportedTypes = {
2581  };
2582 
2583  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2584  "Reference subtraction: input 0 is not a supported type.");
2585 
2586  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2587  "Reference subtraction: input 1 is not a supported type.");
2588 
2589  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2590  "Reference subtraction: output is not a supported type.");
2591 
// (missing rule — presumably TypesAreEqual(input0, input1))
2593  "Reference subtraction: input 0 and Input 1 types are mismatched");
2594 
// (missing rule — presumably TypesAreEqual(input0, output))
2596  "Reference subtraction: input and output types are mismatched");
2597 
// (missing rule — presumably ShapesAreBroadcastCompatible(input0, input1, output))
2599  "Reference subtraction: shapes are not suitable for implicit broadcast.");
2600 
2601  return supported;
2602 }
2603 
// RefLayerSupport::IsPreluSupported (signature line 2604 missing from this listing):
// PReLU takes a data input and a broadcastable 'alpha' slope tensor; all three tensors
// (input, alpha, output) must be of a supported type. The visible messages indicate
// a three-way type-equality rule and a broadcast-shape rule whose expression lines
// (2629, 2632) were dropped by the HTML extraction.
2605  const TensorInfo& alpha,
2606  const TensorInfo& output,
2607  Optional<std::string&> reasonIfUnsupported) const
2608 {
2609  bool supported = true;
2610 
2611  std::array<DataType, 6> supportedTypes
2612  {
2618  };
2619 
2620  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2621  "PReLU: input is not a supported type.");
2622 
2623  supported &= CheckSupportRule(TypeAnyOf(alpha, supportedTypes), reasonIfUnsupported,
2624  "PReLU: alpha is not a supported type.");
2625 
2626  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2627  "PReLU: output is not a supported type.");
2628 
// (missing rule — presumably TypesAreEqual over input, alpha and output)
2630  "PReLU: input, alpha and output types are mismatched");
2631 
// (missing rule — presumably ShapesAreBroadcastCompatible(input, alpha, output))
2633  "PReLU: shapes are not suitable for implicit broadcast");
2634 
2635  return supported;
2636 }
2637 
// RefLayerSupport::IsTransposeConvolution2dSupported (signature line 2638 missing from
// this listing): validates input/output/weights/optional-bias types for a transpose
// (deconvolution) layer. Weight-type rules branch on whether the input is an 8-bit
// quantized type (per-axis/per-tensor quantized weights allowed) vs. float (weights
// must match the general supported set and equal the input type).
2639  const TensorInfo& output,
2640  const TransposeConvolution2dDescriptor& descriptor,
2641  const TensorInfo& weights,
2642  const Optional<TensorInfo>& biases,
2643  Optional<std::string&> reasonIfUnsupported) const
2644 {
2646  bool supported = true;
2647 
2648  std::array<DataType,7> supportedTypes =
2649  {
2656  };
2657 
2658  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2659  "Reference TransposeConvolution2d: input is not a supported type.");
2660 
2661  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2662  "Reference TransposeConvolution2d: output is not a supported type.");
2663 
// (missing rule line 2664 — presumably TypesAreEqual(input, output))
2665  "Reference TransposeConvolution2d: input and output types mismatched.");
2666 
2667 
2668  const DataType inputType = input.GetDataType();
2669  if (IsQuantized8BitType(inputType))
2670  {
// Quantized path: weights restricted to a narrower 3-entry set (entries on lines
// 2673-2675 were hyperlinks and are missing from this listing).
2671  std::array<DataType, 3> supportedWeightTypes =
2672  {
2676  };
2677 
2678  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
2679  "Reference TransposeConvolution2d: weights type not supported for "
2680  "quantized input.");
2681  }
2682  else
2683  {
2684  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
2685  "Reference TransposeConvolution2d: weights is not a supported type.");
2686 
// (missing rule line 2687 — presumably TypesAreEqual(input, weights))
2688  "Reference TransposeConvolution2d: input and weights types mismatched.");
2689  }
2690 
// Bias is optional; only validated when present.
2691  if (biases.has_value())
2692  {
2693  std::array<DataType,4> biasesSupportedTypes =
2694  {
2698  };
2699  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
2700  "Reference TransposeConvolution2d: biases is not a supported type.");
2701  }
2702 
2703  return supported;
2704 }
2705 
// RefLayerSupport::IsTransposeSupported (signature line 2706 missing from this
// listing): pure type check for a Transpose layer — the permutation in the
// TransposeDescriptor is not validated in the visible code.
// NOTE(review): the six DataType entries (lines 2716-2722) and the rule expression on
// line 2731 were dropped by the HTML extraction.
2707  const TensorInfo& output,
2708  const TransposeDescriptor& descriptor,
2709  Optional<std::string&> reasonIfUnsupported) const
2710 {
2712  bool supported = true;
2713 
2714  // Define supported output and inputs types.
2715  std::array<DataType, 6> supportedTypes =
2716  {
2723  };
2724 
2725  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2726  "Reference transpose: input is not a supported type.");
2727 
2728  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2729  "Reference transpose: output is not a supported type.");
2730 
// (missing rule — presumably TypesAreEqual(input, output))
2732  "Reference transpose: input and output types are mismatched.");
2733 
2734  return supported;
2735 }
2736 
// RefLayerSupport::IsUnidirectionalSequenceLstmSupported (signature line 2737 missing
// from this listing): validates a sequence-LSTM layer. Three type sets are used:
//   supportedTypes       — for the activation input/output tensors (2 entries),
//   supportedWeightTypes — for all weight matrices (2 entries),
//   supportedBiasTypes   — for gate biases (3 entries).
// The entries themselves were hyperlinks and are missing from this Doxygen dump, as is
// the reasonIfUnsupported argument line of most weight-rule calls (gaps 2783, 2787, ...).
// Optional parameter groups are gated on the descriptor flags: m_CifgEnabled (input
// gate absent), m_PeepholeEnabled, m_ProjectionEnabled, m_LayerNormEnabled. State
// tensors (outputStateIn/cellStateIn/...Out) are not type-checked in the visible code.
2738  const TensorInfo& input,
2739  const TensorInfo& outputStateIn,
2740  const TensorInfo& cellStateIn,
2741  const TensorInfo& outputStateOut,
2742  const TensorInfo& cellStateOut,
2743  const TensorInfo& output,
2744  const UnidirectionalSequenceLstmDescriptor& descriptor,
2745  const LstmInputParamsInfo& paramsInfo,
2746  Optional<std::string&> reasonIfUnsupported) const
2747 {
2754  bool supported = true;
2755 
2756  std::array<DataType, 2> supportedTypes =
2757  {
2760  };
2761 
2762  std::array<DataType, 2> supportedWeightTypes =
2763  {
2766  };
2767 
2768  std::array<DataType, 3> supportedBiasTypes =
2769  {
2773  };
2774 
2775  // check inputs and outputs
2776  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2777  "Reference UnidirectionalSequenceLstm: input is not a supported type.");
2778  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2779  "Reference UnidirectionalSequenceLstm: output is not a supported type.");
2780 
2781  // check layer parameters
2782  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToForgetWeights(), supportedWeightTypes),
2784  "Reference UnidirectionalSequenceLstm: InputToForgetWeights "
2785  "is not a supported type.");
2786  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToCellWeights(), supportedWeightTypes),
2788  "Reference UnidirectionalSequenceLstm: InputToCellWeights is not a supported type.");
2789  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToOutputWeights(), supportedWeightTypes),
2791  "Reference UnidirectionalSequenceLstm: InputToOutputWeights "
2792  "is not a supported type.");
2793  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToForgetWeights(), supportedWeightTypes),
2795  "Reference UnidirectionalSequenceLstm: RecurrentToForgetWeights "
2796  "is not a supported type.");
2797  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToCellWeights(), supportedWeightTypes),
2799  "Reference UnidirectionalSequenceLstm: RecurrentToCellWeights "
2800  "is not a supported type.");
2801  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToOutputWeights(), supportedWeightTypes),
2803  "Reference UnidirectionalSequenceLstm: RecurrentToOutputWeights "
2804  "is not a supported type.");
2805 
2806  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetForgetGateBias(), supportedBiasTypes), reasonIfUnsupported,
2807  "Reference UnidirectionalSequenceLstm: ForgetGateBias is not a supported type.");
2808  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellBias(), supportedBiasTypes), reasonIfUnsupported,
2809  "Reference UnidirectionalSequenceLstm: CellBias is not a supported type.");
2810  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetOutputGateBias(), supportedBiasTypes), reasonIfUnsupported,
2811  "Reference UnidirectionalSequenceLstm: OutputGateBias is not a supported type.");
// CIFG disabled => the input gate exists, so its weights/bias must be validated.
2812  if (!descriptor.m_CifgEnabled)
2813  {
2814  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToInputWeights(), supportedWeightTypes),
2816  "Reference UnidirectionalSequenceLstm: InputToInputWeights "
2817  "is not a supported type.");
2818  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToInputWeights(), supportedWeightTypes),
2820  "Reference UnidirectionalSequenceLstm: RecurrentToInputWeights "
2821  "is not a supported type.");
2822  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputGateBias(), supportedBiasTypes), reasonIfUnsupported,
2823  "Reference UnidirectionalSequenceLstm: InputGateBias is not a supported type.");
// Cell-to-input peephole weights only exist with both CIFG off and peephole on.
2824  if (descriptor.m_PeepholeEnabled)
2825  {
2826  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToInputWeights(), supportedWeightTypes),
2828  "Reference UnidirectionalSequenceLstm: CellToInputWeights "
2829  "is not a supported type.");
2830  }
2831  }
2832  if (descriptor.m_PeepholeEnabled)
2833  {
2834  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToForgetWeights(), supportedWeightTypes),
2836  "Reference UnidirectionalSequenceLstm: CellToForgetWeights "
2837  "is not a supported type.");
2838  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToOutputWeights(), supportedWeightTypes),
2840  "Reference UnidirectionalSequenceLstm: CellToOutputWeights "
2841  "is not a supported type.");
2842  }
2843  if (descriptor.m_ProjectionEnabled)
2844  {
2845  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetProjectionWeights(), supportedWeightTypes),
2847  "Reference UnidirectionalSequenceLstm: ProjectionWeights "
2848  "is not a supported type.");
// Projection bias is optional even when projection is enabled; when present its
// type must equal the input's type (not merely be in the bias set).
2849  if (paramsInfo.m_ProjectionBias != nullptr)
2850  {
2851  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
2852  "Reference UnidirectionalSequenceLstm: input and ProjectionBias types "
2853  "are mismatched");
2854  }
2855  }
2856  if (descriptor.m_LayerNormEnabled)
2857  {
// Input-gate layer-norm weights exist only when CIFG is disabled.
2858  if (!descriptor.m_CifgEnabled)
2859  {
2860  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputLayerNormWeights(), supportedWeightTypes),
2862  "Reference UnidirectionalSequenceLstm: InputLayerNormWeights "
2863  "is not a supported type.");
2864  }
2865  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetForgetLayerNormWeights(), supportedWeightTypes),
2867  "Reference UnidirectionalSequenceLstm: ForgetLayerNormWeights "
2868  "is not a supported type.");
2869  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellLayerNormWeights(), supportedWeightTypes),
2871  "Reference UnidirectionalSequenceLstm: CellLayerNormWeights "
2872  "is not a supported type.");
2873  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetOutputLayerNormWeights(), supportedWeightTypes),
2875  "Reference UnidirectionalSequenceLstm: OutputLayerNormWeights "
2876  "is not a supported type.");
2877  }
2878 
2879  return supported;
2880 }
2881 
2882 } // namespace armnn
armnn::RefLayerSupport::IsLstmSupported
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1714
armnn::LayerSupportBase::IsMemImportSupported
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: LayerSupportBase.cpp:397
armnn::ActivationFunction::Abs
@ Abs
armnn::ActivationFunction::Elu
@ Elu
armnn::LayerType::Floor
@ Floor
armnn::RefLayerSupport::IsGatherNdSupported
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1528
armnn::RefLayerSupport::IsConvolution2dSupported
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:967
armnn::LayerType::MemCopy
@ MemCopy
armnn::RefLayerSupport::IsDebugSupported
bool IsDebugSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1104
armnn::RefLayerSupport::IsResizeSupported
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2314
armnn::LayerType::Softmax
@ Softmax
armnn::LayerType::Pooling3d
@ Pooling3d
armnn::RefLayerSupport::IsInputSupported
bool IsInputSupported(const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1592
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::RefLayerSupport::IsActivationSupported
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:508
armnn::ShapesAreSameRank
Definition: LayerSupportRules.hpp:138
armnn::ILayerSupport::outputStateIn
const TensorInfo & outputStateIn
Definition: ILayerSupport.hpp:286
armnn::RefLayerSupport::IsQuantizeSupported
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2211
armnn::LayerType::Transpose
@ Transpose
armnn::RefLayerSupport::IsTransposeConvolution2dSupported
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2638
armnn::IsQuantized8BitType
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:289
armnn::RefLayerSupport::IsL2NormalizationSupported
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1629
armnn::LstmInputParamsInfo
Definition: LstmParams.hpp:63
armnn::LayerType::ChannelShuffle
@ ChannelShuffle
armnn::ILayerSupport::paramsInfo
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo & paramsInfo
Definition: ILayerSupport.hpp:293
armnn::DataType::QAsymmU8
@ QAsymmU8
armnn::RefLayerSupport::IsTransposeSupported
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2706
armnn::GatherDescriptor
A GatherDescriptor for the GatherLayer.
Definition: Descriptors.hpp:912
armnn::NormalizationDescriptor
A NormalizationDescriptor for the NormalizationLayer.
Definition: Descriptors.hpp:737
armnn::ActivationFunction::Linear
@ Linear
armnn::TransposeDescriptor
A TransposeDescriptor for the TransposeLayer.
Definition: Descriptors.hpp:1437
armnn::ElementwiseUnaryDescriptor
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:109
armnn::RefLayerSupport::IsCastSupported
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:784
armnn::PadDescriptor
A PadDescriptor for the PadLayer.
Definition: Descriptors.hpp:1143
armnn::SoftmaxDescriptor
A SoftmaxDescriptor for the SoftmaxLayer.
Definition: Descriptors.hpp:157
armnn::TensorNumDimensionsAreGreaterOrEqualTo
Definition: LayerSupportRules.hpp:189
armnn::LayerType::ConvertFp32ToFp16
@ ConvertFp32ToFp16
armnn::LayerType::L2Normalization
@ L2Normalization
armnn::ILayerSupport::detectionBoxes
const TensorInfo const TensorInfo const TensorInfo & detectionBoxes
Definition: ILayerSupport.hpp:174
armnn::StackDescriptor
A StackDescriptor for the StackLayer.
Definition: Descriptors.hpp:1198
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
armnn::SliceDescriptor
A SliceDescriptor for the SliceLayer.
Definition: Descriptors.hpp:1175
armnn::ILayerSupport::scratchBuffer
const TensorInfo const TensorInfo const TensorInfo & scratchBuffer
Definition: ILayerSupport.hpp:288
armnn::LayerType::Map
@ Map
armnn::DataType::Float16
@ Float16
armnn::LayerType::Input
@ Input
armnn::LayerType::Slice
@ Slice
armnn::ActivationDescriptor
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
armnn::ILayerSupport::reasonIfUnsupported
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
Definition: ILayerSupport.hpp:43
armnn::LstmDescriptor
An LstmDescriptor for the LstmLayer.
Definition: Descriptors.hpp:1049
armnn::LayerType::Maximum
@ Maximum
armnn::FullyConnectedDescriptor
A FullyConnectedDescriptor for the FullyConnectedLayer.
Definition: Descriptors.hpp:475
armnn::RefLayerSupport::IsNormalizationSupported
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2027
armnn::LayerType::Quantize
@ Quantize
armnn::BatchMatMulDescriptor
A BatchMatMulDescriptor for the BatchMatMul operator.
Definition: Descriptors.hpp:1531
armnn::RefLayerSupport::IsSpaceToBatchNdSupported
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2418
armnn::ResizeDescriptor
A ResizeDescriptor for the ResizeLayer.
Definition: Descriptors.hpp:932
armnn::LayerType::ArgMinMax
@ ArgMinMax
armnn::RefLayerSupport::IsMemCopySupported
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1924
armnn::StridedSliceDescriptor
A StridedSliceDescriptor for the StridedSliceLayer.
Definition: Descriptors.hpp:1250
armnn::LayerType::Subtraction
@ Subtraction
armnn::LayerType::SpaceToBatchNd
@ SpaceToBatchNd
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::Pooling3dDescriptor
A Pooling3dDescriptor for the Pooling3dLayer.
Definition: Descriptors.hpp:399
armnn::ReduceDescriptor
A ReduceDescriptor for the REDUCE operators.
Definition: Descriptors.hpp:1485
armnn::RefLayerSupport::IsMeanSupported
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1862
armnn::RefLayerSupport::IsStridedSliceSupported
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2539
PolymorphicDowncast.hpp
armnn::LayerType::Shape
@ Shape
armnn::ILayerSupport::previousOutputIn
const TensorInfo & previousOutputIn
Definition: ILayerSupport.hpp:405
armnn::RefLayerSupport::IsConstantSupported
bool IsConstantSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:909
armnn::ComparisonDescriptor
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::DataType::Signed32
@ Signed32
armnn::RefLayerSupport::IsOutputSupported
bool IsOutputSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2059
armnn::RefLayerSupport::IsSpaceToDepthSupported
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2446
armnn::ILayerSupport::mean
const TensorInfo const TensorInfo & mean
Definition: ILayerSupport.hpp:63
armnn::LayerSupportBase::IsMergeSupported
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: LayerSupportBase.cpp:404
armnn::ActivationFunction::HardSwish
@ HardSwish
armnn::LayerType::Merge
@ Merge
armnn::RefLayerSupport::IsMultiplicationSupported
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1990
armnn::ViewsDescriptor
A ViewsDescriptor for the SplitterLayer.
Definition: Descriptors.hpp:224
armnn::LayerType::Permute
@ Permute
armnn::LayerType::ConvertFp16ToFp32
@ ConvertFp16ToFp32
armnn::LayerSupportBase::IsQuantizedLstmSupported
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: LayerSupportBase.cpp:509
armnn::LayerType::QLstm
@ QLstm
armnn::LayerType::Pad
@ Pad
armnn::RefLayerSupport::IsComparisonSupported
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:845
armnn::LayerType::Addition
@ Addition
armnn::RefLayerSupport::IsGatherSupported
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1559
armnn::LayerType::QuantizedLstm
@ QuantizedLstm
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::LayerType::BatchNormalization
@ BatchNormalization
armnn::LayerType::Reduce
@ Reduce
armnn::RefLayerSupport::IsStackSupported
bool IsStackSupported(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2506
armnn::BiasAndWeightsTypesCompatible
Definition: LayerSupportRules.hpp:126
armnn::TensorNumDimensionsAreCorrect
Definition: LayerSupportRules.hpp:181
RefLayerSupport.hpp
armnn::RefLayerSupport::IsRankSupported
bool IsRankSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2246
armnn::RefLayerSupport::IsConvolution3dSupported
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1036
armnn::LayerType::Division
@ Division
armnn::RefLayerSupport::IsBatchMatMulSupported
bool IsBatchMatMulSupported(const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:644
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::LayerType::Debug
@ Debug
armnn::LayerType::InstanceNormalization
@ InstanceNormalization
armnn::RefLayerSupport::IsLogicalBinarySupported
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const override
Definition: RefLayerSupport.cpp:1663
armnn::RefLayerSupport::IsPreluSupported
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2604
armnn::RefLayerSupport::IsDivisionSupported
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1309
armnn::BatchToSpaceNdDescriptor
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
Definition: Descriptors.hpp:843
armnn::CheckSupportRule
bool CheckSupportRule(F rule, Optional< std::string & > reasonIfUnsupported, const char *reason)
Definition: LayerSupportRules.hpp:38
armnn::OptionalReferenceSwitch< std::is_reference< T >::value, T >::value
const T & value() const
Definition: Optional.hpp:146
armnn::SpaceToDepthDescriptor
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
Definition: Descriptors.hpp:1022
armnn::LayerType::Activation
@ Activation
armnn::ILayerSupport::detectionClasses
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & detectionClasses
Definition: ILayerSupport.hpp:175
armnn::LayerType::Normalization
@ Normalization
armnn::DetectionPostProcessDescriptor
Definition: Descriptors.hpp:681
armnn::LayerType::Comparison
@ Comparison
armnn::LayerType::Stack
@ Stack
armnn::ILayerSupport::descriptor
const TensorInfo const ActivationDescriptor & descriptor
Definition: ILayerSupport.hpp:42
armnn::FillDescriptor
A FillDescriptor for the FillLayer.
Definition: Descriptors.hpp:893
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:466
armnn::LayerType::Reshape
@ Reshape
armnn::ILayerSupport::previousCellStateIn
const TensorInfo const TensorInfo & previousCellStateIn
Definition: ILayerSupport.hpp:406
armnn::LayerType::Gather
@ Gather
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::LayerType::Fill
@ Fill
armnn::RefLayerSupport::IsFakeQuantizationSupported
bool IsFakeQuantizationSupported(const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1397
armnn::ILayerSupport::numDetections
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & numDetections
Definition: ILayerSupport.hpp:177
armnn::LayerType::Resize
@ Resize
armnn::RefLayerSupport::IsConcatSupported
bool IsConcatSupported(const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:876
armnn::ILayerSupport::alpha
const TensorInfo & alpha
Definition: ILayerSupport.hpp:392
armnn::RefLayerSupport::IsBatchToSpaceNdSupported
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:735
armnn::RefLayerSupport::IsDetectionPostProcessSupported
bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1267
armnn::LayerType::Rank
@ Rank
armnn::DepthwiseConvolution2dDescriptor
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
Definition: Descriptors.hpp:627
armnn::MeanDescriptor
A MeanDescriptor for the MeanLayer.
Definition: Descriptors.hpp:1119
armnn::ActivationFunction::Sigmoid
@ Sigmoid
armnn::TensorInfo::GetNumDimensions
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195
armnn::RefLayerSupport::IsLayerSupported
bool IsLayerSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &, Optional< std::string & > reasonIfUnsupported) const override
Definition: RefLayerSupport.cpp:61
armnn::ShapesAreBroadcastCompatible
Definition: LayerSupportRules.hpp:154
armnn::RefLayerSupport::IsChannelShuffleSupported
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:813
LayerSupportRules.hpp
armnn::ActivationFunction::SoftReLu
@ SoftReLu
armnn::ShapesAreSameTotalSize
Definition: LayerSupportRules.hpp:146
armnn::LayerType::LogicalBinary
@ LogicalBinary
armnn::LayerType::UnidirectionalSequenceLstm
@ UnidirectionalSequenceLstm
armnn::RefLayerSupport::IsPooling2dSupported
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2126
armnn::LayerType::Pooling2d
@ Pooling2d
armnn::L2NormalizationDescriptor
A L2NormalizationDescriptor for the L2NormalizationLayer.
Definition: Descriptors.hpp:777
armnn::RefLayerSupport::IsShapeSupported
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2343
armnn::DataType::Float32
@ Float32
armnn::ILayerSupport::input1
const TensorInfo & input1
Definition: ILayerSupport.hpp:48
armnn::ChannelShuffleDescriptor
A ChannelShuffleDescriptor for the ChannelShuffle operator.
Definition: Descriptors.hpp:1509
armnn::Convolution3dDescriptor
A Convolution3dDescriptor for the Convolution3dLayer.
Definition: Descriptors.hpp:556
armnn::LayerType::GatherNd
@ GatherNd
armnn::TypeIs
Definition: LayerSupportRules.hpp:102
armnn::RefLayerSupport::IsMaximumSupported
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1825
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::ILayerSupport::gamma
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & gamma
Definition: ILayerSupport.hpp:66
armnn::LayerType::Minimum
@ Minimum
armnn::LayerType::Constant
@ Constant
armnn::DataType::Signed64
@ Signed64
armnn::RefLayerSupport::IsPadSupported
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2065
armnn::Convolution2dDescriptor
A Convolution2dDescriptor for the Convolution2dLayer.
Definition: Descriptors.hpp:502
armnn::LayerType::Lstm
@ Lstm
armnn::BatchNormalizationDescriptor
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
Definition: Descriptors.hpp:796
armnn::QLstmDescriptor
A QLstmDescriptor for the QLstmLayer.
Definition: Descriptors.hpp:1327
armnn::RefLayerSupport::IsElementwiseUnarySupported
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1346
LayerSupportCommon.hpp
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
armnn::EmptyOptional
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
armnn::LayerType::SpaceToDepth
@ SpaceToDepth
armnn::LayerType::FakeQuantization
@ FakeQuantization
armnn::RefLayerSupport::IsReshapeSupported
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2290
armnn::ILayerSupport::beta
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & beta
Definition: ILayerSupport.hpp:65
armnn::ActivationFunction::Square
@ Square
armnn::ActivationDescriptor::m_Function
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu,...
Definition: Descriptors.hpp:59
armnn::ILayerSupport::weights
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights
Definition: ILayerSupport.hpp:127
armnn::RefLayerSupport::IsDepthToSpaceSupported
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1134
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::ILayerSupport::cellStateIn
const TensorInfo const TensorInfo & cellStateIn
Definition: ILayerSupport.hpp:287
armnn::ILayerSupport::scores
const TensorInfo & scores
Definition: ILayerSupport.hpp:172
armnn::LayerType::Unmap
@ Unmap
armnn::RefLayerSupport::IsDepthwiseConvolutionSupported
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1163
armnn::RefLayerSupport::IsReduceSupported
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2261
armnn::LayerType::DetectionPostProcess
@ DetectionPostProcess
armnn::ILayerSupport::biases
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
Definition: ILayerSupport.hpp:128
armnn::BiasAndWeightsTypesMatch
Definition: LayerSupportRules.hpp:118
armnn::LayerType::Mean
@ Mean
armnn::BaseDescriptor
Base class for all descriptors.
Definition: Descriptors.hpp:22
armnn::OriginsDescriptor
An OriginsDescriptor for the ConcatLayer.
Definition: Descriptors.hpp:181
armnn::RefLayerSupport::IsArgMinMaxSupported
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:612
armnn::ReshapeDescriptor
A ReshapeDescriptor for the ReshapeLayer.
Definition: Descriptors.hpp:970
armnn::RefLayerSupport::IsFloorSupported
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1440
armnn::LayerType::BatchToSpaceNd
@ BatchToSpaceNd
armnn::DataType
DataType
Definition: Types.hpp:48
armnn::PermuteDescriptor
A PermuteDescriptor for the PermuteLayer.
Definition: Descriptors.hpp:129
armnn::TransposeConvolution2dDescriptor
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
Definition: Descriptors.hpp:1387
armnn::IsSupportedForDataTypeGeneric
bool IsSupportedForDataTypeGeneric(Optional< std::string & > reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
Definition: LayerSupportCommon.hpp:27
armnn::LayerType::DepthToSpace
@ DepthToSpace
armnn::RefLayerSupport::IsSliceSupported
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2361
armnn::DataType::BFloat16
@ BFloat16
ARMNN_ASSERT
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
armnn::RefLayerSupport::IsSplitterSupported
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2476
armnn::RefLayerSupport::IsQLstmSupported
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2187
armnn::ILayerSupport::outputs
const std::vector< std::reference_wrapper< TensorInfo > > & outputs
Definition: ILayerSupport.hpp:488
armnn::ActivationFunction::TanH
@ TanH
armnn::Pooling2dDescriptor
A Pooling2dDescriptor for the Pooling2dLayer.
Definition: Descriptors.hpp:339
armnn::LogicalBinaryDescriptor
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
Definition: Descriptors.hpp:1465
armnn::TypeNotPerAxisQuantized
Definition: LayerSupportRules.hpp:110
armnn::RefLayerSupport::IsFillSupported
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1415
armnn::RefLayerSupport::IsInstanceNormalizationSupported
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1598
armnn::RefLayerSupport::IsSubtractionSupported
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2567
armnn::RefLayerSupport::IsMinimumSupported
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1953
armnn::UnaryOperation::LogicalNot
@ LogicalNot
armnn::ILayerSupport::detectionScores
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & detectionScores
Definition: ILayerSupport.hpp:176
armnn::Optional
Definition: Optional.hpp:270
armnn::ILayerSupport::anchors
const TensorInfo const TensorInfo & anchors
Definition: ILayerSupport.hpp:173
armnn::RefLayerSupport::IsDilatedDepthwiseConvolutionSupported
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1299
armnn::RefLayerSupport::IsConvertFp16ToFp32Supported
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:927
armnn::DataType::QSymmS8
@ QSymmS8
armnn::RefLayerSupport::IsAdditionSupported
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:575
armnn::LayerType::Concat
@ Concat
NumericCast.hpp
armnn::ArgMinMaxDescriptor
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
armnn::RefLayerSupport::IsFullyConnectedSupported
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1462
armnn::DataType::QSymmS16
@ QSymmS16
armnn::FakeQuantizationDescriptor
A FakeQuantizationDescriptor for the FakeQuantizationLayer.
Definition: Descriptors.hpp:874
armnn::TypesAreEqual
Definition: LayerSupportRules.hpp:72
armnn::LayerType::Cast
@ Cast
armnn::ActivationFunction::ReLu
@ ReLu
armnn::RefLayerSupport::IsSoftmaxSupported
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2389
IgnoreUnused.hpp
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::ActivationFunction::Sqrt
@ Sqrt
armnn::RefLayerSupport::IsPermuteSupported
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2095
TypesUtils.hpp
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::LayerType::Splitter
@ Splitter
armnn::ILayerSupport::output
const TensorInfo & output
Definition: ILayerSupport.hpp:41
armnn::LayerType::LogSoftmax
@ LogSoftmax
Types.hpp
armnn::RefLayerSupport::IsPooling3dSupported
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2156
armnn::RefLayerSupport::IsDequantizeSupported
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1232
armnn::Rule
Definition: LayerSupportRules.hpp:48
armnn::LayerType::Output
@ Output
armnn::RefLayerSupport::IsBatchNormalizationSupported
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:689
armnn::DataType::Boolean
@ Boolean
armnn::RefLayerSupport::IsUnidirectionalSequenceLstmSupported
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2737
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn::LayerType::Multiplication
@ Multiplication
armnn::LayerType::MemImport
@ MemImport
armnn::RefLayerSupport::IsLogSoftmaxSupported
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const override
Definition: RefLayerSupport.cpp:1688
armnn::LayerType::Prelu
@ Prelu
armnn::ILayerSupport::outputStateOut
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & outputStateOut
Definition: ILayerSupport.hpp:289
armnn::InstanceNormalizationDescriptor
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
Definition: Descriptors.hpp:815
armnn::ActivationFunction::BoundedReLu
@ BoundedReLu
min(a, max(b, input)) ReLu1 & ReLu6.
armnn::ILayerSupport::cellStateOut
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & cellStateOut
Definition: ILayerSupport.hpp:290
armnn::LayerType::Dequantize
@ Dequantize
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:198
armnn::RefLayerSupport::IsConvertFp32ToFp16Supported
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:947
armnn::SpaceToBatchNdDescriptor
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Definition: Descriptors.hpp:990
armnn::ActivationFunction::LeakyReLu
@ LeakyReLu
armnn::TypeAnyOf
Definition: LayerSupportRules.hpp:90