// ArmNN 23.05 — RefLayerSupport.cpp (recovered from the doxygen source listing)
//
// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "RefLayerSupport.hpp"

#include <armnn/TypesUtils.hpp>
#include <armnn/Types.hpp>

#include <LayerSupportCommon.hpp>

#include <array>
#include <string>
#include <vector>
19 
20 namespace armnn
21 {
22 
23 namespace
24 {
25 
// File-local dispatch helper for reference-backend checks that only accept
// Float32 and Uint8 input. Forwards to IsSupportedForDataTypeGeneric
// (LayerSupportCommon.hpp), passing the caller's predicates for the Float32
// and Uint8 slots and wiring &FalseFunc into the remaining data-type slots so
// every other data type is reported as unsupported.
template<typename Float32Func, typename Uint8Func, typename ... Params>
bool IsSupportedForDataTypeRef(Optional<std::string&> reasonIfUnsupported,
                               DataType dataType,
                               Float32Func floatFuncPtr,
                               Uint8Func uint8FuncPtr,
                               Params&&... params)
{
    // Perfect-forward any extra arguments through to the selected predicate.
    return IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                         dataType,
                                         &FalseFunc<Params...>,
                                         floatFuncPtr,
                                         uint8FuncPtr,
                                         &FalseFunc<Params...>,
                                         &FalseFunc<Params...>,
                                         std::forward<Params>(params)...);
}
42 
43 } // anonymous namespace
44 
45 namespace
46 {
47 
// Builds the standard "wrong tensor rank" diagnostic used by the reference
// layer-support checks.
//
// \param expected    Number of dimensions the layer requires.
// \param actual      Number of dimensions the given tensor actually has.
// \param layerStr    Human-readable layer name, e.g. "batchToSpaceNd".
// \param tensorName  Role of the offending tensor, e.g. "input" / "output".
// \return The formatted error message.
//
// The string parameters are taken by const reference (the original took
// mutable std::string&): they are read-only here, and const-ref additionally
// allows callers to pass temporaries and string literals.
std::string CreateIncorrectDimensionsErrorMsg(unsigned int expected,
                                              unsigned int actual,
                                              const std::string& layerStr,
                                              const std::string& tensorName)
{
    std::string errorMsg = "Reference " + layerStr + ": Expected " + std::to_string(expected) +
                           " dimensions but got " + std::to_string(actual) +
                           " dimensions instead, for the '" + tensorName + "' tensor.";

    return errorMsg;
}
58 
59 } // anonymous namespace
60 
// RefLayerSupport::IsLayerSupported — single entry point that fans out to the
// per-layer Is*Supported() checks in this file, selecting on the LayerType
// and using PolymorphicDowncast to recover each concrete descriptor type from
// the BaseDescriptor. `infos` carries the layer's tensor infos in a
// layer-specific order (typically inputs first, output(s) last).
//
// NOTE(review): recovered from a doxygen listing. Lines that carried
// hyperlinks were dropped by the extraction: the first line of the signature
// (presumably `bool RefLayerSupport::IsLayerSupported(const LayerType& type,`
// — confirm against RefLayerSupport.hpp), most `case LayerType::X:` labels,
// the DataType entries of `supportedTypes`, and many trailing
// `reasonIfUnsupported);` argument lines. What follows is byte-faithful to
// the surviving lines only and is NOT compilable as shown.
    const std::vector<TensorInfo>& infos,
    const BaseDescriptor& descriptor,
    const Optional<LstmInputParamsInfo>& lstmParamsInfo,
    const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmInputParamsInfo,
    Optional<std::string&> reasonIfUnsupported) const
{
    switch (type)
    {
        // (case labels lost in extraction; each branch is identified by the
        // Is*Supported overload it calls)
        return IsActivationSupported(infos[0],
                                     infos[1],
                                     *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
        return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        return IsArgMinMaxSupported(infos[0],
                                    infos[1],
                                    *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
        return IsBatchMatMulSupported(infos[0],
                                      infos[1],
                                      infos[2],
                                      *(PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor)),
        return IsBatchNormalizationSupported(infos[0],
                                             infos[1],
                                             infos[2],
                                             infos[3],
                                             infos[4],
                                             infos[5],
                                             *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
                                                 (&descriptor)),
        return IsBatchToSpaceNdSupported(infos[0],
                                         infos[1],
                                         *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
        return IsComparisonSupported(infos[0],
                                     infos[1],
                                     infos[2],
                                     *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
        case LayerType::Concat:
        {
            // All but the last TensorInfo are concat inputs; the last is the output.
            std::vector<const TensorInfo*> inputInfos;
            for (uint32_t i = 0; i < (infos.size() - 1); i++)
            {
                inputInfos.push_back(&infos[i]);
            }
            return IsConcatSupported(inputInfos,
                                     infos[infos.size() - 1],
                                     *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
        }
        case LayerType::Constant:
            return IsConstantSupported(infos[0], reasonIfUnsupported);
            return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
            return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
            // A default-constructed TensorInfo in slot 3 means "no bias".
            if (infos[3] == TensorInfo())
            {
                return IsConvolution2dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                EmptyOptional(),
            }
            else
            {
                return IsConvolution2dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                infos[3],
            }
        }
        return IsDepthToSpaceSupported(infos[0],
                                       infos[1],
                                       *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
            if (infos[3] == TensorInfo())
            {
                return IsDepthwiseConvolutionSupported(infos[0],
                                                       infos[1],
                                                       desc,
                                                       infos[2],
                                                       EmptyOptional(),
            }
            else
            {
                return IsDepthwiseConvolutionSupported(infos[0],
                                                       infos[1],
                                                       desc,
                                                       infos[2],
                                                       infos[3],
            }
        }
        return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Division:
            return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        {
            // Inline type check (no dedicated Is*Supported overload for this branch).
            // NOTE(review): the seven DataType entries were lost in extraction.
            std::array<DataType, 7> supportedTypes =
            {
            };

            bool supported = true;
            supported &= CheckSupportRule(TypeAnyOf(infos[0], supportedTypes), reasonIfUnsupported,
                                          "Reference elementwise unary: input type not supported");

            supported &= CheckSupportRule(TypeAnyOf(infos[1], supportedTypes), reasonIfUnsupported,
                                          "Reference elementwise unary: input type not supported");

            supported &= CheckSupportRule(TypeAnyOf(infos[2], supportedTypes), reasonIfUnsupported,
                                          "Reference elementwise unary: output type not supported");

            supported &= CheckSupportRule(TypesAreEqual(infos[0], infos[1]), reasonIfUnsupported,
                                          "Reference elementwise unary: input types not matching");

            supported &= CheckSupportRule(TypesAreEqual(infos[0], infos[2]), reasonIfUnsupported,
                                          "Reference elementwise unary: input and output types not matching");

            return supported;
        }
        return IsElementwiseUnarySupported(infos[0],
                                           infos[1],
                                           *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
        case LayerType::Fill:
            return IsFillSupported(infos[0],
                                   infos[1],
                                   *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
        case LayerType::Floor:
            return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
        return IsFullyConnectedSupported(infos[0],
                                         infos[1],
                                         infos[2],
                                         infos[3],
                                         *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
        case LayerType::Gather:
            return IsGatherSupported(infos[0],
                                     infos[1],
                                     infos[2],
                                     *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
        case LayerType::GatherNd:
            return IsGatherNdSupported(infos[0],
                                       infos[1],
                                       infos[2],
        case LayerType::Input:
            return IsInputSupported(infos[0], reasonIfUnsupported);
        return IsInstanceNormalizationSupported(infos[0],
                                                infos[1],
                                                *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
                                                    (&descriptor)),
        return IsL2NormalizationSupported(infos[0],
                                          infos[1],
                                          *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
        return IsLogicalBinarySupported(infos[0],
                                        infos[1],
                                        infos[2],
                                        *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
        return IsLogSoftmaxSupported(infos[0],
                                     infos[1],
                                     *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
        case LayerType::Lstm:
            return IsLstmSupported(infos[0],
                                   infos[1],
                                   infos[2],
                                   infos[3],
                                   infos[4],
                                   infos[5],
                                   infos[6],
                                   *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
                                   lstmParamsInfo.value(),
        case LayerType::QLstm:
            return IsQLstmSupported(infos[0],
                                    infos[1],
                                    infos[2],
                                    infos[3],
                                    infos[4],
                                    infos[5],
                                    *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
                                    lstmParamsInfo.value(),
        case LayerType::Maximum:
            return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::Mean:
            return IsMeanSupported(infos[0],
                                   infos[1],
                                   *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
        case LayerType::Minimum:
            return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        return IsNormalizationSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
        case LayerType::Output:
            return IsOutputSupported(infos[0], reasonIfUnsupported);
        case LayerType::Pad:
            return IsPadSupported(infos[0],
                                  infos[1],
                                  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
        case LayerType::Permute:
            return IsPermuteSupported(infos[0],
                                      infos[1],
                                      *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
        return IsPooling2dSupported(infos[0],
                                    infos[1],
                                    *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
        case LayerType::Prelu:
            return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::Quantize:
            return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Reshape:
            return IsReshapeSupported(infos[0],
                                      infos[1],
                                      *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
        case LayerType::Resize:
            return IsResizeSupported(infos[0],
                                     infos[1],
                                     *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
        case LayerType::Reduce:
            return IsReduceSupported(infos[0],
                                     infos[1],
                                     *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
        case LayerType::Slice:
            return IsSliceSupported(infos[0],
                                    infos[1],
                                    *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
        case LayerType::Softmax:
            return IsSoftmaxSupported(infos[0],
                                      infos[1],
                                      *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
        return IsSpaceToBatchNdSupported(infos[0],
                                         infos[1],
                                         *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
        return IsSpaceToDepthSupported(infos[0],
                                       infos[1],
                                       *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
        case LayerType::Splitter:
        {
            // First TensorInfo is the input; the rest are the split outputs.
            std::vector<TensorInfo> outputInfos;
            for (uint32_t i = 1; i < infos.size(); i++)
            {
                outputInfos.push_back(infos[i]);
            }
            return IsSplitterSupported(infos[0],
                                       {outputInfos.begin(), outputInfos.end()},
                                       *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
        }
        case LayerType::Stack:
        {
            std::vector<const TensorInfo*> inputInfos;
            for (uint32_t i = 0; i < infos.size() - 1; i++)
            {
                inputInfos.push_back(&infos[i]);
            }
            return IsStackSupported(inputInfos,
                                    infos[infos.size() - 1],
                                    *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
        }
        return IsStridedSliceSupported(infos[0],
                                       infos[1],
                                       *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
        return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        return IsTransposeSupported(infos[0],
                                    infos[1],
                                    *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
            if (infos[3] == TensorInfo())
            {
                return IsTransposeConvolution2dSupported(infos[0],
                                                         infos[1],
                                                         desc,
                                                         infos[2],
                                                         EmptyOptional(),
            }
            else
            {
                return IsTransposeConvolution2dSupported(infos[0],
                                                         infos[1],
                                                         desc,
                                                         infos[2],
                                                         infos[3],
            }
        }
        case LayerType::Cast:
            return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
        return IsChannelShuffleSupported(infos[0],
                                         infos[1],
                                         *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
            if (infos[3] == TensorInfo())
            {
                return IsConvolution3dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                EmptyOptional(),
            }
            else
            {
                return IsConvolution3dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                infos[3],
            }
        }
        case LayerType::Debug:
            return IsDebugSupported(infos[0], infos[1], reasonIfUnsupported);
        return IsDetectionPostProcessSupported(infos[0],
                                               infos[1],
                                               infos[2],
                                               infos[3],
                                               infos[4],
                                               infos[5],
                                               infos[6],
                                               *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>
                                                   (&descriptor)),
        return IsFakeQuantizationSupported(infos[0],
                                           *(PolymorphicDowncast<const FakeQuantizationDescriptor*>(&descriptor)),
        case LayerType::MemCopy:
            return IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Rank:
            return IsRankSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Shape:
            return IsShapeSupported(infos[0], infos[1], reasonIfUnsupported);
        {
            if (infos.size() != 6)
            {
                throw InvalidArgumentException("Invalid number of UnidirectionalSequenceLstm TensorInfos. TensorInfos "
                                               "should be of format: {input, outputStateIn, cellStateIn, "
                                               "hiddenStateOutputVal, cellStateOutputVal, output}");
            }
            auto desc = *(PolymorphicDowncast<const UnidirectionalSequenceLstmDescriptor*>(&descriptor));
            // NOTE(review): the call line (presumably
            // `return IsUnidirectionalSequenceLstmSupported(infos[0],`) was
            // dropped by the extraction.
                infos[1],
                infos[2],
                infos[3],
                infos[4],
                infos[5],
                desc,
                lstmParamsInfo.value(),
        }
        return IsPooling3dSupported(infos[0],
                                    infos[1],
                                    *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
        case LayerType::Map:
            return true;
        case LayerType::Unmap:
            return true;
        case LayerType::Merge:
            return LayerSupportBase::IsMergeSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
            // NOTE(review): the QuantizedLstm call line was dropped here.
            infos[1],
            infos[2],
            infos[3],
            infos[4],
            quantizedLstmInputParamsInfo.value(),
        default:
            // layers not supported in neon by default:
            // precompiled, standin, switch
            return false;
    }
}
537 
// Reference-backend support check for Activation: input/output data types
// must be in `supportedTypes` and descriptor.m_Function must be one of the
// activation functions handled by the local ActivationFunctionSupported rule.
//
// NOTE(review): extraction dropped the first signature line, the six
// DataType entries of `supportedTypes`, the TypesAreEqual/rank-check call
// lines, and the activation-function case labels.
    const TensorInfo& output,
    const ActivationDescriptor& descriptor,
    Optional<std::string&> reasonIfUnsupported) const
{
    bool supported = true;

    // Define supported types.
    std::array<DataType,6> supportedTypes = {
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference activation: input type not supported.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference activation: output type not supported.");

        "Reference activation: input and output types mismatched.");

        "Reference activation: input and output shapes are of different rank.");


    // Local rule: true iff the requested activation function is implemented
    // by the reference backend.
    struct ActivationFunctionSupported : public Rule
    {
        ActivationFunctionSupported(const ActivationDescriptor& desc)
        {
            switch(desc.m_Function)
            {
                // (supported-function case labels lost in extraction)
                {
                    m_Res = true;
                    break;
                }
                default:
                {
                    m_Res = false;
                    break;
                }
            }
        }
    };

    // Function is supported
    supported &= CheckSupportRule(ActivationFunctionSupported(descriptor), reasonIfUnsupported,
                                  "Reference activation: function not supported.");

    return supported;
}
604 
// Reference-backend support check for Addition: both inputs and the output
// must have a supported, mutually consistent data type.
//
// NOTE(review): extraction dropped the first signature line, the seven
// DataType entries, and the TypesAreEqual/broadcast CheckSupportRule call
// lines (only their message strings survive).
    const TensorInfo& input1,
    const TensorInfo& output,
    Optional<std::string&> reasonIfUnsupported) const
{
    bool supported = true;

    std::array<DataType,7> supportedTypes = {
    };

    supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
                                  "Reference addition: input 0 is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
                                  "Reference addition: input 1 is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference addition: output is not a supported type.");

        "Reference addition: input 0 and Input 1 types are mismatched");

        "Reference addition: input and output types are mismatched");

        "Reference addition: shapes are not suitable for implicit broadcast.");

    return supported;
}
641 
// Reference-backend support check for ArgMinMax: the input must be one of
// eight supported input types and the output one of two supported output
// types (the entries themselves were lost in extraction).
//
// NOTE(review): extraction dropped the first signature line and the array
// entries.
    const armnn::ArgMinMaxDescriptor &descriptor,
    armnn::Optional<std::string &> reasonIfUnsupported) const
{

    std::array<DataType, 8> supportedInputTypes =
    {
    };

    std::array<DataType,2> supportedOutputTypes = {
    };

    bool supported = true;

    supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
                                  "Reference ArgMinMax: input is not a supported type.");
    supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
                                  "Reference ArgMinMax: output type not supported");

    return supported;
}
673 
// Reference-backend support check for BatchMatMul: both inputs and the
// output must share a supported data type; rank checks (messages survive)
// require each input to be rank 2 or greater.
//
// NOTE(review): extraction dropped the first signature line, the six
// DataType entries, and several CheckSupportRule call lines (only their
// message strings survive).
    const TensorInfo& inputY,
    const TensorInfo& output,
    const BatchMatMulDescriptor& descriptor,
    Optional<std::string &> reasonIfUnsupported) const
{

    std::array<DataType, 6> supportedTypes =
    {
    };

    bool supported = true;

    supported &= CheckSupportRule(TypeAnyOf(inputX, supportedTypes), reasonIfUnsupported,
                                  "Reference batch matrix multiplication: input X is not a supported type");

    supported &= CheckSupportRule(TypeAnyOf(inputY, supportedTypes), reasonIfUnsupported,
                                  "Reference batch matrix multiplication: input Y is not a supported type");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference batch matrix multiplication: output is not a supported type");

    supported &= CheckSupportRule(TypesAreEqual(inputX, inputY), reasonIfUnsupported,
                                  "Reference batch matrix multiplication: input X and input Y types are mismatched");

        "Reference batch matrix multiplication: inputs and output types are mismatched");

        "Reference batch matrix multiplication: input X is not of rank 2 or greater");

        "Reference batch matrix multiplication: input Y is not of rank 2 or greater");

    return supported;
}
718 
// Reference-backend support check for BatchNormalization: input, output and
// all four statistics tensors (mean/variance/beta/gamma) must have a
// supported data type.
//
// NOTE(review): extraction dropped the first signature line, the six
// DataType entries, and the TypesAreEqual call line (message survives).
    const TensorInfo& output,
    const TensorInfo& mean,
    const TensorInfo& variance,
    const TensorInfo& beta,
    const TensorInfo& gamma,
    const BatchNormalizationDescriptor& descriptor,
    Optional<std::string&> reasonIfUnsupported) const
{

    std::array<DataType, 6> supportedTypes =
    {
    };

    bool supported = true;

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference batch normalization: input is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference batch normalization: output is not a supported type.");

        "Reference batch normalization: input and output types are mismatched");

    supported &= CheckSupportRule(TypeAnyOf(mean, supportedTypes), reasonIfUnsupported,
                                  "Reference batch normalization: mean is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(variance, supportedTypes), reasonIfUnsupported,
                                  "Reference batch normalization: variance is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(beta, supportedTypes), reasonIfUnsupported,
                                  "Reference batch normalization: beta is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(gamma, supportedTypes), reasonIfUnsupported,
                                  "Reference batch normalization: gamma is not a supported type.");

    return supported;
}
764 
// Reference-backend support check for BatchToSpaceNd: input/output data
// types must be supported and both tensors must be rank 4 (enforced via
// TensorNumDimensionsAreCorrect with CreateIncorrectDimensionsErrorMsg).
//
// NOTE(review): extraction dropped the first signature line, the six
// DataType entries, the TypesAreEqual call line, and parts of the two
// rank-check calls (e.g. `output.GetNumDimensions(),`).
    const TensorInfo& output,
    const BatchToSpaceNdDescriptor& descriptor,
    Optional<std::string&> reasonIfUnsupported) const
{

    bool supported = true;

    // Names used to build the rank-mismatch diagnostics below.
    std::string batchToSpaceNdLayerStr = "batchToSpaceNd";
    std::string inputTensorStr = "input";
    std::string outputTensorStr = "output";

    // Define supported types.
    std::array<DataType,6> supportedTypes =
    {
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference BatchToSpaceNd: input type not supported.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference BatchToSpaceNd: output type not supported.");

        "Reference BatchToSpaceNd: input and output types mismatched.");

        CreateIncorrectDimensionsErrorMsg(4,
                                          batchToSpaceNdLayerStr,
                                          outputTensorStr).data());

    supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(input, 4),
        CreateIncorrectDimensionsErrorMsg(4,
                                          input.GetNumDimensions(),
                                          batchToSpaceNdLayerStr,
                                          inputTensorStr).data());

    return supported;
}
813 
// Reference-backend support check for Cast: input and output may be any of
// nine supported types (entries lost in extraction); the element-count
// equality rule's call line was also dropped (message survives).
    const TensorInfo& output,
    Optional<std::string&> reasonIfUnsupported) const
{
    std::array<DataType, 9> supportedInputTypes =
    {
    };

    bool supported = true;
    supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
                                  "Reference cast: input is not a supported type");


    supported &= CheckSupportRule(TypeAnyOf(output, supportedInputTypes), reasonIfUnsupported,
                                  "Reference cast: output is not a supported type");

        "Reference cast: input and output shapes have different number of total elements");

    return supported;
}
842 
// Reference-backend support check for ChannelShuffle: input and output data
// types must be supported and equal (the TypesAreEqual call line was dropped;
// its message survives).
    const TensorInfo& output,
    const ChannelShuffleDescriptor& descriptor,
    Optional<std::string&> reasonIfUnsupported) const
{
    bool supported = true;

    // Define supported output and inputs types.
    std::array<DataType, 7> supportedTypes =
    {
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference ChannelShuffle: input is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference ChannelShuffle: output is not a supported type.");

        "Reference ChannelShuffle: input and output types are mismatched.");

    return supported;
}
873 
874 
// Reference-backend support check for Comparison ops: input 0 must be a
// supported type; surviving messages show the dropped rules also required
// matching input types and a Boolean output.
    const TensorInfo& input1,
    const TensorInfo& output,
    const ComparisonDescriptor& descriptor,
    Optional<std::string&> reasonIfUnsupported) const
{
    std::array<DataType, 8> supportedInputTypes =
    {
    };

    bool supported = true;
    supported &= CheckSupportRule(TypeAnyOf(input0, supportedInputTypes), reasonIfUnsupported,
                                  "Reference comparison: input 0 is not a supported type");

        "Reference comparison: input 0 and Input 1 types are mismatched");

        "Reference comparison: output is not of type Boolean");

    return supported;
}
905 
// Reference-backend support check for Concat: the output and every (non-null)
// input must have a supported data type; a dropped rule (message survives)
// also required each input's type to match the output's.
bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
    const TensorInfo& output,
    const OriginsDescriptor& descriptor,
    Optional<std::string&> reasonIfUnsupported) const
{

    bool supported = true;
    std::array<DataType,7> supportedTypes =
    {
    };

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference concatenation: output type not supported");
    for (const TensorInfo* input : inputs)
    {
        ARMNN_ASSERT(input != nullptr);
        supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
                                      "Reference concatenation: input type not supported");

            "Reference concatenation: input and output types mismatched.");
    }

    return supported;
}
938 
// Reference-backend support check for Constant: output type must be one of
// eight supported types (entries lost in extraction).
    Optional<std::string&> reasonIfUnsupported) const
{
    std::array<DataType,8> supportedTypes =
    {
    };

    return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                            "Reference constant: output is not a supported type.");
}
956 
// Reference-backend support check for ConvertFp16ToFp32: true only for a
// Float16 input and Float32 output — every other type slot is wired to a
// False* predicate.
//
// NOTE(review): the extraction dropped the signature's first line and the
// two `IsSupportedForDataTypeGeneric(reasonIfUnsupported,` call-opening
// lines; only the predicate argument lists survive.
    const TensorInfo& output,
    Optional<std::string&> reasonIfUnsupported) const
{
        input.GetDataType(),
        &TrueFunc<>,
        &FalseInputFuncF32<>,
        &FalseFuncU8<>,
        &FalseFuncI32<>,
        &FalseFuncU8<>) &&
        &FalseOutputFuncF16<>,
        &TrueFunc<>,
        &FalseFuncU8<>,
        &FalseFuncI32<>,
        &FalseFuncU8<>));
}
976 
// Reference-backend support check for ConvertFp32ToFp16: the mirror of the
// Fp16->Fp32 check — true only for a Float32 input and Float16 output.
//
// NOTE(review): the extraction dropped the signature's first line and the
// two `IsSupportedForDataTypeGeneric(reasonIfUnsupported,` call-opening
// lines; only the predicate argument lists survive.
    const TensorInfo& output,
    Optional<std::string&> reasonIfUnsupported) const
{
        input.GetDataType(),
        &FalseInputFuncF16<>,
        &TrueFunc<>,
        &FalseFuncU8<>,
        &FalseFuncI32<>,
        &FalseFuncU8<>) &&
        &TrueFunc<>,
        &FalseOutputFuncF32<>,
        &FalseFuncU8<>,
        &FalseFuncI32<>,
        &FalseFuncU8<>));
}
996 
// Reference-backend support check for Convolution2d. Input/output/weights
// types must be supported; for quantized 8-bit inputs the weights must come
// from a narrower per-tensor/per-axis set; optional biases have their own
// supported-type set.
//
// NOTE(review): extraction dropped the first signature line and all DataType
// array entries; two TypesAreEqual call lines survive only as messages.
    const TensorInfo& output,
    const Convolution2dDescriptor& descriptor,
    const TensorInfo& weights,
    const Optional<TensorInfo>& biases,
    Optional<std::string&> reasonIfUnsupported) const
{
    bool supported = true;

    // Define supported types.
    std::array<DataType,7> supportedTypes =
    {
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference Convolution2d: input is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference Convolution2d: output is not a supported type.");

        "Reference Convolution2d: input and output types mismatched.");


    const DataType inputType = input.GetDataType();
    if (IsQuantized8BitType(inputType))
    {
        // Quantized input: weights restricted to the quantized weight types.
        std::array<DataType, 3> supportedWeightTypes =
        {
        };

        supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
                                      "Reference Convolution2d: weights type not supported for quantized input.");
    }
    else
    {
        supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
                                      "Reference Convolution2d: weights is not a supported type.");

            "Reference Convolution2d: input and weights types mismatched.");
    }

    if (biases.has_value())
    {
        std::array<DataType,4> biasesSupportedTypes =
        {
        };

        supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
                                      "Reference Convolution2d: biases is not a supported type.");
    }

    return supported;
}
1065 
// Reference-backend support check for Convolution3d — structurally identical
// to the Convolution2d check: general type validation, a narrower weight
// set for quantized 8-bit input, and an optional-bias type check.
//
// NOTE(review): extraction dropped the first signature line and all DataType
// array entries; two TypesAreEqual call lines survive only as messages.
    const TensorInfo& output,
    const Convolution3dDescriptor& descriptor,
    const TensorInfo& weights,
    const Optional<TensorInfo>& biases,
    Optional<std::string&> reasonIfUnsupported) const
{
    bool supported = true;

    // Define supported types.
    std::array<DataType,7> supportedTypes =
    {
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference Convolution3d: input is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference Convolution3d: output is not a supported type.");

        "Reference Convolution3d: input and output types mismatched.");

    const DataType inputType = input.GetDataType();
    if (IsQuantized8BitType(inputType))
    {
        std::array<DataType, 3> supportedWeightTypes =
        {
        };

        supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
                                      "Reference Convolution3d: weights type not supported for quantized input.");
    }
    else
    {
        supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
                                      "Reference Convolution3d: weights is not a supported type.");

            "Reference Convolution3d: input and weights types mismatched.");
    }

    if (biases.has_value())
    {
        std::array<DataType,4> biasesSupportedTypes =
        {
        };

        supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
                                      "Reference Convolution3d: biases is not a supported type.");
    }

    return supported;
}
1133 
// Reference-backend support check for the Debug layer: input/output types
// must be supported; a dropped rule (message survives) required them equal.
    const TensorInfo& output,
    Optional<std::string&> reasonIfUnsupported) const
{
    bool supported = true;

    std::array<DataType, 8> supportedTypes =
    {
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference for Debug layer: input type not supported");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference for Debug layer: output type not supported");

        "Reference for Debug layer: input and output types are mismatched");

    return supported;
}
1163 
// Reference-backend support check for DepthToSpace: input/output types must
// be supported; a dropped rule (message survives) required them equal.
    const TensorInfo& output,
    const DepthToSpaceDescriptor& descriptor,
    Optional<std::string&> reasonIfUnsupported) const
{
    bool supported = true;

    std::array<DataType,6> supportedTypes =
    {
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference DepthToSpace: input type not supported");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference DepthToSpace: output type not supported");

        "Reference DepthToSpace: input and output types are mismatched");

    return supported;
}
1192 
// Reference-backend support check for DepthwiseConvolution2d — same shape as
// the Convolution2d check: general type validation, a narrower weight set
// for quantized 8-bit input, and an optional-bias type check.
//
// NOTE(review): extraction dropped the first signature line and all DataType
// array entries; two TypesAreEqual call lines survive only as messages.
    const TensorInfo& output,
    const DepthwiseConvolution2dDescriptor& descriptor,
    const TensorInfo& weights,
    const Optional<TensorInfo>& biases,
    Optional<std::string&> reasonIfUnsupported) const
{
    bool supported = true;

    // Define supported types.
    std::array<DataType,7> supportedTypes =
    {
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference DepthwiseConvolution2d: input is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference DepthwiseConvolution2d: output is not a supported type.");

        "Reference DepthwiseConvolution2d: input and output types mismatched.");

    const DataType inputType = input.GetDataType();
    if (IsQuantized8BitType(inputType))
    {
        std::array<DataType, 3> supportedWeightTypes =
        {
        };

        supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
                                      "Reference DepthwiseConvolution2d: weights type not supported for "
                                      "quantized input.");
    }
    else
    {
        supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
                                      "Reference DepthwiseConvolution2d: weights is not a supported type.");

            "Reference DepthwiseConvolution2d: input and weights types mismatched.");
    }

    if (biases.has_value())
    {
        std::array<DataType,4> biasesSupportedTypes =
        {
        };
        supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
                                      "Reference DepthwiseConvolution2d: biases is not a supported type.");
    }

    return supported;

}
1261 
// Reference-backend support check for Dequantize: quantized input types and
// float output types are validated separately; surviving messages show
// dropped rules also rejected per-axis-quantized input and mismatched
// element counts.
    const TensorInfo& output,
    Optional<std::string&> reasonIfUnsupported) const
{
    bool supported = true;

    std::array<DataType,5> supportedInputTypes = {
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
                                  "Reference for Dequantize layer: input type not supported.");

        "Reference for Dequantize layer: per-axis quantized input not supported.");

    std::array<DataType,3> supportedOutputTypes = {
    };

    supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
                                  "Reference for Dequantize layer: output type not supported.");

        "Reference for Dequantize layer: input/output shapes have different num total "
        "elements.");

    return supported;
}
1296 
1298  const TensorInfo& scores,
1299  const TensorInfo& anchors,
1300  const TensorInfo& detectionBoxes,
1301  const TensorInfo& detectionClasses,
1302  const TensorInfo& detectionScores,
1303  const TensorInfo& numDetections,
1304  const DetectionPostProcessDescriptor& descriptor,
1305  Optional<std::string&> reasonIfUnsupported) const
1306 {
1308 
1309  bool supported = true;
1310 
1311  std::array<DataType,6> supportedInputTypes =
1312  {
1318  };
1319 
1320  supported &= CheckSupportRule(TypeAnyOf(boxEncodings, supportedInputTypes), reasonIfUnsupported,
1321  "Reference DetectionPostProcess: input 0 is not a supported type.");
1322 
1323  supported &= CheckSupportRule(TypeAnyOf(scores, supportedInputTypes), reasonIfUnsupported,
1324  "Reference DetectionPostProcess: input 1 is not a supported type.");
1325 
1326  return supported;
1327 }
1328 
1330  const TensorInfo& output,
1331  const DepthwiseConvolution2dDescriptor& descriptor,
1332  const TensorInfo& weights,
1333  const Optional<TensorInfo>& biases,
1334  Optional<std::string&> reasonIfUnsupported) const
1335 {
1337 }
1338 
1340  const TensorInfo& input1,
1341  const TensorInfo& output,
1342  Optional<std::string&> reasonIfUnsupported) const
1343 {
1344  bool supported = true;
1345 
1346  std::array<DataType,7> supportedTypes = {
1353  };
1354 
1355  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1356  "Reference division: input 0 is not a supported type.");
1357 
1358  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1359  "Reference division: input 1 is not a supported type.");
1360 
1361  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1362  "Reference division: output is not a supported type.");
1363 
1365  "Reference division: input 0 and Input 1 types are mismatched");
1366 
1368  "Reference division: input and output types are mismatched");
1369 
1371  "Reference division: shapes are not suitable for implicit broadcast.");
1372 
1373  return supported;
1374 }
1375 
1377  const TensorInfo& output,
1378  const ElementwiseUnaryDescriptor& descriptor,
1379  Optional<std::string&> reasonIfUnsupported) const
1380 {
1382 
1383  std::array<DataType, 7> supportedTypes =
1384  {
1391  };
1392 
1393  std::array<DataType, 1> logicalSupportedTypes =
1394  {
1396  };
1397 
1398  bool supported = true;
1399 
1400  if (descriptor.m_Operation == UnaryOperation::LogicalNot)
1401  {
1402  supported &= CheckSupportRule(TypeAnyOf(input, logicalSupportedTypes), reasonIfUnsupported,
1403  "Reference elementwise unary: input type not supported");
1404 
1405  supported &= CheckSupportRule(TypeAnyOf(output, logicalSupportedTypes), reasonIfUnsupported,
1406  "Reference elementwise unary: output type not supported");
1407  }
1408  else
1409  {
1410  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1411  "Reference elementwise unary: input type not supported");
1412 
1413  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1414  "Reference elementwise unary: output type not supported");
1415  }
1416 
1418  "Reference elementwise unary: input and output types not matching");
1419 
1421  "Reference elementwise unary: input and output shapes"
1422  "have different number of total elements");
1423 
1424  return supported;
1425 }
1426 
1428  const FakeQuantizationDescriptor& descriptor,
1429  Optional<std::string&> reasonIfUnsupported) const
1430 {
1432  bool supported = true;
1433 
1434  std::array<DataType,1> supportedTypes =
1435  {
1437  };
1438 
1439  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1440  "Reference fake quantization: input type not supported.");
1441 
1442  return supported;
1443 }
1444 
1446  const TensorInfo& output,
1447  const FillDescriptor& descriptor,
1448  Optional<std::string&> reasonIfUnsupported) const
1449 {
1452 
1453  bool supported = true;
1454 
1455  std::array<DataType,3> supportedTypes =
1456  {
1460  };
1461 
1463  "Reference Fill: input type not supported.");
1464 
1465  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1466  "Reference Fill: output type not supported.");
1467  return supported;
1468 }
1469 
1471  const TensorInfo& output,
1472  Optional<std::string&> reasonIfUnsupported) const
1473 {
1475  bool supported = true;
1476 
1477  std::array<DataType,3> supportedTypes =
1478  {
1481  };
1482 
1483  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1484  "Reference Floor: input type not supported.");
1485 
1486  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1487  "Reference Floor: output type not supported.");
1488 
1489  return supported;
1490 }
1491 
1493  const TensorInfo& output,
1494  const TensorInfo& weights,
1495  const TensorInfo& biases,
1496  const FullyConnectedDescriptor& descriptor,
1497  Optional<std::string&> reasonIfUnsupported) const
1498 {
1499  bool supported = true;
1500 
1501  // Define supported types.
1502  std::array<DataType,6> supportedTypes =
1503  {
1509  };
1510 
1511  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1512  "Reference Fully Connected: input type not supported.");
1513 
1514  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1515  "Reference Fully Connected: output type not supported.");
1516 
1517  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1518  "Reference Fully Connected: weights type not supported.");
1519 
1521  "Reference Fully Connected: input and output types mismatched.");
1522 
1523  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1524  "Reference Fully Connected: weights is not a supported type.");
1525 
1527  "Reference Fully Connected: input and weights types mismatched.");
1528 
1529  if (descriptor.m_BiasEnabled)
1530  {
1531  // Defined supported types for bias
1532  std::array<DataType, 5>
1533  supportedBiasTypes =
1534  {
1539  };
1540 
1541  supported &= CheckSupportRule(TypeAnyOf(biases, supportedBiasTypes), reasonIfUnsupported,
1542  "Reference Fully Connected: bias type not supported.");
1543 
1545  "Reference Fully Connected: bias and weight types mismatch.");
1546 
1548  "Reference Fully Connected: bias type inferred from weights is incompatible.");
1549 
1551  "Reference Fully Connected: bias must have 1 dimension.");
1552 
1553  }
1554 
1555  return supported;
1556 }
1557 
1559  const armnn::TensorInfo& input1,
1560  const armnn::TensorInfo& output,
1561  armnn::Optional<std::string&> reasonIfUnsupported) const
1562 {
1563  bool supported = true;
1564  std::array<DataType,7> supportedTypes =
1565  {
1572  };
1573 
1574  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1575  "Reference GatherNd: input type not supported");
1576 
1577  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1578  "Reference GatherNd: output type not supported");
1579 
1581  "Reference GatherNd: indices (input1) type not supported");
1582 
1584  "Reference GatherNd: input and output types not matching");
1585 
1586  return supported;
1587 }
1588 
1590  const armnn::TensorInfo& input1,
1591  const armnn::TensorInfo& output,
1592  const GatherDescriptor& descriptor,
1593  armnn::Optional<std::string&> reasonIfUnsupported) const
1594 {
1595  bool supported = true;
1596  std::array<DataType,7> supportedTypes =
1597  {
1604  };
1605 
1607  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1608  "Reference Gather: input type not supported");
1609 
1610  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1611  "Reference Gather: output type not supported");
1612 
1614  "Reference Gather: indices (input1) type not supported");
1615 
1617  "Reference Gather: input and output types not matching");
1618 
1619  return supported;
1620 }
1621 
1623  Optional<std::string&> /*reasonIfUnsupported*/) const
1624 {
1625  return true;
1626 }
1627 
1629  const TensorInfo& output,
1630  const InstanceNormalizationDescriptor& descriptor,
1631  Optional<std::string&> reasonIfUnsupported) const
1632 {
1634  // Define supported types
1635  std::array<DataType, 3> supportedTypes =
1636  {
1639  };
1640 
1641  bool supported = true;
1642 
1643  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1644  "Reference Instance Normalization: input type not supported.");
1645 
1646  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1647  "Reference Instance Normalization: output type not supported.");
1648 
1650  "Reference Instance Normalization: input and output types mismatched.");
1651 
1653  "Reference Instance Normalization: input and output shapes have different "
1654  "num total elements.");
1655 
1656  return supported;
1657 }
1658 
1660  const TensorInfo& output,
1661  const L2NormalizationDescriptor& descriptor,
1662  Optional<std::string&> reasonIfUnsupported) const
1663 {
1665  // Define supported types
1666  std::array<DataType, 6> supportedTypes =
1667  {
1673  };
1674 
1675  bool supported = true;
1676 
1677  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1678  "Reference L2normalization: input type not supported.");
1679 
1680  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1681  "Reference L2normalization: output type not supported.");
1682 
1684  "Reference L2normalization: input and output types mismatched.");
1685 
1687  "Reference L2normalization: input and output shapes have different "
1688  "num total elements.");
1689 
1690  return supported;
1691 }
1692 
1694  const TensorInfo& input1,
1695  const TensorInfo& output,
1696  const LogicalBinaryDescriptor& descriptor,
1697  Optional<std::string&> reasonIfUnsupported) const
1698 {
1700 
1701  std::array<DataType, 1> supportedTypes =
1702  {
1704  };
1705 
1706  bool supported = true;
1707  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1708  "Reference LogicalBinary: input 0 type not supported");
1709  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1710  "Reference LogicalBinary: input 1 type not supported");
1711 
1713  "Reference LogicalBinary: input and output types do not match");
1714 
1715  return supported;
1716 }
1717 
1719  const TensorInfo& output,
1720  const LogSoftmaxDescriptor& descriptor,
1721  Optional<std::string&> reasonIfUnsupported) const
1722 {
1724 
1725  std::array<DataType, 3> supportedTypes =
1726  {
1729  };
1730 
1731  bool supported = true;
1732  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1733  "Reference LogSoftmax: input type not supported");
1734 
1735  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1736  "Reference LogSoftmax: output type not supported");
1737 
1739  "Reference LogSoftmax: input and output types do not match");
1740 
1741  return supported;
1742 }
1743 
1745  const TensorInfo& outputStateIn,
1746  const TensorInfo& cellStateIn,
1747  const TensorInfo& scratchBuffer,
1748  const TensorInfo& outputStateOut,
1749  const TensorInfo& cellStateOut,
1750  const TensorInfo& output,
1751  const LstmDescriptor& descriptor,
1752  const LstmInputParamsInfo& paramsInfo,
1753  Optional<std::string&> reasonIfUnsupported) const
1754 {
1757 
1758  bool supported = true;
1759 
1760  std::array<DataType,3> supportedTypes = {
1763  };
1764 
1765  // check inputs and outputs
1766  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1767  "Reference Lstm: input is not a supported type.");
1769  "Reference Lstm: input and outputStateIn types are mismatched");
1771  "Reference Lstm: input and cellStateIn types are mismatched");
1773  "Reference Lstm: input and scratchBuffer types are mismatched");
1775  "Reference Lstm: input and outputStateOut types are mismatched");
1777  "Reference Lstm: input and cellStateOut types are mismatched");
1778 
1780  "Reference Lstm: input and output types are mismatched");
1781  // check layer parameters
1782  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToForgetWeights()), reasonIfUnsupported,
1783  "Reference Lstm: input and InputToForgetWeights types are mismatched");
1784  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToCellWeights()), reasonIfUnsupported,
1785  "Reference Lstm: input and InputToCellWeights types are mismatched");
1786  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToOutputWeights()), reasonIfUnsupported,
1787  "Reference Lstm: input and InputToOutputWeights types are mismatched");
1788  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToForgetWeights()), reasonIfUnsupported,
1789  "Reference Lstm: input and RecurrentToForgetWeights types are mismatched");
1790  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToCellWeights()), reasonIfUnsupported,
1791  "Reference Lstm: input and RecurrentToCellWeights types are mismatched");
1792  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToOutputWeights()), reasonIfUnsupported,
1793  "Reference Lstm: input and RecurrentToOutputWeights types are mismatched");
1794  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetGateBias()), reasonIfUnsupported,
1795  "Reference Lstm: input and ForgetGateBias types are mismatched");
1796  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellBias()), reasonIfUnsupported,
1797  "Reference Lstm: input and CellBias types are mismatched");
1798  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputGateBias()), reasonIfUnsupported,
1799  "Reference Lstm: input and OutputGateBias types are mismatched");
1800  if (!descriptor.m_CifgEnabled)
1801  {
1802  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToInputWeights()), reasonIfUnsupported,
1803  "Reference Lstm: input and InputToInputWeights types are mismatched");
1804  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToInputWeights()),
1806  "Reference Lstm: input and RecurrentToInputWeights types are mismatched");
1807  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputGateBias()), reasonIfUnsupported,
1808  "Reference Lstm: input and InputGateBias types are mismatched");
1809  if (descriptor.m_PeepholeEnabled)
1810  {
1811  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToInputWeights()),
1813  "Reference Lstm: input and CellToInputWeights types are mismatched");
1814  }
1815  }
1816  if (descriptor.m_PeepholeEnabled)
1817  {
1818  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToForgetWeights()), reasonIfUnsupported,
1819  "Reference Lstm: input and CellToForgetWeights types are mismatched");
1820  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToOutputWeights()), reasonIfUnsupported,
1821  "Reference Lstm: input and CellToOutputWeights types are mismatched");
1822  }
1823  if (descriptor.m_ProjectionEnabled)
1824  {
1825  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionWeights()), reasonIfUnsupported,
1826  "Reference Lstm: input and mProjectionWeights types are mismatched");
1827  if (paramsInfo.m_ProjectionBias != nullptr)
1828  {
1829  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
1830  "Reference Lstm: input and ProjectionBias types are mismatched");
1831  }
1832  }
1833  if (descriptor.m_LayerNormEnabled)
1834  {
1835  if (!descriptor.m_CifgEnabled)
1836  {
1837  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputLayerNormWeights()),
1839  "Reference Lstm: input and InputLayerNormWeights types are mismatched");
1840  }
1841  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetLayerNormWeights()),
1843  "Reference Lstm: input and ForgetLayerNormWeights types are mismatched");
1844  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellLayerNormWeights()),
1846  "Reference Lstm: input and CellLayerNormWeights types are mismatched");
1847  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputLayerNormWeights()),
1849  "Reference Lstm: input and OutputLayerNormWeights types are mismatched");
1850  }
1851 
1852  return supported;
1853 }
1854 
1856  const TensorInfo& input1,
1857  const TensorInfo& output,
1858  Optional<std::string&> reasonIfUnsupported) const
1859 {
1860  bool supported = true;
1861 
1862  std::array<DataType,7> supportedTypes = {
1869  };
1870 
1871  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1872  "Reference maximum: input 0 is not a supported type.");
1873 
1874  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1875  "Reference maximum: input 1 is not a supported type.");
1876 
1877  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1878  "Reference maximum: output is not a supported type.");
1879 
1881  "Reference maximum: input 0 and Input 1 types are mismatched");
1882 
1884  "Reference maximum: input and output types are mismatched");
1885 
1887  "Reference maximum: shapes are not suitable for implicit broadcast.");
1888 
1889  return supported;
1890 }
1891 
1893  const TensorInfo& output,
1894  const MeanDescriptor& descriptor,
1895  Optional<std::string&> reasonIfUnsupported) const
1896 {
1897  bool supported = true;
1898  std::string meanLayerStr = "Mean";
1899  std::string outputTensorStr = "output";
1900 
1901  std::array<DataType,6> supportedTypes =
1902  {
1908  };
1909 
1910  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1911  "Reference Mean: input type not supported.");
1912 
1914  "Reference Mean: input and output types are mismatched");
1915 
1916  if (descriptor.m_KeepDims)
1917  {
1920  CreateIncorrectDimensionsErrorMsg(input.GetNumDimensions(),
1922  meanLayerStr, outputTensorStr).data());
1923  }
1924  else if (descriptor.m_Axis.empty())
1925  {
1928  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1929  meanLayerStr, outputTensorStr).data());
1930  }
1931  else
1932  {
1933  auto outputDim = input.GetNumDimensions() - armnn::numeric_cast<unsigned int>(descriptor.m_Axis.size());
1934 
1935  if (outputDim > 0)
1936  {
1937  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, outputDim),
1939  CreateIncorrectDimensionsErrorMsg(outputDim, output.GetNumDimensions(),
1940  meanLayerStr, outputTensorStr).data());
1941  }
1942  else
1943  {
1946  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1947  meanLayerStr, outputTensorStr).data());
1948  }
1949  }
1950 
1951  return supported;
1952 }
1953 
1955  const TensorInfo &output,
1956  Optional<std::string &> reasonIfUnsupported) const
1957 {
1958  bool supported = true;
1959 
1960  std::array<DataType,7> supportedTypes =
1961  {
1969  };
1970 
1971  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1972  "Reference MemCopy: input type not supported");
1973 
1974  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1975  "Reference MemCopy: output type not supported");
1976 
1978  "Reference MemCopy: input and output types are mismatched");
1979 
1980  return supported;
1981 }
1982 
1984  const TensorInfo& input1,
1985  const TensorInfo& output,
1986  Optional<std::string&> reasonIfUnsupported) const
1987 {
1988  bool supported = true;
1989 
1990  std::array<DataType,7> supportedTypes = {
1997  };
1998 
1999  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2000  "Reference minimum: input 0 is not a supported type.");
2001 
2002  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2003  "Reference minimum: input 1 is not a supported type.");
2004 
2005  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2006  "Reference minimum: output is not a supported type.");
2007 
2009  "Reference minimum: input 0 and Input 1 types are mismatched");
2010 
2012  "Reference minimum: input and output types are mismatched");
2013 
2015  "Reference minimum: shapes are not suitable for implicit broadcast.");
2016 
2017  return supported;
2018 }
2019 
2021  const TensorInfo& input1,
2022  const TensorInfo& output,
2023  Optional<std::string&> reasonIfUnsupported) const
2024 {
2025  bool supported = true;
2026 
2027  std::array<DataType,7> supportedTypes = {
2034  };
2035 
2036  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2037  "Reference multiplication: input 0 is not a supported type.");
2038 
2039  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2040  "Reference multiplication: input 1 is not a supported type.");
2041 
2042  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2043  "Reference multiplication: output is not a supported type.");
2044 
2046  "Reference multiplication: input 0 and Input 1 types are mismatched");
2047 
2049  "Reference multiplication: input and output types are mismatched");
2050 
2052  "Reference multiplication: shapes are not suitable for implicit broadcast.");
2053 
2054  return supported;
2055 }
2056 
2058  const TensorInfo& output,
2059  const NormalizationDescriptor& descriptor,
2060  Optional<std::string&> reasonIfUnsupported) const
2061 {
2063 
2064  // Define supported types
2065  std::array<DataType, 6> supportedTypes =
2066  {
2072  };
2073 
2074  bool supported = true;
2075 
2076  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2077  "Reference normalization: input type not supported.");
2078 
2079  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2080  "Reference normalization: output type not supported.");
2081 
2083  "Reference normalization: input and output shapes have different "
2084  "num total elements.");
2085 
2086  return supported;
2087 }
2088 
2090  Optional<std::string&> /*reasonIfUnsupported*/) const
2091 {
2092  return true;
2093 }
2094 
2096  const TensorInfo& output,
2097  const PadDescriptor& descriptor,
2098  Optional<std::string&> reasonIfUnsupported) const
2099 {
2101  bool supported = true;
2102 
2103  // Define supported output and inputs types.
2104  std::array<DataType,6> supportedTypes =
2105  {
2111  };
2112 
2113  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2114  "Reference pad: input is not a supported type.");
2115 
2116  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2117  "Reference pad: output is not a supported type.");
2118 
2120  "Reference pad: input and output types are mismatched.");
2121 
2122  return supported;
2123 }
2124 
2126  const TensorInfo& output,
2127  const PermuteDescriptor& descriptor,
2128  Optional<std::string&> reasonIfUnsupported) const
2129 {
2131  bool supported = true;
2132 
2133  // Define supported output and inputs types.
2134  std::array<DataType, 6> supportedTypes =
2135  {
2142  };
2143 
2144  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2145  "Reference permute: input is not a supported type.");
2146 
2147  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2148  "Reference permute: output is not a supported type.");
2149 
2151  "Reference permute: input and output types are mismatched.");
2152 
2153  return supported;
2154 }
2155 
2157  const TensorInfo& output,
2158  const Pooling2dDescriptor& descriptor,
2159  Optional<std::string&> reasonIfUnsupported) const
2160 {
2162  bool supported = true;
2163 
2164  // Define supported output and inputs types.
2165  std::array<DataType,6> supportedTypes =
2166  {
2172  };
2173 
2174  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2175  "Reference poolind2d: input is not a supported type.");
2176 
2177  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2178  "Reference poolind2d: output is not a supported type.");
2179 
2181  "Reference poolind2d: input and output types are mismatched.");
2182 
2183  return supported;
2184 }
2185 
2187  const TensorInfo& output,
2188  const Pooling3dDescriptor& descriptor,
2189  Optional<std::string&> reasonIfUnsupported) const
2190 {
2192  bool supported = true;
2193 
2194  // Define supported output and inputs types.
2195  std::array<DataType,6> supportedTypes =
2196  {
2202  };
2203 
2204  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2205  "Reference poolind3d: input is not a supported type.");
2206 
2207  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2208  "Reference poolind3d: output is not a supported type.");
2209 
2211  "Reference poolind3d: input and output types are mismatched.");
2212 
2213  return supported;
2214 }
2215 
2216 
2218  const TensorInfo& previousOutputIn,
2219  const TensorInfo& previousCellStateIn,
2220  const TensorInfo& outputStateOut,
2221  const TensorInfo& cellStateOut,
2222  const TensorInfo& output,
2223  const QLstmDescriptor& descriptor,
2224  const LstmInputParamsInfo& paramsInfo,
2225  Optional<std::string&> reasonIfUnsupported) const
2226 {
2227  IgnoreUnused(input);
2235 
2237 
2238  return true;
2239 }
2240 
2242  const TensorInfo& output,
2243  Optional<std::string&> reasonIfUnsupported) const
2244 {
2245  bool supported = true;
2246 
2247  // Define supported input types.
2248  std::array<DataType,7> supportedInputTypes = {
2255  };
2256 
2257  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
2258  "Reference quantize: input type not supported.");
2259 
2260  // Define supported output types.
2261  std::array<DataType,4> supportedOutputTypes = {
2266  };
2267  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
2268  "Reference quantize: output type not supported.");
2269 
2271  "Reference quantize: input and output shapes have different num total elements.");
2272 
2273  return supported;
2274 }
2275 
2277  const TensorInfo& output,
2278  Optional<std::string&> reasonIfUnsupported) const
2279 {
2280  IgnoreUnused(input);
2281  // Define supported output types.
2282  std::array<DataType,1> supportedOutputTypes =
2283  {
2285  };
2286 
2287  return CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
2288  "Reference rank: input type not supported.");
2289 }
2290 
2292  const TensorInfo& output,
2293  const ReduceDescriptor& descriptor,
2294  Optional<std::string&> reasonIfUnsupported) const
2295 {
2297  bool supported = true;
2298  std::array<DataType,7> supportedTypes =
2299  {
2306  };
2307 
2308  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2309  "Reference Reduce: input type not supported");
2310 
2311  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2312  "Reference Reduce: output type not supported");
2313 
2315  "Reference Reduce: input and output types not matching");
2316 
2317  return supported;
2318 }
2319 
2321  const TensorInfo& output,
2322  const ReshapeDescriptor& descriptor,
2323  Optional<std::string&> reasonIfUnsupported) const
2324 {
2327  // Define supported output types.
2328  std::array<DataType,8> supportedOutputTypes =
2329  {
2338  };
2339 
2340  return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
2341  "Reference reshape: input type not supported.");
2342 }
2343 
2345  const TensorInfo& output,
2346  const ResizeDescriptor& descriptor,
2347  Optional<std::string&> reasonIfUnsupported) const
2348 {
2350  bool supported = true;
2351  std::array<DataType,6> supportedTypes =
2352  {
2359  };
2360 
2361  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2362  "Reference Resize: input type not supported");
2363 
2364  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2365  "Reference Resize: output type not supported");
2366 
2368  "Reference Resize: input and output types not matching");
2369 
2370  return supported;
2371 }
2372 
2374  const TensorInfo& output,
2375  Optional<std::string&> reasonIfUnsupported) const
2376 {
2377  IgnoreUnused(input);
2378  bool supported = true;
2379 
2380  std::array<DataType, 1> supportedTypes =
2381  {
2383  };
2384 
2385  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2386  "Reference Shape: output type not supported");
2387 
2388  return supported;
2389 }
2390 
2392  const TensorInfo& output,
2393  const SliceDescriptor& descriptor,
2394  Optional<std::string&> reasonIfUnsupported) const
2395 {
2397  bool supported = true;
2398 
2399  std::array<DataType, 5> supportedTypes =
2400  {
2405  };
2406 
2407  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2408  "Reference Slice: input type not supported");
2409 
2410  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2411  "Reference Slice: output type not supported");
2412 
2414  "Reference Slice: input and output types are mismatched");
2415 
2416  return supported;
2417 }
2418 
2420  const TensorInfo& output,
2421  const SoftmaxDescriptor& descriptor,
2422  Optional<std::string&> reasonIfUnsupported) const
2423 {
2425  bool supported = true;
2426  std::array<DataType,7> supportedTypes =
2427  {
2434  };
2435 
2436  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2437  "Reference Softmax: output type not supported");
2438 
2439  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2440  "Reference Softmax: input type not supported");
2441 
2443  "Reference Softmax: input type not supported");
2444 
2445  return supported;
2446 }
2447 
2449  const TensorInfo& output,
2450  const SpaceToBatchNdDescriptor& descriptor,
2451  Optional<std::string&> reasonIfUnsupported) const
2452 {
2454  bool supported = true;
2455  std::array<DataType,6> supportedTypes =
2456  {
2462  };
2463 
2464  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2465  "Reference SpaceToBatchNd: input type not supported");
2466 
2467  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2468  "Reference SpaceToBatchNd: output type not supported");
2469 
2471  "Reference SpaceToBatchNd: input and output types are mismatched");
2472 
2473  return supported;
2474 }
2475 
2477  const TensorInfo& output,
2478  const SpaceToDepthDescriptor& descriptor,
2479  Optional<std::string&> reasonIfUnsupported) const
2480 {
2481 
2483  bool supported = true;
2484 
2485  std::array<DataType,6> supportedTypes =
2486  {
2492  };
2493 
2494  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2495  "Reference SpaceToDepth: input type not supported");
2496 
2497  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2498  "Reference SpaceToDepth: output type not supported");
2499 
2501  "Reference SpaceToDepth: input and output types are mismatched");
2502 
2503  return supported;
2504 }
2505 
2507  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
2508  const ViewsDescriptor& descriptor,
2509  Optional<std::string&> reasonIfUnsupported) const
2510 {
2512  bool supported = true;
2513  std::array<DataType,6> supportedTypes =
2514  {
2520  };
2521 
2522  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2523  "Reference splitter: output type not supported");
2524  for (const TensorInfo& output : outputs)
2525  {
2526  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2527  "Reference splitter: input type not supported");
2528 
2530  "Reference splitter: input and output types mismatched.");
2531  }
2532 
2533  return supported;
2534 }
2535 
2536 bool RefLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
2537  const TensorInfo& output,
2538  const StackDescriptor& descriptor,
2539  Optional<std::string&> reasonIfUnsupported) const
2540 {
2542 
2543  bool supported = true;
2544  std::array<DataType,7> supportedTypes =
2545  {
2552  };
2553 
2554  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2555  "Reference stack: output type not supported");
2556  for (const TensorInfo* input : inputs)
2557  {
2558  ARMNN_ASSERT(input != nullptr);
2559  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
2560  "Reference stack: input type not supported");
2561 
2563  "Reference stack: input and output types mismatched.");
2564  }
2565 
2566  return supported;
2567 }
2568 
2570  const TensorInfo& output,
2571  const StridedSliceDescriptor& descriptor,
2572  Optional<std::string&> reasonIfUnsupported) const
2573 {
2575  bool supported = true;
2576 
2577  std::array<DataType,5> supportedTypes =
2578  {
2583  };
2584 
2585  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2586  "Reference StridedSlice: input type not supported");
2587 
2588  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2589  "Reference StridedSlice: output type not supported");
2590 
2592  "Reference StridedSlice: input and output types are mismatched");
2593 
2594  return supported;
2595 }
2596 
2598  const TensorInfo& input1,
2599  const TensorInfo& output,
2600  Optional<std::string&> reasonIfUnsupported) const
2601 {
2602  bool supported = true;
2603 
2604  std::array<DataType,7> supportedTypes = {
2611  };
2612 
2613  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2614  "Reference subtraction: input 0 is not a supported type.");
2615 
2616  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2617  "Reference subtraction: input 1 is not a supported type.");
2618 
2619  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2620  "Reference subtraction: output is not a supported type.");
2621 
2623  "Reference subtraction: input 0 and Input 1 types are mismatched");
2624 
2626  "Reference subtraction: input and output types are mismatched");
2627 
2629  "Reference subtraction: shapes are not suitable for implicit broadcast.");
2630 
2631  return supported;
2632 }
2633 
2635  const TensorInfo& alpha,
2636  const TensorInfo& output,
2637  Optional<std::string&> reasonIfUnsupported) const
2638 {
2639  bool supported = true;
2640 
2641  std::array<DataType, 6> supportedTypes
2642  {
2648  };
2649 
2650  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2651  "PReLU: input is not a supported type.");
2652 
2653  supported &= CheckSupportRule(TypeAnyOf(alpha, supportedTypes), reasonIfUnsupported,
2654  "PReLU: alpha is not a supported type.");
2655 
2656  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2657  "PReLU: output is not a supported type.");
2658 
2660  "PReLU: input, alpha and output types are mismatched");
2661 
2663  "PReLU: shapes are not suitable for implicit broadcast");
2664 
2665  return supported;
2666 }
2667 
2669  const TensorInfo& output,
2670  const TransposeConvolution2dDescriptor& descriptor,
2671  const TensorInfo& weights,
2672  const Optional<TensorInfo>& biases,
2673  Optional<std::string&> reasonIfUnsupported) const
2674 {
2676  bool supported = true;
2677 
2678  std::array<DataType,7> supportedTypes =
2679  {
2686  };
2687 
2688  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2689  "Reference TransposeConvolution2d: input is not a supported type.");
2690 
2691  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2692  "Reference TransposeConvolution2d: output is not a supported type.");
2693 
2695  "Reference TransposeConvolution2d: input and output types mismatched.");
2696 
2697 
2698  const DataType inputType = input.GetDataType();
2699  if (IsQuantized8BitType(inputType))
2700  {
2701  std::array<DataType, 3> supportedWeightTypes =
2702  {
2706  };
2707 
2708  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
2709  "Reference TransposeConvolution2d: weights type not supported for "
2710  "quantized input.");
2711  }
2712  else
2713  {
2714  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
2715  "Reference TransposeConvolution2d: weights is not a supported type.");
2716 
2718  "Reference TransposeConvolution2d: input and weights types mismatched.");
2719  }
2720 
2721  if (biases.has_value())
2722  {
2723  std::array<DataType,4> biasesSupportedTypes =
2724  {
2728  };
2729  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
2730  "Reference TransposeConvolution2d: biases is not a supported type.");
2731  }
2732 
2733  return supported;
2734 }
2735 
2737  const TensorInfo& output,
2738  const TransposeDescriptor& descriptor,
2739  Optional<std::string&> reasonIfUnsupported) const
2740 {
2742  bool supported = true;
2743 
2744  // Define supported output and inputs types.
2745  std::array<DataType, 6> supportedTypes =
2746  {
2753  };
2754 
2755  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2756  "Reference transpose: input is not a supported type.");
2757 
2758  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2759  "Reference transpose: output is not a supported type.");
2760 
2762  "Reference transpose: input and output types are mismatched.");
2763 
2764  return supported;
2765 }
2766 
2768  const TensorInfo& input,
2769  const TensorInfo& outputStateIn,
2770  const TensorInfo& cellStateIn,
2771  const TensorInfo& outputStateOut,
2772  const TensorInfo& cellStateOut,
2773  const TensorInfo& output,
2774  const UnidirectionalSequenceLstmDescriptor& descriptor,
2775  const LstmInputParamsInfo& paramsInfo,
2776  Optional<std::string&> reasonIfUnsupported) const
2777 {
2784  bool supported = true;
2785 
2786  std::array<DataType, 2> supportedTypes =
2787  {
2790  };
2791 
2792  std::array<DataType, 2> supportedWeightTypes =
2793  {
2796  };
2797 
2798  std::array<DataType, 3> supportedBiasTypes =
2799  {
2803  };
2804 
2805  // check inputs and outputs
2806  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2807  "Reference UnidirectionalSequenceLstm: input is not a supported type.");
2808  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2809  "Reference UnidirectionalSequenceLstm: output is not a supported type.");
2810 
2811  // check layer parameters
2812  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToForgetWeights(), supportedWeightTypes),
2814  "Reference UnidirectionalSequenceLstm: InputToForgetWeights "
2815  "is not a supported type.");
2816  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToCellWeights(), supportedWeightTypes),
2818  "Reference UnidirectionalSequenceLstm: InputToCellWeights is not a supported type.");
2819  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToOutputWeights(), supportedWeightTypes),
2821  "Reference UnidirectionalSequenceLstm: InputToOutputWeights "
2822  "is not a supported type.");
2823  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToForgetWeights(), supportedWeightTypes),
2825  "Reference UnidirectionalSequenceLstm: RecurrentToForgetWeights "
2826  "is not a supported type.");
2827  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToCellWeights(), supportedWeightTypes),
2829  "Reference UnidirectionalSequenceLstm: RecurrentToCellWeights "
2830  "is not a supported type.");
2831  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToOutputWeights(), supportedWeightTypes),
2833  "Reference UnidirectionalSequenceLstm: RecurrentToOutputWeights "
2834  "is not a supported type.");
2835 
2836  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetForgetGateBias(), supportedBiasTypes), reasonIfUnsupported,
2837  "Reference UnidirectionalSequenceLstm: ForgetGateBias is not a supported type.");
2838  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellBias(), supportedBiasTypes), reasonIfUnsupported,
2839  "Reference UnidirectionalSequenceLstm: CellBias is not a supported type.");
2840  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetOutputGateBias(), supportedBiasTypes), reasonIfUnsupported,
2841  "Reference UnidirectionalSequenceLstm: OutputGateBias is not a supported type.");
2842  if (!descriptor.m_CifgEnabled)
2843  {
2844  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToInputWeights(), supportedWeightTypes),
2846  "Reference UnidirectionalSequenceLstm: InputToInputWeights "
2847  "is not a supported type.");
2848  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToInputWeights(), supportedWeightTypes),
2850  "Reference UnidirectionalSequenceLstm: RecurrentToInputWeights "
2851  "is not a supported type.");
2852  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputGateBias(), supportedBiasTypes), reasonIfUnsupported,
2853  "Reference UnidirectionalSequenceLstm: InputGateBias is not a supported type.");
2854  if (descriptor.m_PeepholeEnabled)
2855  {
2856  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToInputWeights(), supportedWeightTypes),
2858  "Reference UnidirectionalSequenceLstm: CellToInputWeights "
2859  "is not a supported type.");
2860  }
2861  }
2862  if (descriptor.m_PeepholeEnabled)
2863  {
2864  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToForgetWeights(), supportedWeightTypes),
2866  "Reference UnidirectionalSequenceLstm: CellToForgetWeights "
2867  "is not a supported type.");
2868  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToOutputWeights(), supportedWeightTypes),
2870  "Reference UnidirectionalSequenceLstm: CellToOutputWeights "
2871  "is not a supported type.");
2872  }
2873  if (descriptor.m_ProjectionEnabled)
2874  {
2875  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetProjectionWeights(), supportedWeightTypes),
2877  "Reference UnidirectionalSequenceLstm: ProjectionWeights "
2878  "is not a supported type.");
2879  if (paramsInfo.m_ProjectionBias != nullptr)
2880  {
2881  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
2882  "Reference UnidirectionalSequenceLstm: input and ProjectionBias types "
2883  "are mismatched");
2884  }
2885  }
2886  if (descriptor.m_LayerNormEnabled)
2887  {
2888  if (!descriptor.m_CifgEnabled)
2889  {
2890  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputLayerNormWeights(), supportedWeightTypes),
2892  "Reference UnidirectionalSequenceLstm: InputLayerNormWeights "
2893  "is not a supported type.");
2894  }
2895  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetForgetLayerNormWeights(), supportedWeightTypes),
2897  "Reference UnidirectionalSequenceLstm: ForgetLayerNormWeights "
2898  "is not a supported type.");
2899  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellLayerNormWeights(), supportedWeightTypes),
2901  "Reference UnidirectionalSequenceLstm: CellLayerNormWeights "
2902  "is not a supported type.");
2903  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetOutputLayerNormWeights(), supportedWeightTypes),
2905  "Reference UnidirectionalSequenceLstm: OutputLayerNormWeights "
2906  "is not a supported type.");
2907  }
2908 
2909  return supported;
2910 }
2911 
2912 } // namespace armnn
armnn::RefLayerSupport::IsLstmSupported
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1744
armnn::LayerSupportBase::IsMemImportSupported
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: LayerSupportBase.cpp:397
armnn::ActivationFunction::Abs
@ Abs
armnn::ActivationFunction::Elu
@ Elu
armnn::LayerType::Floor
@ Floor
armnn::RefLayerSupport::IsGatherNdSupported
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1558
armnn::RefLayerSupport::IsConvolution2dSupported
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:997
armnn::LayerType::MemCopy
@ MemCopy
armnn::RefLayerSupport::IsDebugSupported
bool IsDebugSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1134
armnn::RefLayerSupport::IsResizeSupported
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2344
armnn::LayerType::Softmax
@ Softmax
armnn::LayerType::Pooling3d
@ Pooling3d
armnn::RefLayerSupport::IsInputSupported
bool IsInputSupported(const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1622
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::RefLayerSupport::IsActivationSupported
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:538
armnn::ShapesAreSameRank
Definition: LayerSupportRules.hpp:138
armnn::ILayerSupport::outputStateIn
const TensorInfo & outputStateIn
Definition: ILayerSupport.hpp:286
armnn::RefLayerSupport::IsQuantizeSupported
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2241
armnn::LayerType::Transpose
@ Transpose
armnn::RefLayerSupport::IsTransposeConvolution2dSupported
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2668
armnn::IsQuantized8BitType
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:303
armnn::RefLayerSupport::IsL2NormalizationSupported
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1659
armnn::LstmInputParamsInfo
Definition: LstmParams.hpp:63
armnn::LayerType::ChannelShuffle
@ ChannelShuffle
armnn::ILayerSupport::paramsInfo
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo & paramsInfo
Definition: ILayerSupport.hpp:293
armnn::DataType::QAsymmU8
@ QAsymmU8
armnn::RefLayerSupport::IsTransposeSupported
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2736
armnn::GatherDescriptor
A GatherDescriptor for the GatherLayer.
Definition: Descriptors.hpp:932
armnn::NormalizationDescriptor
A NormalizationDescriptor for the NormalizationLayer.
Definition: Descriptors.hpp:757
armnn::ActivationFunction::Linear
@ Linear
armnn::TransposeDescriptor
A TransposeDescriptor for the TransposeLayer.
Definition: Descriptors.hpp:1457
armnn::ElementwiseUnaryDescriptor
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:129
armnn::RefLayerSupport::IsCastSupported
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:814
armnn::PadDescriptor
A PadDescriptor for the PadLayer.
Definition: Descriptors.hpp:1163
armnn::SoftmaxDescriptor
A SoftmaxDescriptor for the SoftmaxLayer.
Definition: Descriptors.hpp:177
armnn::TensorNumDimensionsAreGreaterOrEqualTo
Definition: LayerSupportRules.hpp:189
armnn::LayerType::ConvertFp32ToFp16
@ ConvertFp32ToFp16
armnn::LayerType::L2Normalization
@ L2Normalization
armnn::ILayerSupport::detectionBoxes
const TensorInfo const TensorInfo const TensorInfo & detectionBoxes
Definition: ILayerSupport.hpp:174
armnn::StackDescriptor
A StackDescriptor for the StackLayer.
Definition: Descriptors.hpp:1218
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
armnn::SliceDescriptor
A SliceDescriptor for the SliceLayer.
Definition: Descriptors.hpp:1195
armnn::ILayerSupport::scratchBuffer
const TensorInfo const TensorInfo const TensorInfo & scratchBuffer
Definition: ILayerSupport.hpp:288
armnn::LayerType::Map
@ Map
armnn::DataType::Float16
@ Float16
armnn::LayerType::Input
@ Input
armnn::LayerType::Slice
@ Slice
armnn::ActivationDescriptor
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
armnn::ILayerSupport::reasonIfUnsupported
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
Definition: ILayerSupport.hpp:43
armnn::LstmDescriptor
An LstmDescriptor for the LstmLayer.
Definition: Descriptors.hpp:1069
armnn::LayerType::ElementwiseBinary
@ ElementwiseBinary
armnn::LayerType::Maximum
@ Maximum
armnn::FullyConnectedDescriptor
A FullyConnectedDescriptor for the FullyConnectedLayer.
Definition: Descriptors.hpp:495
armnn::RefLayerSupport::IsNormalizationSupported
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2057
armnn::LayerType::Quantize
@ Quantize
armnn::BatchMatMulDescriptor
A BatchMatMulDescriptor for the BatchMatMul operator.
Definition: Descriptors.hpp:1551
armnn::RefLayerSupport::IsSpaceToBatchNdSupported
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2448
armnn::ResizeDescriptor
A ResizeDescriptor for the ResizeLayer.
Definition: Descriptors.hpp:952
armnn::LayerType::ArgMinMax
@ ArgMinMax
armnn::RefLayerSupport::IsMemCopySupported
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1954
armnn::StridedSliceDescriptor
A StridedSliceDescriptor for the StridedSliceLayer.
Definition: Descriptors.hpp:1270
armnn::LayerType::Subtraction
@ Subtraction
armnn::LayerType::SpaceToBatchNd
@ SpaceToBatchNd
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::Pooling3dDescriptor
A Pooling3dDescriptor for the Pooling3dLayer.
Definition: Descriptors.hpp:419
armnn::ReduceDescriptor
A ReduceDescriptor for the REDUCE operators.
Definition: Descriptors.hpp:1505
armnn::RefLayerSupport::IsMeanSupported
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1892
armnn::RefLayerSupport::IsStridedSliceSupported
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2569
PolymorphicDowncast.hpp
armnn::LayerType::Shape
@ Shape
armnn::ILayerSupport::previousOutputIn
const TensorInfo & previousOutputIn
Definition: ILayerSupport.hpp:405
armnn::RefLayerSupport::IsConstantSupported
bool IsConstantSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:939
armnn::ComparisonDescriptor
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::RefLayerSupport::IsOutputSupported
bool IsOutputSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2089
armnn::DataType::Signed32
@ Signed32
armnn::RefLayerSupport::IsSpaceToDepthSupported
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2476
armnn::ILayerSupport::mean
const TensorInfo const TensorInfo & mean
Definition: ILayerSupport.hpp:63
armnn::LayerSupportBase::IsMergeSupported
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: LayerSupportBase.cpp:404
armnn::ActivationFunction::HardSwish
@ HardSwish
armnn::LayerType::Merge
@ Merge
armnn::RefLayerSupport::IsMultiplicationSupported
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2020
armnn::ViewsDescriptor
A ViewsDescriptor for the SplitterLayer.
Definition: Descriptors.hpp:244
armnn::LayerType::Permute
@ Permute
armnn::LayerType::ConvertFp16ToFp32
@ ConvertFp16ToFp32
armnn::LayerSupportBase::IsQuantizedLstmSupported
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: LayerSupportBase.cpp:509
armnn::LayerType::QLstm
@ QLstm
armnn::LayerType::Pad
@ Pad
armnn::RefLayerSupport::IsComparisonSupported
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:875
armnn::LayerType::Addition
@ Addition
armnn::RefLayerSupport::IsGatherSupported
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1589
armnn::LayerType::QuantizedLstm
@ QuantizedLstm
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::LayerType::BatchNormalization
@ BatchNormalization
armnn::LayerType::Reduce
@ Reduce
armnn::RefLayerSupport::IsStackSupported
bool IsStackSupported(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2536
armnn::BiasAndWeightsTypesCompatible
Definition: LayerSupportRules.hpp:126
armnn::TensorNumDimensionsAreCorrect
Definition: LayerSupportRules.hpp:181
RefLayerSupport.hpp
armnn::RefLayerSupport::IsRankSupported
bool IsRankSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2276
armnn::RefLayerSupport::IsConvolution3dSupported
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1066
armnn::LayerType::Division
@ Division
armnn::RefLayerSupport::IsBatchMatMulSupported
bool IsBatchMatMulSupported(const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:674
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::LayerType::Debug
@ Debug
armnn::LayerType::InstanceNormalization
@ InstanceNormalization
armnn::RefLayerSupport::IsLogicalBinarySupported
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const override
Definition: RefLayerSupport.cpp:1693
armnn::RefLayerSupport::IsPreluSupported
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2634
armnn::RefLayerSupport::IsDivisionSupported
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1339
armnn::BatchToSpaceNdDescriptor
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
Definition: Descriptors.hpp:863
armnn::CheckSupportRule
bool CheckSupportRule(F rule, Optional< std::string & > reasonIfUnsupported, const char *reason)
Definition: LayerSupportRules.hpp:38
armnn::OptionalReferenceSwitch< std::is_reference< T >::value, T >::value
const T & value() const
Definition: Optional.hpp:146
armnn::SpaceToDepthDescriptor
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
Definition: Descriptors.hpp:1042
armnn::LayerType::Activation
@ Activation
armnn::ILayerSupport::detectionClasses
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & detectionClasses
Definition: ILayerSupport.hpp:175
armnn::LayerType::Normalization
@ Normalization
armnn::DetectionPostProcessDescriptor
Definition: Descriptors.hpp:701
armnn::LayerType::Comparison
@ Comparison
armnn::LayerType::Stack
@ Stack
armnn::ILayerSupport::descriptor
const TensorInfo const ActivationDescriptor & descriptor
Definition: ILayerSupport.hpp:42
armnn::FillDescriptor
A FillDescriptor for the FillLayer.
Definition: Descriptors.hpp:913
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:479
armnn::LayerType::Reshape
@ Reshape
armnn::ILayerSupport::previousCellStateIn
const TensorInfo const TensorInfo & previousCellStateIn
Definition: ILayerSupport.hpp:406
armnn::LayerType::Gather
@ Gather
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::LayerType::Fill
@ Fill
armnn::RefLayerSupport::IsFakeQuantizationSupported
bool IsFakeQuantizationSupported(const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1427
armnn::ILayerSupport::numDetections
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & numDetections
Definition: ILayerSupport.hpp:177
armnn::LayerType::Resize
@ Resize
armnn::RefLayerSupport::IsConcatSupported
bool IsConcatSupported(const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:906
armnn::ILayerSupport::alpha
const TensorInfo & alpha
Definition: ILayerSupport.hpp:392
armnn::RefLayerSupport::IsBatchToSpaceNdSupported
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:765
armnn::RefLayerSupport::IsDetectionPostProcessSupported
bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1297
armnn::LayerType::Rank
@ Rank
armnn::DepthwiseConvolution2dDescriptor
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
Definition: Descriptors.hpp:647
armnn::MeanDescriptor
A MeanDescriptor for the MeanLayer.
Definition: Descriptors.hpp:1139
armnn::ActivationFunction::Sigmoid
@ Sigmoid
armnn::TensorInfo::GetNumDimensions
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195
armnn::RefLayerSupport::IsLayerSupported
bool IsLayerSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &, Optional< std::string & > reasonIfUnsupported) const override
Definition: RefLayerSupport.cpp:61
armnn::ShapesAreBroadcastCompatible
Definition: LayerSupportRules.hpp:154
armnn::RefLayerSupport::IsChannelShuffleSupported
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:843
LayerSupportRules.hpp
armnn::ActivationFunction::SoftReLu
@ SoftReLu
armnn::ShapesAreSameTotalSize
Definition: LayerSupportRules.hpp:146
armnn::LayerType::LogicalBinary
@ LogicalBinary
armnn::LayerType::UnidirectionalSequenceLstm
@ UnidirectionalSequenceLstm
armnn::RefLayerSupport::IsPooling2dSupported
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2156
armnn::LayerType::Pooling2d
@ Pooling2d
armnn::L2NormalizationDescriptor
A L2NormalizationDescriptor for the L2NormalizationLayer.
Definition: Descriptors.hpp:797
armnn::RefLayerSupport::IsShapeSupported
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2373
armnn::DataType::Float32
@ Float32
armnn::ILayerSupport::input1
const TensorInfo & input1
Definition: ILayerSupport.hpp:48
armnn::ChannelShuffleDescriptor
A ChannelShuffleDescriptor for the ChannelShuffle operator.
Definition: Descriptors.hpp:1529
armnn::Convolution3dDescriptor
A Convolution3dDescriptor for the Convolution3dLayer.
Definition: Descriptors.hpp:576
armnn::LayerType::GatherNd
@ GatherNd
armnn::TypeIs
Definition: LayerSupportRules.hpp:102
armnn::RefLayerSupport::IsMaximumSupported
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1855
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::ILayerSupport::gamma
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & gamma
Definition: ILayerSupport.hpp:66
armnn::LayerType::Minimum
@ Minimum
armnn::LayerType::Constant
@ Constant
armnn::DataType::Signed64
@ Signed64
armnn::RefLayerSupport::IsPadSupported
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2095
armnn::Convolution2dDescriptor
A Convolution2dDescriptor for the Convolution2dLayer.
Definition: Descriptors.hpp:522
armnn::LayerType::Lstm
@ Lstm
armnn::BatchNormalizationDescriptor
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
Definition: Descriptors.hpp:816
armnn::QLstmDescriptor
A QLstmDescriptor for the QLstmLayer.
Definition: Descriptors.hpp:1347
armnn::RefLayerSupport::IsElementwiseUnarySupported
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1376
LayerSupportCommon.hpp
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
armnn::EmptyOptional
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
armnn::LayerType::SpaceToDepth
@ SpaceToDepth
armnn::LayerType::FakeQuantization
@ FakeQuantization
armnn::RefLayerSupport::IsReshapeSupported
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2320
armnn::ILayerSupport::beta
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & beta
Definition: ILayerSupport.hpp:65
armnn::ActivationFunction::Square
@ Square
armnn::ActivationDescriptor::m_Function
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu,...
Definition: Descriptors.hpp:59
armnn::ILayerSupport::weights
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights
Definition: ILayerSupport.hpp:127
armnn::RefLayerSupport::IsDepthToSpaceSupported
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1164
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::ILayerSupport::cellStateIn
const TensorInfo const TensorInfo & cellStateIn
Definition: ILayerSupport.hpp:287
armnn::ILayerSupport::scores
const TensorInfo & scores
Definition: ILayerSupport.hpp:172
armnn::LayerType::Unmap
@ Unmap
armnn::RefLayerSupport::IsDepthwiseConvolutionSupported
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1193
armnn::RefLayerSupport::IsReduceSupported
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2291
armnn::LayerType::DetectionPostProcess
@ DetectionPostProcess
armnn::ILayerSupport::biases
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
Definition: ILayerSupport.hpp:128
armnn::BiasAndWeightsTypesMatch
Definition: LayerSupportRules.hpp:118
armnn::LayerType::Mean
@ Mean
armnn::BaseDescriptor
Base class for all descriptors.
Definition: Descriptors.hpp:22
armnn::OriginsDescriptor
An OriginsDescriptor for the ConcatLayer.
Definition: Descriptors.hpp:201
armnn::RefLayerSupport::IsArgMinMaxSupported
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:642
armnn::ReshapeDescriptor
A ReshapeDescriptor for the ReshapeLayer.
Definition: Descriptors.hpp:990
armnn::RefLayerSupport::IsFloorSupported
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1470
armnn::LayerType::BatchToSpaceNd
@ BatchToSpaceNd
armnn::DataType
DataType
Definition: Types.hpp:48
armnn::PermuteDescriptor
A PermuteDescriptor for the PermuteLayer.
Definition: Descriptors.hpp:149
armnn::TransposeConvolution2dDescriptor
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
Definition: Descriptors.hpp:1407
armnn::IsSupportedForDataTypeGeneric
bool IsSupportedForDataTypeGeneric(Optional< std::string & > reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
Definition: LayerSupportCommon.hpp:27
armnn::LayerType::DepthToSpace
@ DepthToSpace
armnn::RefLayerSupport::IsSliceSupported
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2391
armnn::DataType::BFloat16
@ BFloat16
ARMNN_ASSERT
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
armnn::RefLayerSupport::IsSplitterSupported
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2506
armnn::RefLayerSupport::IsQLstmSupported
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2217
armnn::ILayerSupport::outputs
const std::vector< std::reference_wrapper< TensorInfo > > & outputs
Definition: ILayerSupport.hpp:488
armnn::ActivationFunction::TanH
@ TanH
armnn::Pooling2dDescriptor
A Pooling2dDescriptor for the Pooling2dLayer.
Definition: Descriptors.hpp:359
armnn::LogicalBinaryDescriptor
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
Definition: Descriptors.hpp:1485
armnn::TypeNotPerAxisQuantized
Definition: LayerSupportRules.hpp:110
armnn::RefLayerSupport::IsFillSupported
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1445
armnn::RefLayerSupport::IsInstanceNormalizationSupported
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1628
armnn::RefLayerSupport::IsSubtractionSupported
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2597
armnn::RefLayerSupport::IsMinimumSupported
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1983
armnn::UnaryOperation::LogicalNot
@ LogicalNot
armnn::ILayerSupport::detectionScores
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & detectionScores
Definition: ILayerSupport.hpp:176
armnn::Optional
Definition: Optional.hpp:270
armnn::ILayerSupport::anchors
const TensorInfo const TensorInfo & anchors
Definition: ILayerSupport.hpp:173
armnn::RefLayerSupport::IsDilatedDepthwiseConvolutionSupported
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1329
armnn::RefLayerSupport::IsConvertFp16ToFp32Supported
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:957
armnn::DataType::QSymmS8
@ QSymmS8
armnn::RefLayerSupport::IsAdditionSupported
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:605
armnn::LayerType::Concat
@ Concat
NumericCast.hpp
armnn::ArgMinMaxDescriptor
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
armnn::RefLayerSupport::IsFullyConnectedSupported
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1492
armnn::DataType::QSymmS16
@ QSymmS16
armnn::FakeQuantizationDescriptor
A FakeQuantizationDescriptor for the FakeQuantizationLayer.
Definition: Descriptors.hpp:894
armnn::TypesAreEqual
Definition: LayerSupportRules.hpp:72
armnn::LayerType::Cast
@ Cast
armnn::ActivationFunction::ReLu
@ ReLu
armnn::RefLayerSupport::IsSoftmaxSupported
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2419
IgnoreUnused.hpp
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::ActivationFunction::Sqrt
@ Sqrt
armnn::RefLayerSupport::IsPermuteSupported
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2125
TypesUtils.hpp
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::LayerType::Splitter
@ Splitter
armnn::ILayerSupport::output
const TensorInfo & output
Definition: ILayerSupport.hpp:41
armnn::LayerType::LogSoftmax
@ LogSoftmax
Types.hpp
armnn::RefLayerSupport::IsPooling3dSupported
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2186
armnn::RefLayerSupport::IsDequantizeSupported
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1262
armnn::Rule
Definition: LayerSupportRules.hpp:48
armnn::LayerType::Output
@ Output
armnn::RefLayerSupport::IsBatchNormalizationSupported
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:719
armnn::DataType::Boolean
@ Boolean
armnn::RefLayerSupport::IsUnidirectionalSequenceLstmSupported
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2767
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn::LayerType::Multiplication
@ Multiplication
armnn::LayerType::MemImport
@ MemImport
armnn::RefLayerSupport::IsLogSoftmaxSupported
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const override
Definition: RefLayerSupport.cpp:1718
armnn::LayerType::Prelu
@ Prelu
armnn::ILayerSupport::outputStateOut
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & outputStateOut
Definition: ILayerSupport.hpp:289
armnn::InstanceNormalizationDescriptor
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
Definition: Descriptors.hpp:835
armnn::ActivationFunction::BoundedReLu
@ BoundedReLu
min(a, max(b, input)) ReLu1 & ReLu6.
armnn::ILayerSupport::cellStateOut
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & cellStateOut
Definition: ILayerSupport.hpp:290
armnn::LayerType::Dequantize
@ Dequantize
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:198
armnn::RefLayerSupport::IsConvertFp32ToFp16Supported
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:977
armnn::SpaceToBatchNdDescriptor
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Definition: Descriptors.hpp:1010
armnn::ActivationFunction::LeakyReLu
@ LeakyReLu
armnn::TypeAnyOf
Definition: LayerSupportRules.hpp:90