// Arm NN 23.08 — RefLayerSupport.cpp (reference backend layer-support queries).
// NOTE(review): this file was recovered from a Doxygen HTML page; lines carrying
// hyperlinks (signatures, case labels, enum initializers) were dropped by the
// extraction and have been reconstructed below — verify against upstream.
1 //
2 // Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include "RefLayerSupport.hpp"

#include <armnn/TypesUtils.hpp>
#include <armnn/Types.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <LayerSupportCommon.hpp>
#include <backendsCommon/LayerSupportRules.hpp>

#include <array>
#include <vector>
19 
20 namespace armnn
21 {
22 
23 namespace
24 {
25 
26 template<typename Float32Func, typename Uint8Func, typename ... Params>
27 bool IsSupportedForDataTypeRef(Optional<std::string&> reasonIfUnsupported,
28  DataType dataType,
29  Float32Func floatFuncPtr,
30  Uint8Func uint8FuncPtr,
31  Params&&... params)
32 {
33  return IsSupportedForDataTypeGeneric(reasonIfUnsupported,
34  dataType,
35  &FalseFunc<Params...>,
36  floatFuncPtr,
37  uint8FuncPtr,
38  &FalseFunc<Params...>,
39  &FalseFunc<Params...>,
40  std::forward<Params>(params)...);
41 }
42 
43 } // anonymous namespace
44 
45 namespace
46 {
47 
/// Builds the standard error string reported when a tensor has the wrong rank, e.g.
///   "Reference Activation: Expected 4 dimensions but got 2 dimensions instead, for the 'input' tensor."
/// @param expected   number of dimensions the layer requires
/// @param actual     number of dimensions actually present
/// @param layerStr   human-readable layer name used as the message prefix
/// @param tensorName name of the offending tensor
/// @return the formatted error message
// Fix: parameters taken by const reference (were non-const std::string&, which
// rejected string literals and temporaries for no benefit — they are never mutated).
std::string CreateIncorrectDimensionsErrorMsg(unsigned int expected,
                                              unsigned int actual,
                                              const std::string& layerStr,
                                              const std::string& tensorName)
{
    std::string errorMsg = "Reference " + layerStr + ": Expected " + std::to_string(expected) + " dimensions but got" +
                           " " + std::to_string(actual) + " dimensions instead, for the '" + tensorName + "' tensor.";

    return errorMsg;
}
58 
59 } // anonymous namespace
60 
62  const std::vector<TensorInfo>& infos,
63  const BaseDescriptor& descriptor,
64  const Optional<LstmInputParamsInfo>& lstmParamsInfo,
65  const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmInputParamsInfo,
66  Optional<std::string&> reasonIfUnsupported) const
67 {
68  switch (type)
69  {
71  return IsActivationSupported(infos[0],
72  infos[1],
73  *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
74  reasonIfUnsupported);
76  return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
78  return IsArgMinMaxSupported(infos[0],
79  infos[1],
80  *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
81  reasonIfUnsupported);
83  return IsBatchMatMulSupported(infos[0],
84  infos[1],
85  infos[2],
86  *(PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor)),
87  reasonIfUnsupported);
89  return IsBatchNormalizationSupported(infos[0],
90  infos[1],
91  infos[2],
92  infos[3],
93  infos[4],
94  infos[5],
95  *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
96  (&descriptor)),
97  reasonIfUnsupported);
99  return IsBatchToSpaceNdSupported(infos[0],
100  infos[1],
101  *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
102  reasonIfUnsupported);
104  return IsComparisonSupported(infos[0],
105  infos[1],
106  infos[2],
107  *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
108  reasonIfUnsupported);
109  case LayerType::Concat:
110  {
111  std::vector<const TensorInfo*> inputInfos;
112  for (uint32_t i = 0; i < (infos.size() - 1); i++)
113  {
114  inputInfos.push_back(&infos[i]);
115  }
116  return IsConcatSupported(inputInfos,
117  infos[infos.size() - 1],
118  *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
119  reasonIfUnsupported);
120  }
121  case LayerType::Constant:
122  return IsConstantSupported(infos[0], reasonIfUnsupported);
124  return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
126  return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
128  {
129  if (infos.size() != 4)
130  {
131  throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
132  "TensorInfos should be of format: {input, output, weights, biases}.");
133  }
134 
135  auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
136  if (infos[3] == TensorInfo())
137  {
138  return IsConvolution2dSupported(infos[0],
139  infos[1],
140  desc,
141  infos[2],
142  EmptyOptional(),
143  reasonIfUnsupported);
144  }
145  else
146  {
147  return IsConvolution2dSupported(infos[0],
148  infos[1],
149  desc,
150  infos[2],
151  infos[3],
152  reasonIfUnsupported);
153  }
154  }
156  return IsDepthToSpaceSupported(infos[0],
157  infos[1],
158  *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
159  reasonIfUnsupported);
161  {
162  if (infos.size() != 4)
163  {
164  throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
165  "TensorInfos should be of format: {input, output, weights, biases}.");
166  }
167 
168  auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
169  if (infos[3] == TensorInfo())
170  {
171  return IsDepthwiseConvolutionSupported(infos[0],
172  infos[1],
173  desc,
174  infos[2],
175  EmptyOptional(),
176  reasonIfUnsupported);
177  }
178  else
179  {
180  return IsDepthwiseConvolutionSupported(infos[0],
181  infos[1],
182  desc,
183  infos[2],
184  infos[3],
185  reasonIfUnsupported);
186  }
187  }
189  return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
190  case LayerType::Division:
191  return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
193  {
194  std::array<DataType, 7> supportedTypes =
195  {
202  };
203 
204  bool supported = true;
205  supported &= CheckSupportRule(TypeAnyOf(infos[0], supportedTypes), reasonIfUnsupported,
206  "Reference elementwise unary: input type not supported");
207 
208  supported &= CheckSupportRule(TypeAnyOf(infos[1], supportedTypes), reasonIfUnsupported,
209  "Reference elementwise unary: input type not supported");
210 
211  supported &= CheckSupportRule(TypeAnyOf(infos[2], supportedTypes), reasonIfUnsupported,
212  "Reference elementwise unary: output type not supported");
213 
214  supported &= CheckSupportRule(TypesAreEqual(infos[0], infos[1]), reasonIfUnsupported,
215  "Reference elementwise unary: input types not matching");
216 
217  supported &= CheckSupportRule(TypesAreEqual(infos[0], infos[2]), reasonIfUnsupported,
218  "Reference elementwise unary: input and output types not matching");
219 
220  return supported;
221  }
223  return IsElementwiseUnarySupported(infos[0],
224  infos[1],
225  *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
226  reasonIfUnsupported);
227  case LayerType::Fill:
228  return IsFillSupported(infos[0],
229  infos[1],
230  *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
231  reasonIfUnsupported);
232  case LayerType::Floor:
233  return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
235  return IsFullyConnectedSupported(infos[0],
236  infos[1],
237  infos[2],
238  infos[3],
239  *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
240  reasonIfUnsupported);
241  case LayerType::Gather:
242  return IsGatherSupported(infos[0],
243  infos[1],
244  infos[2],
245  *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
246  reasonIfUnsupported);
247  case LayerType::GatherNd:
248  return IsGatherNdSupported(infos[0],
249  infos[1],
250  infos[2],
251  reasonIfUnsupported);
252  case LayerType::Input:
253  return IsInputSupported(infos[0], reasonIfUnsupported);
255  return IsInstanceNormalizationSupported(infos[0],
256  infos[1],
257  *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
258  (&descriptor)),
259  reasonIfUnsupported);
261  return IsL2NormalizationSupported(infos[0],
262  infos[1],
263  *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
264  reasonIfUnsupported);
266  return IsLogicalBinarySupported(infos[0],
267  infos[1],
268  infos[2],
269  *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
270  reasonIfUnsupported);
272  return IsLogSoftmaxSupported(infos[0],
273  infos[1],
274  *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
275  reasonIfUnsupported);
276  case LayerType::Lstm:
277  return IsLstmSupported(infos[0],
278  infos[1],
279  infos[2],
280  infos[3],
281  infos[4],
282  infos[5],
283  infos[6],
284  *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
285  lstmParamsInfo.value(),
286  reasonIfUnsupported);
287  case LayerType::QLstm:
288  return IsQLstmSupported(infos[0],
289  infos[1],
290  infos[2],
291  infos[3],
292  infos[4],
293  infos[5],
294  *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
295  lstmParamsInfo.value(),
296  reasonIfUnsupported);
297  case LayerType::Maximum:
298  return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
299  case LayerType::Mean:
300  return IsMeanSupported(infos[0],
301  infos[1],
302  *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
303  reasonIfUnsupported);
304  case LayerType::Minimum:
305  return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
307  return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
309  return IsNormalizationSupported(infos[0],
310  infos[1],
311  *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
312  reasonIfUnsupported);
313  case LayerType::Output:
314  return IsOutputSupported(infos[0], reasonIfUnsupported);
315  case LayerType::Pad:
316  return IsPadSupported(infos[0],
317  infos[1],
318  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
319  reasonIfUnsupported);
320  case LayerType::Permute:
321  return IsPermuteSupported(infos[0],
322  infos[1],
323  *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
324  reasonIfUnsupported);
326  return IsPooling2dSupported(infos[0],
327  infos[1],
328  *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
329  reasonIfUnsupported);
330  case LayerType::Prelu:
331  return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
332  case LayerType::Quantize:
333  return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
334  case LayerType::Reshape:
335  return IsReshapeSupported(infos[0],
336  infos[1],
337  *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
338  reasonIfUnsupported);
339  case LayerType::Resize:
340  return IsResizeSupported(infos[0],
341  infos[1],
342  *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
343  reasonIfUnsupported);
345  return IsReverseV2Supported(infos[0],
346  infos[1],
347  infos[2],
348  reasonIfUnsupported);
349  case LayerType::Reduce:
350  return IsReduceSupported(infos[0],
351  infos[1],
352  *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
353  reasonIfUnsupported);
354  case LayerType::Slice:
355  return IsSliceSupported(infos[0],
356  infos[1],
357  *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
358  reasonIfUnsupported);
359  case LayerType::Softmax:
360  return IsSoftmaxSupported(infos[0],
361  infos[1],
362  *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
363  reasonIfUnsupported);
365  return IsSpaceToBatchNdSupported(infos[0],
366  infos[1],
367  *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
368  reasonIfUnsupported);
370  return IsSpaceToDepthSupported(infos[0],
371  infos[1],
372  *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
373  reasonIfUnsupported);
374  case LayerType::Splitter:
375  {
376  std::vector<TensorInfo> outputInfos;
377  for (uint32_t i = 1; i < infos.size(); i++)
378  {
379  outputInfos.push_back(infos[i]);
380  }
381  return IsSplitterSupported(infos[0],
382  {outputInfos.begin(), outputInfos.end()},
383  *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
384  reasonIfUnsupported);
385  }
386  case LayerType::Stack:
387  {
388  std::vector<const TensorInfo*> inputInfos;
389  for (uint32_t i = 0; i < infos.size() - 1; i++)
390  {
391  inputInfos.push_back(&infos[i]);
392  }
393  return IsStackSupported(inputInfos,
394  infos[infos.size() - 1],
395  *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
396  reasonIfUnsupported);
397  }
399  return IsStridedSliceSupported(infos[0],
400  infos[1],
401  *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
402  reasonIfUnsupported);
404  return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
405  case LayerType::Tile:
406  return IsTileSupported(infos[0],
407  infos[1],
408  *(PolymorphicDowncast<const TileDescriptor*>(&descriptor)),
409  reasonIfUnsupported);
411  return IsTransposeSupported(infos[0],
412  infos[1],
413  *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
414  reasonIfUnsupported);
416  {
417  if (infos.size() != 4)
418  {
419  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
420  "TensorInfos should be of format: {input, output, weights, biases}.");
421  }
422 
423  auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
424  if (infos[3] == TensorInfo())
425  {
426  return IsTransposeConvolution2dSupported(infos[0],
427  infos[1],
428  desc,
429  infos[2],
430  EmptyOptional(),
431  reasonIfUnsupported);
432  }
433  else
434  {
435  return IsTransposeConvolution2dSupported(infos[0],
436  infos[1],
437  desc,
438  infos[2],
439  infos[3],
440  reasonIfUnsupported);
441  }
442  }
443  case LayerType::Cast:
444  return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
446  return IsChannelShuffleSupported(infos[0],
447  infos[1],
448  *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
449  reasonIfUnsupported);
451  {
452  if (infos.size() != 4)
453  {
454  throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
455  "TensorInfos should be of format: {input, output, weights, biases}.");
456  }
457 
458  auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
459  if (infos[3] == TensorInfo())
460  {
461  return IsConvolution3dSupported(infos[0],
462  infos[1],
463  desc,
464  infos[2],
465  EmptyOptional(),
466  reasonIfUnsupported);
467  }
468  else
469  {
470  return IsConvolution3dSupported(infos[0],
471  infos[1],
472  desc,
473  infos[2],
474  infos[3],
475  reasonIfUnsupported);
476  }
477  }
478  case LayerType::Debug:
479  return IsDebugSupported(infos[0], infos[1], reasonIfUnsupported);
481  return IsDetectionPostProcessSupported(infos[0],
482  infos[1],
483  infos[2],
484  infos[3],
485  infos[4],
486  infos[5],
487  infos[6],
488  *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>
489  (&descriptor)),
490  reasonIfUnsupported);
492  return IsFakeQuantizationSupported(infos[0],
493  *(PolymorphicDowncast<const FakeQuantizationDescriptor*>(&descriptor)),
494  reasonIfUnsupported);
495  case LayerType::MemCopy:
496  return IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
497  case LayerType::Rank:
498  return IsRankSupported(infos[0], infos[1], reasonIfUnsupported);
499  case LayerType::Shape:
500  return IsShapeSupported(infos[0], infos[1], reasonIfUnsupported);
502  {
503  if (infos.size() != 6)
504  {
505  throw InvalidArgumentException("Invalid number of UnidirectionalSequenceLstm TensorInfos. TensorInfos "
506  "should be of format: {input, outputStateIn, cellStateIn, "
507  "hiddenStateOutputVal, cellStateOutputVal, output}");
508  }
509  auto desc = *(PolymorphicDowncast<const UnidirectionalSequenceLstmDescriptor*>(&descriptor));
511  infos[1],
512  infos[2],
513  infos[3],
514  infos[4],
515  infos[5],
516  desc,
517  lstmParamsInfo.value(),
518  reasonIfUnsupported);
519  }
521  return IsPooling3dSupported(infos[0],
522  infos[1],
523  *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
524  reasonIfUnsupported);
525  case LayerType::Map:
526  return true;
527  case LayerType::Unmap:
528  return true;
530  return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
531  case LayerType::Merge:
532  return LayerSupportBase::IsMergeSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
535  infos[1],
536  infos[2],
537  infos[3],
538  infos[4],
539  quantizedLstmInputParamsInfo.value(),
540  reasonIfUnsupported);
541  default:
542  // layers not supported in neon by default:
543  // precompiled, standin, switch
544  return false;
545  }
546 }
547 
549  const TensorInfo& output,
550  const ActivationDescriptor& descriptor,
551  Optional<std::string&> reasonIfUnsupported) const
552 {
553  bool supported = true;
554 
555  // Define supported types.
556  std::array<DataType,6> supportedTypes = {
562  };
563 
564  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
565  "Reference activation: input type not supported.");
566 
567  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
568  "Reference activation: output type not supported.");
569 
570  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
571  "Reference activation: input and output types mismatched.");
572 
573  supported &= CheckSupportRule(ShapesAreSameRank(input, output), reasonIfUnsupported,
574  "Reference activation: input and output shapes are of different rank.");
575 
576 
577  struct ActivationFunctionSupported : public Rule
578  {
579  ActivationFunctionSupported(const ActivationDescriptor& desc)
580  {
581  switch(desc.m_Function)
582  {
595  {
596  m_Res = true;
597  break;
598  }
599  default:
600  {
601  m_Res = false;
602  break;
603  }
604  }
605  }
606  };
607 
608  // Function is supported
609  supported &= CheckSupportRule(ActivationFunctionSupported(descriptor), reasonIfUnsupported,
610  "Reference activation: function not supported.");
611 
612  return supported;
613 }
614 
616  const TensorInfo& input1,
617  const TensorInfo& output,
618  Optional<std::string&> reasonIfUnsupported) const
619 {
620  bool supported = true;
621 
622  std::array<DataType,7> supportedTypes = {
629  };
630 
631  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
632  "Reference addition: input 0 is not a supported type.");
633 
634  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
635  "Reference addition: input 1 is not a supported type.");
636 
637  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
638  "Reference addition: output is not a supported type.");
639 
640  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
641  "Reference addition: input 0 and Input 1 types are mismatched");
642 
643  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
644  "Reference addition: input and output types are mismatched");
645 
646  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
647  "Reference addition: shapes are not suitable for implicit broadcast.");
648 
649  return supported;
650 }
651 
653  const armnn::ArgMinMaxDescriptor &descriptor,
654  armnn::Optional<std::string &> reasonIfUnsupported) const
655 {
656  IgnoreUnused(descriptor);
657 
658  std::array<DataType, 8> supportedInputTypes =
659  {
667  };
668 
669  std::array<DataType,2> supportedOutputTypes = {
672  };
673 
674  bool supported = true;
675 
676  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
677  "Reference ArgMinMax: input is not a supported type.");
678  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
679  "Reference ArgMinMax: output type not supported");
680 
681  return supported;
682 }
683 
685  const TensorInfo& inputY,
686  const TensorInfo& output,
687  const BatchMatMulDescriptor& descriptor,
688  Optional<std::string &> reasonIfUnsupported) const
689 {
690  IgnoreUnused(descriptor);
691 
692  std::array<DataType, 6> supportedTypes =
693  {
699  };
700 
701  bool supported = true;
702 
703  supported &= CheckSupportRule(TypeAnyOf(inputX, supportedTypes), reasonIfUnsupported,
704  "Reference batch matrix multiplication: input X is not a supported type");
705 
706  supported &= CheckSupportRule(TypeAnyOf(inputY, supportedTypes), reasonIfUnsupported,
707  "Reference batch matrix multiplication: input Y is not a supported type");
708 
709  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
710  "Reference batch matrix multiplication: output is not a supported type");
711 
712  supported &= CheckSupportRule(TypesAreEqual(inputX, inputY), reasonIfUnsupported,
713  "Reference batch matrix multiplication: input X and input Y types are mismatched");
714 
715  supported &= CheckSupportRule(TypesAreEqual(inputX, output), reasonIfUnsupported,
716  "Reference batch matrix multiplication: inputs and output types are mismatched");
717 
719  reasonIfUnsupported,
720  "Reference batch matrix multiplication: input X is not of rank 2 or greater");
721 
723  reasonIfUnsupported,
724  "Reference batch matrix multiplication: input Y is not of rank 2 or greater");
725 
726  return supported;
727 }
728 
730  const TensorInfo& output,
731  const TensorInfo& mean,
732  const TensorInfo& variance,
733  const TensorInfo& beta,
734  const TensorInfo& gamma,
735  const BatchNormalizationDescriptor& descriptor,
736  Optional<std::string&> reasonIfUnsupported) const
737 {
738  IgnoreUnused(descriptor);
739 
740  std::array<DataType, 6> supportedTypes =
741  {
747  };
748 
749  bool supported = true;
750 
751  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
752  "Reference batch normalization: input is not a supported type.");
753 
754  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
755  "Reference batch normalization: output is not a supported type.");
756 
757  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
758  "Reference batch normalization: input and output types are mismatched");
759 
760  supported &= CheckSupportRule(TypeAnyOf(mean, supportedTypes), reasonIfUnsupported,
761  "Reference batch normalization: mean is not a supported type.");
762 
763  supported &= CheckSupportRule(TypeAnyOf(variance, supportedTypes), reasonIfUnsupported,
764  "Reference batch normalization: variance is not a supported type.");
765 
766  supported &= CheckSupportRule(TypeAnyOf(beta, supportedTypes), reasonIfUnsupported,
767  "Reference batch normalization: beta is not a supported type.");
768 
769  supported &= CheckSupportRule(TypeAnyOf(gamma, supportedTypes), reasonIfUnsupported,
770  "Reference batch normalization: gamma is not a supported type.");
771 
772  return supported;
773 }
774 
776  const TensorInfo& output,
777  const BatchToSpaceNdDescriptor& descriptor,
778  Optional<std::string&> reasonIfUnsupported) const
779 {
780  IgnoreUnused(descriptor);
781 
782  bool supported = true;
783 
784  std::string batchToSpaceNdLayerStr = "batchToSpaceNd";
785  std::string inputTensorStr = "input";
786  std::string outputTensorStr = "output";
787 
788  // Define supported types.
789  std::array<DataType,6> supportedTypes =
790  {
796  };
797 
798  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
799  "Reference BatchToSpaceNd: input type not supported.");
800 
801  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
802  "Reference BatchToSpaceNd: output type not supported.");
803 
804  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
805  "Reference BatchToSpaceNd: input and output types mismatched.");
806 
807  return supported;
808 }
809 
811  const TensorInfo& output,
812  Optional<std::string&> reasonIfUnsupported) const
813 {
814  std::array<DataType, 9> supportedInputTypes =
815  {
823  };
824 
825  bool supported = true;
826  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
827  "Reference cast: input is not a supported type");
828 
829 
830  supported &= CheckSupportRule(TypeAnyOf(output, supportedInputTypes), reasonIfUnsupported,
831  "Reference cast: output is not a supported type");
832 
833  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
834  "Reference cast: input and output shapes have different number of total elements");
835 
836  return supported;
837 }
838 
840  const TensorInfo& output,
841  const ChannelShuffleDescriptor& descriptor,
842  Optional<std::string&> reasonIfUnsupported) const
843 {
844  IgnoreUnused(descriptor);
845  bool supported = true;
846 
847  // Define supported output and inputs types.
848  std::array<DataType, 7> supportedTypes =
849  {
856  };
857 
858  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
859  "Reference ChannelShuffle: input is not a supported type.");
860 
861  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
862  "Reference ChannelShuffle: output is not a supported type.");
863 
864  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
865  "Reference ChannelShuffle: input and output types are mismatched.");
866 
867  return supported;
868 }
869 
870 
872  const TensorInfo& input1,
873  const TensorInfo& output,
874  const ComparisonDescriptor& descriptor,
875  Optional<std::string&> reasonIfUnsupported) const
876 {
877  IgnoreUnused(descriptor);
878  std::array<DataType, 8> supportedInputTypes =
879  {
887  };
888 
889  bool supported = true;
890  supported &= CheckSupportRule(TypeAnyOf(input0, supportedInputTypes), reasonIfUnsupported,
891  "Reference comparison: input 0 is not a supported type");
892 
893  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
894  "Reference comparison: input 0 and Input 1 types are mismatched");
895 
896  supported &= CheckSupportRule(TypeIs(output, DataType::Boolean), reasonIfUnsupported,
897  "Reference comparison: output is not of type Boolean");
898 
899  return supported;
900 }
901 
902 bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
903  const TensorInfo& output,
904  const OriginsDescriptor& descriptor,
905  Optional<std::string&> reasonIfUnsupported) const
906 {
907  IgnoreUnused(descriptor);
908 
909  bool supported = true;
910  std::array<DataType,7> supportedTypes =
911  {
918  };
919 
920  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
921  "Reference concatenation: output type not supported");
922  for (const TensorInfo* input : inputs)
923  {
924  ARMNN_ASSERT(input != nullptr);
925  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
926  "Reference concatenation: input type not supported");
927 
928  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
929  "Reference concatenation: input and output types mismatched.");
930  }
931 
932  return supported;
933 }
934 
936  Optional<std::string&> reasonIfUnsupported) const
937 {
938  std::array<DataType,8> supportedTypes =
939  {
947  };
948 
949  return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
950  "Reference constant: output is not a supported type.");
951 }
952 
954  const TensorInfo& output,
955  Optional<std::string&> reasonIfUnsupported) const
956 {
957  return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
958  input.GetDataType(),
959  &TrueFunc<>,
960  &FalseInputFuncF32<>,
961  &FalseFuncU8<>,
962  &FalseFuncI32<>,
963  &FalseFuncU8<>) &&
964  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
965  output.GetDataType(),
966  &FalseOutputFuncF16<>,
967  &TrueFunc<>,
968  &FalseFuncU8<>,
969  &FalseFuncI32<>,
970  &FalseFuncU8<>));
971 }
972 
974  const TensorInfo& output,
975  Optional<std::string&> reasonIfUnsupported) const
976 {
977  return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
978  input.GetDataType(),
979  &FalseInputFuncF16<>,
980  &TrueFunc<>,
981  &FalseFuncU8<>,
982  &FalseFuncI32<>,
983  &FalseFuncU8<>) &&
984  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
985  output.GetDataType(),
986  &TrueFunc<>,
987  &FalseOutputFuncF32<>,
988  &FalseFuncU8<>,
989  &FalseFuncI32<>,
990  &FalseFuncU8<>));
991 }
992 
994  const TensorInfo& output,
995  const Convolution2dDescriptor& descriptor,
996  const TensorInfo& weights,
997  const Optional<TensorInfo>& biases,
998  Optional<std::string&> reasonIfUnsupported) const
999 {
1000  bool supported = true;
1001 
1002  // Define supported types.
1003  std::array<DataType,7> supportedTypes =
1004  {
1011  };
1012 
1013  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1014  "Reference Convolution2d: input is not a supported type.");
1015 
1016  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1017  "Reference Convolution2d: output is not a supported type.");
1018 
1019  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1020  "Reference Convolution2d: input and output types mismatched.");
1021 
1022 
1023  const DataType inputType = input.GetDataType();
1024  if (IsQuantized8BitType(inputType))
1025  {
1026  std::array<DataType, 3> supportedWeightTypes =
1027  {
1031  };
1032 
1033  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1034  "Reference Convolution2d: weights type not supported for quantized input.");
1035  }
1036  else
1037  {
1038  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1039  "Reference Convolution2d: weights is not a supported type.");
1040 
1041  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1042  "Reference Convolution2d: input and weights types mismatched.");
1043  }
1044 
1045  if (biases.has_value())
1046  {
1047  std::array<DataType,4> biasesSupportedTypes =
1048  {
1052  };
1053 
1054  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1055  "Reference Convolution2d: biases is not a supported type.");
1056  }
1057  IgnoreUnused(descriptor);
1058 
1059  return supported;
1060 }
1061 
1063  const TensorInfo& output,
1064  const Convolution3dDescriptor& descriptor,
1065  const TensorInfo& weights,
1066  const Optional<TensorInfo>& biases,
1067  Optional<std::string&> reasonIfUnsupported) const
1068 {
1069  bool supported = true;
1070 
1071  // Define supported types.
1072  std::array<DataType,7> supportedTypes =
1073  {
1080  };
1081 
1082  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1083  "Reference Convolution3d: input is not a supported type.");
1084 
1085  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1086  "Reference Convolution3d: output is not a supported type.");
1087 
1088  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1089  "Reference Convolution3d: input and output types mismatched.");
1090 
1091  const DataType inputType = input.GetDataType();
1092  if (IsQuantized8BitType(inputType))
1093  {
1094  std::array<DataType, 3> supportedWeightTypes =
1095  {
1099  };
1100 
1101  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1102  "Reference Convolution3d: weights type not supported for quantized input.");
1103  }
1104  else
1105  {
1106  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1107  "Reference Convolution3d: weights is not a supported type.");
1108 
1109  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1110  "Reference Convolution3d: input and weights types mismatched.");
1111  }
1112 
1113  if (biases.has_value())
1114  {
1115  std::array<DataType,4> biasesSupportedTypes =
1116  {
1120  };
1121 
1122  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1123  "Reference Convolution3d: biases is not a supported type.");
1124  }
1125  IgnoreUnused(descriptor);
1126 
1127  return supported;
1128 }
1129 
1131  const TensorInfo& output,
1132  Optional<std::string&> reasonIfUnsupported) const
1133 {
1134  bool supported = true;
1135 
1136  std::array<DataType, 8> supportedTypes =
1137  {
1146  };
1147 
1148  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1149  "Reference for Debug layer: input type not supported");
1150 
1151  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1152  "Reference for Debug layer: output type not supported");
1153 
1154  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1155  "Reference for Debug layer: input and output types are mismatched");
1156 
1157  return supported;
1158 }
1159 
1161  const TensorInfo& output,
1162  const DepthToSpaceDescriptor& descriptor,
1163  Optional<std::string&> reasonIfUnsupported) const
1164 {
1165  IgnoreUnused(descriptor);
1166  bool supported = true;
1167 
1168  std::array<DataType,6> supportedTypes =
1169  {
1175  };
1176 
1177  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1178  "Reference DepthToSpace: input type not supported");
1179 
1180  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1181  "Reference DepthToSpace: output type not supported");
1182 
1183  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1184  "Reference DepthToSpace: input and output types are mismatched");
1185 
1186  return supported;
1187 }
1188 
1190  const TensorInfo& output,
1191  const DepthwiseConvolution2dDescriptor& descriptor,
1192  const TensorInfo& weights,
1193  const Optional<TensorInfo>& biases,
1194  Optional<std::string&> reasonIfUnsupported) const
1195 {
1196  IgnoreUnused(descriptor);
1197  bool supported = true;
1198 
1199  // Define supported types.
1200  std::array<DataType,7> supportedTypes =
1201  {
1208  };
1209 
1210  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1211  "Reference DepthwiseConvolution2d: input is not a supported type.");
1212 
1213  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1214  "Reference DepthwiseConvolution2d: output is not a supported type.");
1215 
1216  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1217  "Reference DepthwiseConvolution2d: input and output types mismatched.");
1218 
1219  const DataType inputType = input.GetDataType();
1220  if (IsQuantized8BitType(inputType))
1221  {
1222  std::array<DataType, 3> supportedWeightTypes =
1223  {
1227  };
1228 
1229  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1230  "Reference DepthwiseConvolution2d: weights type not supported for "
1231  "quantized input.");
1232  }
1233  else
1234  {
1235  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1236  "Reference DepthwiseConvolution2d: weights is not a supported type.");
1237 
1238  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1239  "Reference DepthwiseConvolution2d: input and weights types mismatched.");
1240  }
1241 
1242  if (biases.has_value())
1243  {
1244  std::array<DataType,4> biasesSupportedTypes =
1245  {
1249  };
1250  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1251  "Reference DepthwiseConvolution2d: biases is not a supported type.");
1252  }
1253 
1254  return supported;
1255 
1256 }
1257 
1259  const TensorInfo& output,
1260  Optional<std::string&> reasonIfUnsupported) const
1261 {
1262  bool supported = true;
1263 
1264  std::array<DataType,5> supportedInputTypes = {
1270  };
1271 
1272  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
1273  "Reference for Dequantize layer: input type not supported.");
1274 
1275  supported &= CheckSupportRule(TypeNotPerAxisQuantized(input), reasonIfUnsupported,
1276  "Reference for Dequantize layer: per-axis quantized input not supported.");
1277 
1278  std::array<DataType,3> supportedOutputTypes = {
1281  };
1282 
1283  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
1284  "Reference for Dequantize layer: output type not supported.");
1285 
1286  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1287  "Reference for Dequantize layer: input/output shapes have different num total "
1288  "elements.");
1289 
1290  return supported;
1291 }
1292 
1294  const TensorInfo& scores,
1295  const TensorInfo& anchors,
1296  const TensorInfo& detectionBoxes,
1297  const TensorInfo& detectionClasses,
1298  const TensorInfo& detectionScores,
1299  const TensorInfo& numDetections,
1300  const DetectionPostProcessDescriptor& descriptor,
1301  Optional<std::string&> reasonIfUnsupported) const
1302 {
1303  IgnoreUnused(anchors, detectionBoxes, detectionClasses, detectionScores, numDetections, descriptor);
1304 
1305  bool supported = true;
1306 
1307  std::array<DataType,6> supportedInputTypes =
1308  {
1314  };
1315 
1316  supported &= CheckSupportRule(TypeAnyOf(boxEncodings, supportedInputTypes), reasonIfUnsupported,
1317  "Reference DetectionPostProcess: input 0 is not a supported type.");
1318 
1319  supported &= CheckSupportRule(TypeAnyOf(scores, supportedInputTypes), reasonIfUnsupported,
1320  "Reference DetectionPostProcess: input 1 is not a supported type.");
1321 
1322  return supported;
1323 }
1324 
1326  const TensorInfo& output,
1327  const DepthwiseConvolution2dDescriptor& descriptor,
1328  const TensorInfo& weights,
1329  const Optional<TensorInfo>& biases,
1330  Optional<std::string&> reasonIfUnsupported) const
1331 {
1332  return IsDepthwiseConvolutionSupported(input, output, descriptor, weights, biases, reasonIfUnsupported);
1333 }
1334 
1336  const TensorInfo& input1,
1337  const TensorInfo& output,
1338  Optional<std::string&> reasonIfUnsupported) const
1339 {
1340  bool supported = true;
1341 
1342  std::array<DataType,7> supportedTypes = {
1349  };
1350 
1351  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1352  "Reference division: input 0 is not a supported type.");
1353 
1354  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1355  "Reference division: input 1 is not a supported type.");
1356 
1357  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1358  "Reference division: output is not a supported type.");
1359 
1360  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1361  "Reference division: input 0 and Input 1 types are mismatched");
1362 
1363  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1364  "Reference division: input and output types are mismatched");
1365 
1366  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1367  "Reference division: shapes are not suitable for implicit broadcast.");
1368 
1369  return supported;
1370 }
1371 
1373  const TensorInfo& output,
1374  const ElementwiseUnaryDescriptor& descriptor,
1375  Optional<std::string&> reasonIfUnsupported) const
1376 {
1377  IgnoreUnused(descriptor);
1378 
1379  std::array<DataType, 7> supportedTypes =
1380  {
1387  };
1388 
1389  std::array<DataType, 1> logicalSupportedTypes =
1390  {
1392  };
1393 
1394  bool supported = true;
1395 
1396  if (descriptor.m_Operation == UnaryOperation::LogicalNot)
1397  {
1398  supported &= CheckSupportRule(TypeAnyOf(input, logicalSupportedTypes), reasonIfUnsupported,
1399  "Reference elementwise unary: input type not supported");
1400 
1401  supported &= CheckSupportRule(TypeAnyOf(output, logicalSupportedTypes), reasonIfUnsupported,
1402  "Reference elementwise unary: output type not supported");
1403  }
1404  else
1405  {
1406  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1407  "Reference elementwise unary: input type not supported");
1408 
1409  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1410  "Reference elementwise unary: output type not supported");
1411  }
1412 
1413  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1414  "Reference elementwise unary: input and output types not matching");
1415 
1416  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1417  "Reference elementwise unary: input and output shapes"
1418  "have different number of total elements");
1419 
1420  return supported;
1421 }
1422 
1424  const FakeQuantizationDescriptor& descriptor,
1425  Optional<std::string&> reasonIfUnsupported) const
1426 {
1427  IgnoreUnused(descriptor);
1428  bool supported = true;
1429 
1430  std::array<DataType,1> supportedTypes =
1431  {
1433  };
1434 
1435  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1436  "Reference fake quantization: input type not supported.");
1437 
1438  return supported;
1439 }
1440 
1442  const TensorInfo& output,
1443  const FillDescriptor& descriptor,
1444  Optional<std::string&> reasonIfUnsupported) const
1445 {
1446  IgnoreUnused(descriptor);
1447  IgnoreUnused(output);
1448 
1449  bool supported = true;
1450 
1451  std::array<DataType,3> supportedTypes =
1452  {
1456  };
1457 
1458  supported &= CheckSupportRule(TypeIs(input, DataType::Signed32), reasonIfUnsupported,
1459  "Reference Fill: input type not supported.");
1460 
1461  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1462  "Reference Fill: output type not supported.");
1463  return supported;
1464 }
1465 
1467  const TensorInfo& output,
1468  Optional<std::string&> reasonIfUnsupported) const
1469 {
1470  IgnoreUnused(output);
1471  bool supported = true;
1472 
1473  std::array<DataType,3> supportedTypes =
1474  {
1477  };
1478 
1479  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1480  "Reference Floor: input type not supported.");
1481 
1482  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1483  "Reference Floor: output type not supported.");
1484 
1485  return supported;
1486 }
1487 
1489  const TensorInfo& output,
1490  const TensorInfo& weights,
1491  const TensorInfo& biases,
1492  const FullyConnectedDescriptor& descriptor,
1493  Optional<std::string&> reasonIfUnsupported) const
1494 {
1495  bool supported = true;
1496 
1497  // Define supported types.
1498  std::array<DataType,6> supportedTypes =
1499  {
1505  };
1506 
1507  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1508  "Reference Fully Connected: input type not supported.");
1509 
1510  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1511  "Reference Fully Connected: output type not supported.");
1512 
1513  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1514  "Reference Fully Connected: weights type not supported.");
1515 
1516  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1517  "Reference Fully Connected: input and output types mismatched.");
1518 
1519  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1520  "Reference Fully Connected: weights is not a supported type.");
1521 
1522  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1523  "Reference Fully Connected: input and weights types mismatched.");
1524 
1525  if (descriptor.m_BiasEnabled)
1526  {
1527  // Defined supported types for bias
1528  std::array<DataType, 5>
1529  supportedBiasTypes =
1530  {
1535  };
1536 
1537  supported &= CheckSupportRule(TypeAnyOf(biases, supportedBiasTypes), reasonIfUnsupported,
1538  "Reference Fully Connected: bias type not supported.");
1539 
1540  supported &= CheckSupportRule(BiasAndWeightsTypesMatch(biases, weights), reasonIfUnsupported,
1541  "Reference Fully Connected: bias and weight types mismatch.");
1542 
1543  supported &= CheckSupportRule(BiasAndWeightsTypesCompatible(weights, supportedBiasTypes), reasonIfUnsupported,
1544  "Reference Fully Connected: bias type inferred from weights is incompatible.");
1545 
1546  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(biases, 1U), reasonIfUnsupported,
1547  "Reference Fully Connected: bias must have 1 dimension.");
1548 
1549  }
1550 
1551  return supported;
1552 }
1553 
1555  const armnn::TensorInfo& input1,
1556  const armnn::TensorInfo& output,
1557  armnn::Optional<std::string&> reasonIfUnsupported) const
1558 {
1559  bool supported = true;
1560  std::array<DataType,7> supportedTypes =
1561  {
1568  };
1569 
1570  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1571  "Reference GatherNd: input type not supported");
1572 
1573  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1574  "Reference GatherNd: output type not supported");
1575 
1576  supported &= CheckSupportRule(TypeIs(input1, DataType::Signed32), reasonIfUnsupported,
1577  "Reference GatherNd: indices (input1) type not supported");
1578 
1579  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1580  "Reference GatherNd: input and output types not matching");
1581 
1582  return supported;
1583 }
1584 
1586  const armnn::TensorInfo& input1,
1587  const armnn::TensorInfo& output,
1588  const GatherDescriptor& descriptor,
1589  armnn::Optional<std::string&> reasonIfUnsupported) const
1590 {
1591  bool supported = true;
1592  std::array<DataType,7> supportedTypes =
1593  {
1600  };
1601 
1602  IgnoreUnused(descriptor);
1603  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1604  "Reference Gather: input type not supported");
1605 
1606  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1607  "Reference Gather: output type not supported");
1608 
1609  supported &= CheckSupportRule(TypeIs(input1, DataType::Signed32), reasonIfUnsupported,
1610  "Reference Gather: indices (input1) type not supported");
1611 
1612  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1613  "Reference Gather: input and output types not matching");
1614 
1615  return supported;
1616 }
1617 
1619  Optional<std::string&> /*reasonIfUnsupported*/) const
1620 {
1621  return true;
1622 }
1623 
1625  const TensorInfo& output,
1626  const InstanceNormalizationDescriptor& descriptor,
1627  Optional<std::string&> reasonIfUnsupported) const
1628 {
1629  IgnoreUnused(descriptor);
1630  // Define supported types
1631  std::array<DataType, 3> supportedTypes =
1632  {
1635  };
1636 
1637  bool supported = true;
1638 
1639  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1640  "Reference Instance Normalization: input type not supported.");
1641 
1642  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1643  "Reference Instance Normalization: output type not supported.");
1644 
1645  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1646  "Reference Instance Normalization: input and output types mismatched.");
1647 
1648  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1649  "Reference Instance Normalization: input and output shapes have different "
1650  "num total elements.");
1651 
1652  return supported;
1653 }
1654 
1656  const TensorInfo& output,
1657  const L2NormalizationDescriptor& descriptor,
1658  Optional<std::string&> reasonIfUnsupported) const
1659 {
1660  IgnoreUnused(descriptor);
1661  // Define supported types
1662  std::array<DataType, 6> supportedTypes =
1663  {
1669  };
1670 
1671  bool supported = true;
1672 
1673  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1674  "Reference L2normalization: input type not supported.");
1675 
1676  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1677  "Reference L2normalization: output type not supported.");
1678 
1679  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1680  "Reference L2normalization: input and output types mismatched.");
1681 
1682  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1683  "Reference L2normalization: input and output shapes have different "
1684  "num total elements.");
1685 
1686  return supported;
1687 }
1688 
1690  const TensorInfo& input1,
1691  const TensorInfo& output,
1692  const LogicalBinaryDescriptor& descriptor,
1693  Optional<std::string&> reasonIfUnsupported) const
1694 {
1695  IgnoreUnused(descriptor);
1696 
1697  std::array<DataType, 1> supportedTypes =
1698  {
1700  };
1701 
1702  bool supported = true;
1703  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1704  "Reference LogicalBinary: input 0 type not supported");
1705  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1706  "Reference LogicalBinary: input 1 type not supported");
1707 
1708  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1709  "Reference LogicalBinary: input and output types do not match");
1710 
1711  return supported;
1712 }
1713 
1715  const TensorInfo& output,
1716  const LogSoftmaxDescriptor& descriptor,
1717  Optional<std::string&> reasonIfUnsupported) const
1718 {
1719  IgnoreUnused(descriptor);
1720 
1721  std::array<DataType, 3> supportedTypes =
1722  {
1725  };
1726 
1727  bool supported = true;
1728  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1729  "Reference LogSoftmax: input type not supported");
1730 
1731  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1732  "Reference LogSoftmax: output type not supported");
1733 
1734  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1735  "Reference LogSoftmax: input and output types do not match");
1736 
1737  return supported;
1738 }
1739 
1741  const TensorInfo& outputStateIn,
1742  const TensorInfo& cellStateIn,
1743  const TensorInfo& scratchBuffer,
1744  const TensorInfo& outputStateOut,
1745  const TensorInfo& cellStateOut,
1746  const TensorInfo& output,
1747  const LstmDescriptor& descriptor,
1748  const LstmInputParamsInfo& paramsInfo,
1749  Optional<std::string&> reasonIfUnsupported) const
1750 {
1751  IgnoreUnused(descriptor);
1752  IgnoreUnused(paramsInfo);
1753 
1754  bool supported = true;
1755 
1756  std::array<DataType,3> supportedTypes = {
1759  };
1760 
1761  // check inputs and outputs
1762  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1763  "Reference Lstm: input is not a supported type.");
1764  supported &= CheckSupportRule(TypesAreEqual(input, outputStateIn), reasonIfUnsupported,
1765  "Reference Lstm: input and outputStateIn types are mismatched");
1766  supported &= CheckSupportRule(TypesAreEqual(input, cellStateIn), reasonIfUnsupported,
1767  "Reference Lstm: input and cellStateIn types are mismatched");
1768  supported &= CheckSupportRule(TypesAreEqual(input, scratchBuffer), reasonIfUnsupported,
1769  "Reference Lstm: input and scratchBuffer types are mismatched");
1770  supported &= CheckSupportRule(TypesAreEqual(input, outputStateOut), reasonIfUnsupported,
1771  "Reference Lstm: input and outputStateOut types are mismatched");
1772  supported &= CheckSupportRule(TypesAreEqual(input, cellStateOut), reasonIfUnsupported,
1773  "Reference Lstm: input and cellStateOut types are mismatched");
1774 
1775  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1776  "Reference Lstm: input and output types are mismatched");
1777  // check layer parameters
1778  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToForgetWeights()), reasonIfUnsupported,
1779  "Reference Lstm: input and InputToForgetWeights types are mismatched");
1780  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToCellWeights()), reasonIfUnsupported,
1781  "Reference Lstm: input and InputToCellWeights types are mismatched");
1782  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToOutputWeights()), reasonIfUnsupported,
1783  "Reference Lstm: input and InputToOutputWeights types are mismatched");
1784  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToForgetWeights()), reasonIfUnsupported,
1785  "Reference Lstm: input and RecurrentToForgetWeights types are mismatched");
1786  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToCellWeights()), reasonIfUnsupported,
1787  "Reference Lstm: input and RecurrentToCellWeights types are mismatched");
1788  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToOutputWeights()), reasonIfUnsupported,
1789  "Reference Lstm: input and RecurrentToOutputWeights types are mismatched");
1790  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetGateBias()), reasonIfUnsupported,
1791  "Reference Lstm: input and ForgetGateBias types are mismatched");
1792  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellBias()), reasonIfUnsupported,
1793  "Reference Lstm: input and CellBias types are mismatched");
1794  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputGateBias()), reasonIfUnsupported,
1795  "Reference Lstm: input and OutputGateBias types are mismatched");
1796  if (!descriptor.m_CifgEnabled)
1797  {
1798  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToInputWeights()), reasonIfUnsupported,
1799  "Reference Lstm: input and InputToInputWeights types are mismatched");
1800  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToInputWeights()),
1801  reasonIfUnsupported,
1802  "Reference Lstm: input and RecurrentToInputWeights types are mismatched");
1803  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputGateBias()), reasonIfUnsupported,
1804  "Reference Lstm: input and InputGateBias types are mismatched");
1805  if (descriptor.m_PeepholeEnabled)
1806  {
1807  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToInputWeights()),
1808  reasonIfUnsupported,
1809  "Reference Lstm: input and CellToInputWeights types are mismatched");
1810  }
1811  }
1812  if (descriptor.m_PeepholeEnabled)
1813  {
1814  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToForgetWeights()), reasonIfUnsupported,
1815  "Reference Lstm: input and CellToForgetWeights types are mismatched");
1816  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToOutputWeights()), reasonIfUnsupported,
1817  "Reference Lstm: input and CellToOutputWeights types are mismatched");
1818  }
1819  if (descriptor.m_ProjectionEnabled)
1820  {
1821  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionWeights()), reasonIfUnsupported,
1822  "Reference Lstm: input and mProjectionWeights types are mismatched");
1823  if (paramsInfo.m_ProjectionBias != nullptr)
1824  {
1825  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
1826  "Reference Lstm: input and ProjectionBias types are mismatched");
1827  }
1828  }
1829  if (descriptor.m_LayerNormEnabled)
1830  {
1831  if (!descriptor.m_CifgEnabled)
1832  {
1833  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputLayerNormWeights()),
1834  reasonIfUnsupported,
1835  "Reference Lstm: input and InputLayerNormWeights types are mismatched");
1836  }
1837  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetLayerNormWeights()),
1838  reasonIfUnsupported,
1839  "Reference Lstm: input and ForgetLayerNormWeights types are mismatched");
1840  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellLayerNormWeights()),
1841  reasonIfUnsupported,
1842  "Reference Lstm: input and CellLayerNormWeights types are mismatched");
1843  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputLayerNormWeights()),
1844  reasonIfUnsupported,
1845  "Reference Lstm: input and OutputLayerNormWeights types are mismatched");
1846  }
1847 
1848  return supported;
1849 }
1850 
1852  const TensorInfo& input1,
1853  const TensorInfo& output,
1854  Optional<std::string&> reasonIfUnsupported) const
1855 {
1856  bool supported = true;
1857 
1858  std::array<DataType,7> supportedTypes = {
1865  };
1866 
1867  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1868  "Reference maximum: input 0 is not a supported type.");
1869 
1870  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1871  "Reference maximum: input 1 is not a supported type.");
1872 
1873  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1874  "Reference maximum: output is not a supported type.");
1875 
1876  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1877  "Reference maximum: input 0 and Input 1 types are mismatched");
1878 
1879  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1880  "Reference maximum: input and output types are mismatched");
1881 
1882  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1883  "Reference maximum: shapes are not suitable for implicit broadcast.");
1884 
1885  return supported;
1886 }
1887 
1889  const TensorInfo& output,
1890  const MeanDescriptor& descriptor,
1891  Optional<std::string&> reasonIfUnsupported) const
1892 {
1893  bool supported = true;
1894  std::string meanLayerStr = "Mean";
1895  std::string outputTensorStr = "output";
1896 
1897  std::array<DataType,6> supportedTypes =
1898  {
1904  };
1905 
1906  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1907  "Reference Mean: input type not supported.");
1908 
1909  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1910  "Reference Mean: input and output types are mismatched");
1911 
1912  if (descriptor.m_KeepDims)
1913  {
1914  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, input.GetNumDimensions()),
1915  reasonIfUnsupported,
1916  CreateIncorrectDimensionsErrorMsg(input.GetNumDimensions(),
1917  output.GetNumDimensions(),
1918  meanLayerStr, outputTensorStr).data());
1919  }
1920  else if (descriptor.m_Axis.empty())
1921  {
1922  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1923  reasonIfUnsupported,
1924  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1925  meanLayerStr, outputTensorStr).data());
1926  }
1927  else
1928  {
1929  auto outputDim = input.GetNumDimensions() - armnn::numeric_cast<unsigned int>(descriptor.m_Axis.size());
1930 
1931  if (outputDim > 0)
1932  {
1933  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, outputDim),
1934  reasonIfUnsupported,
1935  CreateIncorrectDimensionsErrorMsg(outputDim, output.GetNumDimensions(),
1936  meanLayerStr, outputTensorStr).data());
1937  }
1938  else
1939  {
1940  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1941  reasonIfUnsupported,
1942  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1943  meanLayerStr, outputTensorStr).data());
1944  }
1945  }
1946 
1947  return supported;
1948 }
1949 
1951  const TensorInfo &output,
1952  Optional<std::string &> reasonIfUnsupported) const
1953 {
1954  bool supported = true;
1955 
1956  std::array<DataType,7> supportedTypes =
1957  {
1965  };
1966 
1967  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1968  "Reference MemCopy: input type not supported");
1969 
1970  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1971  "Reference MemCopy: output type not supported");
1972 
1973  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1974  "Reference MemCopy: input and output types are mismatched");
1975 
1976  return supported;
1977 }
1978 
1980  const TensorInfo& input1,
1981  const TensorInfo& output,
1982  Optional<std::string&> reasonIfUnsupported) const
1983 {
1984  bool supported = true;
1985 
1986  std::array<DataType,7> supportedTypes = {
1993  };
1994 
1995  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1996  "Reference minimum: input 0 is not a supported type.");
1997 
1998  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1999  "Reference minimum: input 1 is not a supported type.");
2000 
2001  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2002  "Reference minimum: output is not a supported type.");
2003 
2004  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2005  "Reference minimum: input 0 and Input 1 types are mismatched");
2006 
2007  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2008  "Reference minimum: input and output types are mismatched");
2009 
2010  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2011  "Reference minimum: shapes are not suitable for implicit broadcast.");
2012 
2013  return supported;
2014 }
2015 
2017  const TensorInfo& input1,
2018  const TensorInfo& output,
2019  Optional<std::string&> reasonIfUnsupported) const
2020 {
2021  bool supported = true;
2022 
2023  std::array<DataType,7> supportedTypes = {
2030  };
2031 
2032  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2033  "Reference multiplication: input 0 is not a supported type.");
2034 
2035  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2036  "Reference multiplication: input 1 is not a supported type.");
2037 
2038  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2039  "Reference multiplication: output is not a supported type.");
2040 
2041  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2042  "Reference multiplication: input 0 and Input 1 types are mismatched");
2043 
2044  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2045  "Reference multiplication: input and output types are mismatched");
2046 
2047  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2048  "Reference multiplication: shapes are not suitable for implicit broadcast.");
2049 
2050  return supported;
2051 }
2052 
2054  const TensorInfo& output,
2055  const NormalizationDescriptor& descriptor,
2056  Optional<std::string&> reasonIfUnsupported) const
2057 {
2058  IgnoreUnused(descriptor);
2059 
2060  // Define supported types
2061  std::array<DataType, 6> supportedTypes =
2062  {
2068  };
2069 
2070  bool supported = true;
2071 
2072  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2073  "Reference normalization: input type not supported.");
2074 
2075  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2076  "Reference normalization: output type not supported.");
2077 
2078  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
2079  "Reference normalization: input and output shapes have different "
2080  "num total elements.");
2081 
2082  return supported;
2083 }
2084 
2086  Optional<std::string&> /*reasonIfUnsupported*/) const
2087 {
2088  return true;
2089 }
2090 
2092  const TensorInfo& output,
2093  const PadDescriptor& descriptor,
2094  Optional<std::string&> reasonIfUnsupported) const
2095 {
2096  IgnoreUnused(descriptor);
2097  bool supported = true;
2098 
2099  // Define supported output and inputs types.
2100  std::array<DataType,6> supportedTypes =
2101  {
2107  };
2108 
2109  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2110  "Reference pad: input is not a supported type.");
2111 
2112  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2113  "Reference pad: output is not a supported type.");
2114 
2115  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2116  "Reference pad: input and output types are mismatched.");
2117 
2118  return supported;
2119 }
2120 
2122  const TensorInfo& output,
2123  const PermuteDescriptor& descriptor,
2124  Optional<std::string&> reasonIfUnsupported) const
2125 {
2126  IgnoreUnused(descriptor);
2127  bool supported = true;
2128 
2129  // Define supported output and inputs types.
2130  std::array<DataType, 6> supportedTypes =
2131  {
2138  };
2139 
2140  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2141  "Reference permute: input is not a supported type.");
2142 
2143  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2144  "Reference permute: output is not a supported type.");
2145 
2146  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2147  "Reference permute: input and output types are mismatched.");
2148 
2149  return supported;
2150 }
2151 
2153  const TensorInfo& output,
2154  const Pooling2dDescriptor& descriptor,
2155  Optional<std::string&> reasonIfUnsupported) const
2156 {
2157  IgnoreUnused(descriptor);
2158  bool supported = true;
2159 
2160  // Define supported output and inputs types.
2161  std::array<DataType,6> supportedTypes =
2162  {
2168  };
2169 
2170  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2171  "Reference poolind2d: input is not a supported type.");
2172 
2173  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2174  "Reference poolind2d: output is not a supported type.");
2175 
2176  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2177  "Reference poolind2d: input and output types are mismatched.");
2178 
2179  return supported;
2180 }
2181 
2183  const TensorInfo& output,
2184  const Pooling3dDescriptor& descriptor,
2185  Optional<std::string&> reasonIfUnsupported) const
2186 {
2187  IgnoreUnused(descriptor);
2188  bool supported = true;
2189 
2190  // Define supported output and inputs types.
2191  std::array<DataType,6> supportedTypes =
2192  {
2198  };
2199 
2200  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2201  "Reference poolind3d: input is not a supported type.");
2202 
2203  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2204  "Reference poolind3d: output is not a supported type.");
2205 
2206  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2207  "Reference poolind3d: input and output types are mismatched.");
2208 
2209  return supported;
2210 }
2211 
2212 
2214  const TensorInfo& previousOutputIn,
2215  const TensorInfo& previousCellStateIn,
2216  const TensorInfo& outputStateOut,
2217  const TensorInfo& cellStateOut,
2218  const TensorInfo& output,
2219  const QLstmDescriptor& descriptor,
2220  const LstmInputParamsInfo& paramsInfo,
2221  Optional<std::string&> reasonIfUnsupported) const
2222 {
2223  IgnoreUnused(input);
2224  IgnoreUnused(previousOutputIn);
2225  IgnoreUnused(previousCellStateIn);
2226  IgnoreUnused(outputStateOut);
2227  IgnoreUnused(cellStateOut);
2228  IgnoreUnused(output);
2229  IgnoreUnused(descriptor);
2230  IgnoreUnused(paramsInfo);
2231 
2232  IgnoreUnused(reasonIfUnsupported);
2233 
2234  return true;
2235 }
2236 
2238  const TensorInfo& output,
2239  Optional<std::string&> reasonIfUnsupported) const
2240 {
2241  bool supported = true;
2242 
2243  // Define supported input types.
2244  std::array<DataType,7> supportedInputTypes = {
2251  };
2252 
2253  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
2254  "Reference quantize: input type not supported.");
2255 
2256  // Define supported output types.
2257  std::array<DataType,4> supportedOutputTypes = {
2262  };
2263  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
2264  "Reference quantize: output type not supported.");
2265 
2266  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
2267  "Reference quantize: input and output shapes have different num total elements.");
2268 
2269  return supported;
2270 }
2271 
2273  const TensorInfo& output,
2274  Optional<std::string&> reasonIfUnsupported) const
2275 {
2276  IgnoreUnused(input);
2277  // Define supported output types.
2278  std::array<DataType,1> supportedOutputTypes =
2279  {
2281  };
2282 
2283  return CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
2284  "Reference rank: input type not supported.");
2285 }
2286 
2288  const TensorInfo& output,
2289  const ReduceDescriptor& descriptor,
2290  Optional<std::string&> reasonIfUnsupported) const
2291 {
2292  IgnoreUnused(descriptor);
2293  bool supported = true;
2294  std::array<DataType,7> supportedTypes =
2295  {
2302  };
2303 
2304  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2305  "Reference Reduce: input type not supported");
2306 
2307  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2308  "Reference Reduce: output type not supported");
2309 
2310  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2311  "Reference Reduce: input and output types not matching");
2312 
2313  return supported;
2314 }
2315 
2317  const TensorInfo& output,
2318  const ReshapeDescriptor& descriptor,
2319  Optional<std::string&> reasonIfUnsupported) const
2320 {
2321  IgnoreUnused(output);
2322  IgnoreUnused(descriptor);
2323  // Define supported output types.
2324  std::array<DataType,8> supportedOutputTypes =
2325  {
2334  };
2335 
2336  return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
2337  "Reference reshape: input type not supported.");
2338 }
2339 
2341  const TensorInfo& output,
2342  const ResizeDescriptor& descriptor,
2343  Optional<std::string&> reasonIfUnsupported) const
2344 {
2345  IgnoreUnused(descriptor);
2346  bool supported = true;
2347  std::array<DataType,6> supportedTypes =
2348  {
2355  };
2356 
2357  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2358  "Reference Resize: input type not supported");
2359 
2360  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2361  "Reference Resize: output type not supported");
2362 
2363  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2364  "Reference Resize: input and output types not matching");
2365 
2366  return supported;
2367 }
2368 
2370  const TensorInfo& input1,
2371  const TensorInfo& output,
2372  Optional<std::string&> reasonIfUnsupported) const
2373 {
2374  bool supported = true;
2375  // ReverseV2 is data type agnostic so it can support all the types in the Reference backend
2376  std::array<DataType,8> supportedTypes =
2377  {
2386  };
2387 
2388  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2389  "Reference ReverseV2: input0 type not supported");
2390 
2391  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2392  "Reference ReverseV2: output type not supported");
2393 
2394  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2395  "Reference ReverseV2: input0 and output types not matching");
2396 
2397  std::array<DataType,6> input2SupportedTypes =
2398  {
2400  };
2401 
2402  supported &= CheckSupportRule(TypeAnyOf(input1, input2SupportedTypes), reasonIfUnsupported,
2403  "Reference ReverseV2: input1 type not supported");
2404 
2405  return supported;
2406 }
2407 
2409  const TensorInfo& output,
2410  Optional<std::string&> reasonIfUnsupported) const
2411 {
2412  IgnoreUnused(input);
2413  bool supported = true;
2414 
2415  std::array<DataType, 1> supportedTypes =
2416  {
2418  };
2419 
2420  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2421  "Reference Shape: output type not supported");
2422 
2423  return supported;
2424 }
2425 
2427  const TensorInfo& output,
2428  const SliceDescriptor& descriptor,
2429  Optional<std::string&> reasonIfUnsupported) const
2430 {
2431  IgnoreUnused(descriptor);
2432  bool supported = true;
2433 
2434  std::array<DataType, 5> supportedTypes =
2435  {
2441  };
2442 
2443  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2444  "Reference Slice: input type not supported");
2445 
2446  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2447  "Reference Slice: output type not supported");
2448 
2449  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2450  "Reference Slice: input and output types are mismatched");
2451 
2452  return supported;
2453 }
2454 
2456  const TensorInfo& output,
2457  const SoftmaxDescriptor& descriptor,
2458  Optional<std::string&> reasonIfUnsupported) const
2459 {
2460  IgnoreUnused(descriptor);
2461  bool supported = true;
2462  std::array<DataType,7> supportedTypes =
2463  {
2470  };
2471 
2472  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2473  "Reference Softmax: output type not supported");
2474 
2475  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2476  "Reference Softmax: input type not supported");
2477 
2478  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2479  "Reference Softmax: input type not supported");
2480 
2481  return supported;
2482 }
2483 
2485  const TensorInfo& output,
2486  const SpaceToBatchNdDescriptor& descriptor,
2487  Optional<std::string&> reasonIfUnsupported) const
2488 {
2489  IgnoreUnused(descriptor);
2490  bool supported = true;
2491  std::array<DataType,6> supportedTypes =
2492  {
2498  };
2499 
2500  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2501  "Reference SpaceToBatchNd: input type not supported");
2502 
2503  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2504  "Reference SpaceToBatchNd: output type not supported");
2505 
2506  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2507  "Reference SpaceToBatchNd: input and output types are mismatched");
2508 
2509  return supported;
2510 }
2511 
2513  const TensorInfo& output,
2514  const SpaceToDepthDescriptor& descriptor,
2515  Optional<std::string&> reasonIfUnsupported) const
2516 {
2517 
2518  IgnoreUnused(descriptor);
2519  bool supported = true;
2520 
2521  std::array<DataType,6> supportedTypes =
2522  {
2528  };
2529 
2530  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2531  "Reference SpaceToDepth: input type not supported");
2532 
2533  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2534  "Reference SpaceToDepth: output type not supported");
2535 
2536  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2537  "Reference SpaceToDepth: input and output types are mismatched");
2538 
2539  return supported;
2540 }
2541 
2543  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
2544  const ViewsDescriptor& descriptor,
2545  Optional<std::string&> reasonIfUnsupported) const
2546 {
2547  IgnoreUnused(descriptor);
2548  bool supported = true;
2549  std::array<DataType,6> supportedTypes =
2550  {
2556  };
2557 
2558  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2559  "Reference splitter: output type not supported");
2560  for (const TensorInfo& output : outputs)
2561  {
2562  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2563  "Reference splitter: input type not supported");
2564 
2565  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2566  "Reference splitter: input and output types mismatched.");
2567  }
2568 
2569  return supported;
2570 }
2571 
2572 bool RefLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
2573  const TensorInfo& output,
2574  const StackDescriptor& descriptor,
2575  Optional<std::string&> reasonIfUnsupported) const
2576 {
2577  IgnoreUnused(descriptor);
2578 
2579  bool supported = true;
2580  std::array<DataType,7> supportedTypes =
2581  {
2588  };
2589 
2590  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2591  "Reference stack: output type not supported");
2592  for (const TensorInfo* input : inputs)
2593  {
2594  ARMNN_ASSERT(input != nullptr);
2595  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
2596  "Reference stack: input type not supported");
2597 
2598  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
2599  "Reference stack: input and output types mismatched.");
2600  }
2601 
2602  return supported;
2603 }
2604 
2606  const TensorInfo& output,
2607  const StridedSliceDescriptor& descriptor,
2608  Optional<std::string&> reasonIfUnsupported) const
2609 {
2610  IgnoreUnused(descriptor);
2611  bool supported = true;
2612 
2613  std::array<DataType,5> supportedTypes =
2614  {
2619  };
2620 
2621  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2622  "Reference StridedSlice: input type not supported");
2623 
2624  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2625  "Reference StridedSlice: output type not supported");
2626 
2627  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2628  "Reference StridedSlice: input and output types are mismatched");
2629 
2630  return supported;
2631 }
2632 
2634  const TensorInfo& input1,
2635  const TensorInfo& output,
2636  Optional<std::string&> reasonIfUnsupported) const
2637 {
2638  bool supported = true;
2639 
2640  std::array<DataType,7> supportedTypes = {
2647  };
2648 
2649  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2650  "Reference subtraction: input 0 is not a supported type.");
2651 
2652  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2653  "Reference subtraction: input 1 is not a supported type.");
2654 
2655  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2656  "Reference subtraction: output is not a supported type.");
2657 
2658  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2659  "Reference subtraction: input 0 and Input 1 types are mismatched");
2660 
2661  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2662  "Reference subtraction: input and output types are mismatched");
2663 
2664  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2665  "Reference subtraction: shapes are not suitable for implicit broadcast.");
2666 
2667  return supported;
2668 }
2669 
2671  const TensorInfo& alpha,
2672  const TensorInfo& output,
2673  Optional<std::string&> reasonIfUnsupported) const
2674 {
2675  bool supported = true;
2676 
2677  std::array<DataType, 6> supportedTypes
2678  {
2684  };
2685 
2686  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2687  "PReLU: input is not a supported type.");
2688 
2689  supported &= CheckSupportRule(TypeAnyOf(alpha, supportedTypes), reasonIfUnsupported,
2690  "PReLU: alpha is not a supported type.");
2691 
2692  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2693  "PReLU: output is not a supported type.");
2694 
2695  supported &= CheckSupportRule(TypesAreEqual(input, alpha, output), reasonIfUnsupported,
2696  "PReLU: input, alpha and output types are mismatched");
2697 
2698  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input, alpha, output), reasonIfUnsupported,
2699  "PReLU: shapes are not suitable for implicit broadcast");
2700 
2701  return supported;
2702 }
2703 
2705  const TensorInfo& output,
2706  const TileDescriptor& descriptor,
2707  Optional<std::string&> reasonIfUnsupported) const
2708 {
2709  IgnoreUnused(descriptor);
2710 
2711  bool supported = true;
2712 
2713  std::array<DataType, 7> supportedTypes
2714  {
2722  };
2723 
2724  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2725  "Tile: input type not supported.");
2726 
2727  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2728  "Tile: output type not supported");
2729 
2730  return supported;
2731 }
2732 
2734  const TensorInfo& output,
2735  const TransposeConvolution2dDescriptor& descriptor,
2736  const TensorInfo& weights,
2737  const Optional<TensorInfo>& biases,
2738  Optional<std::string&> reasonIfUnsupported) const
2739 {
2740  IgnoreUnused(descriptor);
2741  bool supported = true;
2742 
2743  std::array<DataType,7> supportedTypes =
2744  {
2751  };
2752 
2753  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2754  "Reference TransposeConvolution2d: input is not a supported type.");
2755 
2756  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2757  "Reference TransposeConvolution2d: output is not a supported type.");
2758 
2759  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2760  "Reference TransposeConvolution2d: input and output types mismatched.");
2761 
2762 
2763  const DataType inputType = input.GetDataType();
2764  if (IsQuantized8BitType(inputType))
2765  {
2766  std::array<DataType, 3> supportedWeightTypes =
2767  {
2771  };
2772 
2773  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
2774  "Reference TransposeConvolution2d: weights type not supported for "
2775  "quantized input.");
2776  }
2777  else
2778  {
2779  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
2780  "Reference TransposeConvolution2d: weights is not a supported type.");
2781 
2782  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
2783  "Reference TransposeConvolution2d: input and weights types mismatched.");
2784  }
2785 
2786  if (biases.has_value())
2787  {
2788  std::array<DataType,4> biasesSupportedTypes =
2789  {
2793  };
2794  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
2795  "Reference TransposeConvolution2d: biases is not a supported type.");
2796  }
2797 
2798  return supported;
2799 }
2800 
2802  const TensorInfo& output,
2803  const TransposeDescriptor& descriptor,
2804  Optional<std::string&> reasonIfUnsupported) const
2805 {
2806  IgnoreUnused(descriptor);
2807  bool supported = true;
2808 
2809  // Define supported output and inputs types.
2810  std::array<DataType, 6> supportedTypes =
2811  {
2818  };
2819 
2820  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2821  "Reference transpose: input is not a supported type.");
2822 
2823  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2824  "Reference transpose: output is not a supported type.");
2825 
2826  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2827  "Reference transpose: input and output types are mismatched.");
2828 
2829  return supported;
2830 }
2831 
2833  const TensorInfo& input,
2834  const TensorInfo& outputStateIn,
2835  const TensorInfo& cellStateIn,
2836  const TensorInfo& outputStateOut,
2837  const TensorInfo& cellStateOut,
2838  const TensorInfo& output,
2839  const UnidirectionalSequenceLstmDescriptor& descriptor,
2840  const LstmInputParamsInfo& paramsInfo,
2841  Optional<std::string&> reasonIfUnsupported) const
2842 {
2843  IgnoreUnused(descriptor);
2844  IgnoreUnused(paramsInfo);
2845  IgnoreUnused(outputStateIn);
2846  IgnoreUnused(cellStateIn);
2847  IgnoreUnused(outputStateOut);
2848  IgnoreUnused(cellStateOut);
2849  bool supported = true;
2850 
2851  std::array<DataType, 2> supportedTypes =
2852  {
2855  };
2856 
2857  std::array<DataType, 2> supportedWeightTypes =
2858  {
2861  };
2862 
2863  std::array<DataType, 3> supportedBiasTypes =
2864  {
2868  };
2869 
2870  // check inputs and outputs
2871  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2872  "Reference UnidirectionalSequenceLstm: input is not a supported type.");
2873  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2874  "Reference UnidirectionalSequenceLstm: output is not a supported type.");
2875 
2876  // check layer parameters
2877  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToForgetWeights(), supportedWeightTypes),
2878  reasonIfUnsupported,
2879  "Reference UnidirectionalSequenceLstm: InputToForgetWeights "
2880  "is not a supported type.");
2881  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToCellWeights(), supportedWeightTypes),
2882  reasonIfUnsupported,
2883  "Reference UnidirectionalSequenceLstm: InputToCellWeights is not a supported type.");
2884  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToOutputWeights(), supportedWeightTypes),
2885  reasonIfUnsupported,
2886  "Reference UnidirectionalSequenceLstm: InputToOutputWeights "
2887  "is not a supported type.");
2888  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToForgetWeights(), supportedWeightTypes),
2889  reasonIfUnsupported,
2890  "Reference UnidirectionalSequenceLstm: RecurrentToForgetWeights "
2891  "is not a supported type.");
2892  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToCellWeights(), supportedWeightTypes),
2893  reasonIfUnsupported,
2894  "Reference UnidirectionalSequenceLstm: RecurrentToCellWeights "
2895  "is not a supported type.");
2896  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToOutputWeights(), supportedWeightTypes),
2897  reasonIfUnsupported,
2898  "Reference UnidirectionalSequenceLstm: RecurrentToOutputWeights "
2899  "is not a supported type.");
2900 
2901  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetForgetGateBias(), supportedBiasTypes), reasonIfUnsupported,
2902  "Reference UnidirectionalSequenceLstm: ForgetGateBias is not a supported type.");
2903  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellBias(), supportedBiasTypes), reasonIfUnsupported,
2904  "Reference UnidirectionalSequenceLstm: CellBias is not a supported type.");
2905  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetOutputGateBias(), supportedBiasTypes), reasonIfUnsupported,
2906  "Reference UnidirectionalSequenceLstm: OutputGateBias is not a supported type.");
2907  if (!descriptor.m_CifgEnabled)
2908  {
2909  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToInputWeights(), supportedWeightTypes),
2910  reasonIfUnsupported,
2911  "Reference UnidirectionalSequenceLstm: InputToInputWeights "
2912  "is not a supported type.");
2913  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToInputWeights(), supportedWeightTypes),
2914  reasonIfUnsupported,
2915  "Reference UnidirectionalSequenceLstm: RecurrentToInputWeights "
2916  "is not a supported type.");
2917  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputGateBias(), supportedBiasTypes), reasonIfUnsupported,
2918  "Reference UnidirectionalSequenceLstm: InputGateBias is not a supported type.");
2919  if (descriptor.m_PeepholeEnabled)
2920  {
2921  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToInputWeights(), supportedWeightTypes),
2922  reasonIfUnsupported,
2923  "Reference UnidirectionalSequenceLstm: CellToInputWeights "
2924  "is not a supported type.");
2925  }
2926  }
2927  if (descriptor.m_PeepholeEnabled)
2928  {
2929  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToForgetWeights(), supportedWeightTypes),
2930  reasonIfUnsupported,
2931  "Reference UnidirectionalSequenceLstm: CellToForgetWeights "
2932  "is not a supported type.");
2933  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToOutputWeights(), supportedWeightTypes),
2934  reasonIfUnsupported,
2935  "Reference UnidirectionalSequenceLstm: CellToOutputWeights "
2936  "is not a supported type.");
2937  }
2938  if (descriptor.m_ProjectionEnabled)
2939  {
2940  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetProjectionWeights(), supportedWeightTypes),
2941  reasonIfUnsupported,
2942  "Reference UnidirectionalSequenceLstm: ProjectionWeights "
2943  "is not a supported type.");
2944  if (paramsInfo.m_ProjectionBias != nullptr)
2945  {
2946  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
2947  "Reference UnidirectionalSequenceLstm: input and ProjectionBias types "
2948  "are mismatched");
2949  }
2950  }
2951  if (descriptor.m_LayerNormEnabled)
2952  {
2953  if (!descriptor.m_CifgEnabled)
2954  {
2955  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputLayerNormWeights(), supportedWeightTypes),
2956  reasonIfUnsupported,
2957  "Reference UnidirectionalSequenceLstm: InputLayerNormWeights "
2958  "is not a supported type.");
2959  }
2960  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetForgetLayerNormWeights(), supportedWeightTypes),
2961  reasonIfUnsupported,
2962  "Reference UnidirectionalSequenceLstm: ForgetLayerNormWeights "
2963  "is not a supported type.");
2964  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellLayerNormWeights(), supportedWeightTypes),
2965  reasonIfUnsupported,
2966  "Reference UnidirectionalSequenceLstm: CellLayerNormWeights "
2967  "is not a supported type.");
2968  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetOutputLayerNormWeights(), supportedWeightTypes),
2969  reasonIfUnsupported,
2970  "Reference UnidirectionalSequenceLstm: OutputLayerNormWeights "
2971  "is not a supported type.");
2972  }
2973 
2974  return supported;
2975 }
2976 
2977 } // namespace armnn
armnn::RefLayerSupport::IsLstmSupported
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1740
ARMNN_ASSERT
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
armnn::BatchNormalizationDescriptor
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
Definition: Descriptors.hpp:828
armnn::LayerType::SpaceToDepth
@ SpaceToDepth
armnn::RefLayerSupport::IsDepthwiseConvolutionSupported
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1189
armnn::DataType::Boolean
@ Boolean
armnn::ViewsDescriptor
A ViewsDescriptor for the SplitterLayer.
Definition: Descriptors.hpp:244
armnn::LayerType::Permute
@ Permute
armnn::ActivationDescriptor
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
armnn::LstmInputParamsInfo::GetCellBias
const TensorInfo & GetCellBias() const
Definition: LstmParams.hpp:173
armnn::RefLayerSupport::IsPooling3dSupported
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2182
armnn::FullyConnectedDescriptor
A FullyConnectedDescriptor for the FullyConnectedLayer.
Definition: Descriptors.hpp:507
armnn::LayerType::Splitter
@ Splitter
armnn::RefLayerSupport::IsReverseV2Supported
bool IsReverseV2Supported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2369
armnn::LayerType::BatchNormalization
@ BatchNormalization
armnn::QLstmDescriptor
A QLstmDescriptor for the QLstmLayer.
Definition: Descriptors.hpp:1359
armnn::Optional
Definition: Optional.hpp:270
armnn::RefLayerSupport::IsQLstmSupported
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2213
armnn::IsQuantized8BitType
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:306
armnn::ActivationFunction::LeakyReLu
@ LeakyReLu
armnn::RefLayerSupport::IsTileSupported
bool IsTileSupported(const TensorInfo &input, const TensorInfo &output, const TileDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2704
armnn::LstmInputParamsInfo::GetInputToCellWeights
const TensorInfo & GetInputToCellWeights() const
Definition: LstmParams.hpp:129
armnn::Pooling3dDescriptor
A Pooling3dDescriptor for the Pooling3dLayer.
Definition: Descriptors.hpp:431
armnn::RefLayerSupport::IsConvolution2dSupported
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:993
armnn::RefLayerSupport::IsFloorSupported
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1466
armnn::BiasAndWeightsTypesMatch
Definition: LayerSupportRules.hpp:118
armnn::RefLayerSupport::IsDequantizeSupported
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1258
armnn::ActivationFunction::SoftReLu
@ SoftReLu
armnn::Rule
Definition: LayerSupportRules.hpp:48
armnn::RefLayerSupport::IsPooling2dSupported
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2152
armnn::ResizeDescriptor
A ResizeDescriptor for the ResizeLayer.
Definition: Descriptors.hpp:964
armnn::ArgMinMaxDescriptor
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
armnn::RefLayerSupport::IsLayerSupported
bool IsLayerSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &, Optional< std::string & > reasonIfUnsupported) const override
Default implementation of the ILayerSupport interface, Backends should implement this as a switch sta...
Definition: RefLayerSupport.cpp:61
armnn::InstanceNormalizationDescriptor
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
Definition: Descriptors.hpp:847
armnn::GatherDescriptor
A GatherDescriptor for the GatherLayer.
Definition: Descriptors.hpp:944
armnn::RefLayerSupport::IsDetectionPostProcessSupported
bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1293
TypesUtils.hpp
armnn::LayerType::InstanceNormalization
@ InstanceNormalization
armnn::LayerType::ConvertFp16ToFp32
@ ConvertFp16ToFp32
armnn::ActivationFunction::Sqrt
@ Sqrt
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::LayerType::Floor
@ Floor
armnn::L2NormalizationDescriptor
A L2NormalizationDescriptor for the L2NormalizationLayer.
Definition: Descriptors.hpp:809
armnn::RefLayerSupport::IsAdditionSupported
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:615
armnn::RefLayerSupport::IsInputSupported
bool IsInputSupported(const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1618
armnn::LayerType::Transpose
@ Transpose
armnn::NormalizationDescriptor
A NormalizationDescriptor for the NormalizationLayer.
Definition: Descriptors.hpp:769
armnn::LayerType::Comparison
@ Comparison
armnn::RefLayerSupport::IsMeanSupported
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1888
armnn::TensorInfo::GetNumDimensions
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195
armnn::RefLayerSupport::IsTransposeConvolution2dSupported
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2733
armnn::LstmInputParamsInfo::GetProjectionBias
const TensorInfo & GetProjectionBias() const
Definition: LstmParams.hpp:185
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::ChannelShuffleDescriptor
A ChannelShuffleDescriptor for the ChannelShuffle operator.
Definition: Descriptors.hpp:1541
armnn::DataType::Float32
@ Float32
armnn::ActivationFunction::TanH
@ TanH
armnn::LstmInputParamsInfo::GetInputGateBias
const TensorInfo & GetInputGateBias() const
Definition: LstmParams.hpp:165
armnn::LayerType::Tile
@ Tile
armnn::RefLayerSupport::IsRankSupported
bool IsRankSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2272
armnn::DataType::QAsymmU8
@ QAsymmU8
armnn::ActivationFunction::BoundedReLu
@ BoundedReLu
min(a, max(b, input)) ReLu1 & ReLu6.
armnn::LayerType::Stack
@ Stack
armnn::DataType::QSymmS8
@ QSymmS8
armnn::StackDescriptor
A StackDescriptor for the StackLayer.
Definition: Descriptors.hpp:1230
armnn::LstmInputParamsInfo::GetRecurrentToInputWeights
const TensorInfo & GetRecurrentToInputWeights() const
Definition: LstmParams.hpp:137
armnn::LstmInputParamsInfo::GetRecurrentToForgetWeights
const TensorInfo & GetRecurrentToForgetWeights() const
Definition: LstmParams.hpp:141
IgnoreUnused.hpp
armnn::LayerType::Normalization
@ Normalization
RefLayerSupport.hpp
armnn::LayerType::QuantizedLstm
@ QuantizedLstm
armnn::LayerType::Reduce
@ Reduce
armnn::RefLayerSupport::IsSplitterSupported
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2542
armnn::RefLayerSupport::IsConvolution3dSupported
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1062
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
armnn::RefLayerSupport::IsBatchMatMulSupported
bool IsBatchMatMulSupported(const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:684
armnn::LstmInputParamsInfo::GetRecurrentToCellWeights
const TensorInfo & GetRecurrentToCellWeights() const
Definition: LstmParams.hpp:145
armnn::ActivationFunction::HardSwish
@ HardSwish
armnn::DataType::QSymmS16
@ QSymmS16
armnn::RefLayerSupport::IsSubtractionSupported
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2633
armnn::LayerType::GatherNd
@ GatherNd
armnn::LayerType::ElementwiseBinary
@ ElementwiseBinary
armnn::RefLayerSupport::IsQuantizeSupported
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2237
NumericCast.hpp
armnn::DataType::BFloat16
@ BFloat16
armnn::RefLayerSupport::IsArgMinMaxSupported
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:652
armnn::ShapesAreSameRank
Definition: LayerSupportRules.hpp:138
armnn::LayerType::ConvertFp32ToFp16
@ ConvertFp32ToFp16
armnn::RefLayerSupport::IsPermuteSupported
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2121
armnn::MeanDescriptor::m_KeepDims
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept.
Definition: Descriptors.hpp:1171
armnn::RefLayerSupport::IsComparisonSupported
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:871
LayerSupportCommon.hpp
armnn::RefLayerSupport::IsBatchNormalizationSupported
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:729
armnn::LstmInputParamsInfo::GetInputLayerNormWeights
const TensorInfo & GetInputLayerNormWeights() const
Definition: LstmParams.hpp:189
armnn::TypeNotPerAxisQuantized
Definition: LayerSupportRules.hpp:110
armnn::LayerType::Slice
@ Slice
armnn::LstmDescriptor::m_PeepholeEnabled
bool m_PeepholeEnabled
Enable/disable peephole.
Definition: Descriptors.hpp:1127
armnn::RefLayerSupport::IsDilatedDepthwiseConvolutionSupported
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1325
armnn::RefLayerSupport::IsSpaceToDepthSupported
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2512
armnn::RefLayerSupport::IsActivationSupported
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:548
armnn::DataType::Float16
@ Float16
armnn::RefLayerSupport::IsFakeQuantizationSupported
bool IsFakeQuantizationSupported(const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1423
armnn::LayerType::ChannelShuffle
@ ChannelShuffle
armnn::LayerSupportBase::IsQuantizedLstmSupported
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:120
armnn::RefLayerSupport::IsElementwiseUnarySupported
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1372
armnn::RefLayerSupport::IsConcatSupported
bool IsConcatSupported(const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:902
armnn::LstmInputParamsInfo::GetCellToInputWeights
const TensorInfo & GetCellToInputWeights() const
Definition: LstmParams.hpp:153
armnn::LstmInputParamsInfo::GetRecurrentToOutputWeights
const TensorInfo & GetRecurrentToOutputWeights() const
Definition: LstmParams.hpp:149
armnn::UnaryOperation::LogicalNot
@ LogicalNot
armnn::ShapesAreBroadcastCompatible
Definition: LayerSupportRules.hpp:154
armnn::LstmInputParamsInfo::GetInputToInputWeights
const TensorInfo & GetInputToInputWeights() const
Definition: LstmParams.hpp:121
armnn::RefLayerSupport::IsReduceSupported
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2287
armnn::LayerType::Subtraction
@ Subtraction
armnn::LayerType::Prelu
@ Prelu
armnn::FullyConnectedDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:526
armnn::LayerType::LogicalBinary
@ LogicalBinary
armnn::LayerType::Concat
@ Concat
armnn::PadDescriptor
A PadDescriptor for the PadLayer.
Definition: Descriptors.hpp:1175
armnn::TransposeDescriptor
A TransposeDescriptor for the TransposeLayer.
Definition: Descriptors.hpp:1469
armnn::RefLayerSupport::IsResizeSupported
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2340
armnn::RefLayerSupport::IsDepthToSpaceSupported
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1160
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
armnn::LayerType::Merge
@ Merge
PolymorphicDowncast.hpp
armnn::EmptyOptional
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
armnn::SliceDescriptor
A SliceDescriptor for the SliceLayer.
Definition: Descriptors.hpp:1207
armnn::DataType
DataType
Definition: Types.hpp:48
armnn::LayerType::Debug
@ Debug
armnn::LayerType::Softmax
@ Softmax
armnn::RefLayerSupport::IsBatchToSpaceNdSupported
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:775
armnn::ActivationFunction::Elu
@ Elu
armnn::RefLayerSupport::IsSpaceToBatchNdSupported
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2484
armnn::TensorNumDimensionsAreCorrect
Definition: LayerSupportRules.hpp:181
armnn::LstmInputParamsInfo::GetForgetGateBias
const TensorInfo & GetForgetGateBias() const
Definition: LstmParams.hpp:169
armnn::ReshapeDescriptor
A ReshapeDescriptor for the ReshapeLayer.
Definition: Descriptors.hpp:1002
armnn::LayerSupportBase::IsMergeSupported
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:112
armnn::RefLayerSupport::IsL2NormalizationSupported
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1655
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn::LayerType::Quantize
@ Quantize
armnn::RefLayerSupport::IsMaximumSupported
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1851
armnn::LstmInputParamsInfo::GetCellToForgetWeights
const TensorInfo & GetCellToForgetWeights() const
Definition: LstmParams.hpp:157
armnn::ActivationFunction::Linear
@ Linear
armnn::LayerSupportBase::IsMemImportSupported
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:105
armnn::ActivationDescriptor::m_Function
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu,...
Definition: Descriptors.hpp:59
armnn::LayerType::Multiplication
@ Multiplication
armnn::PermuteDescriptor
A PermuteDescriptor for the PermuteLayer.
Definition: Descriptors.hpp:149
armnn::BatchMatMulDescriptor
A BatchMatMulDescriptor for the BatchMatMul operator.
Definition: Descriptors.hpp:1563
armnn::LayerType::Addition
@ Addition
armnn::SpaceToBatchNdDescriptor
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Definition: Descriptors.hpp:1022
armnn::RefLayerSupport::IsReshapeSupported
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2316
armnn::Convolution3dDescriptor
A Convolution3dDescriptor for the Convolution3dLayer.
Definition: Descriptors.hpp:588
armnn::LayerType::DepthToSpace
@ DepthToSpace
armnn::BaseDescriptor
Base class for all descriptors.
Definition: Descriptors.hpp:22
armnn::RefLayerSupport::IsCastSupported
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:810
armnn::RefLayerSupport::IsFullyConnectedSupported
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1488
armnn::LayerType::DetectionPostProcess
@ DetectionPostProcess
armnn::LayerType::MemImport
@ MemImport
armnn::LayerType::Pooling2d
@ Pooling2d
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:198
armnn::LayerType::Division
@ Division
armnn::TypeIs
Definition: LayerSupportRules.hpp:102
armnn::DataType::Signed32
@ Signed32
armnn::LayerType::Shape
@ Shape
armnn::RefLayerSupport::IsOutputSupported
bool IsOutputSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2085
armnn::RefLayerSupport::IsDebugSupported
bool IsDebugSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1130
armnn::BatchToSpaceNdDescriptor
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
Definition: Descriptors.hpp:875
armnn::Convolution2dDescriptor
A Convolution2dDescriptor for the Convolution2dLayer.
Definition: Descriptors.hpp:534
armnn::ActivationFunction::Abs
@ Abs
armnn::ComparisonDescriptor
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
armnn::FillDescriptor
A FillDescriptor for the FillLayer.
Definition: Descriptors.hpp:925
armnn::RefLayerSupport::IsConvertFp16ToFp32Supported
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:953
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::ElementwiseUnaryDescriptor::m_Operation
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
Definition: Descriptors.hpp:145
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::LayerType::Gather
@ Gather
armnn::RefLayerSupport::IsLogicalBinarySupported
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
Definition: RefLayerSupport.cpp:1689
armnn::LayerType::Pooling3d
@ Pooling3d
armnn::RefLayerSupport::IsDivisionSupported
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1335
armnn::BiasAndWeightsTypesCompatible
Definition: LayerSupportRules.hpp:126
armnn::LayerType::LogSoftmax
@ LogSoftmax
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::RefLayerSupport::IsNormalizationSupported
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2053
armnn::LayerType::Cast
@ Cast
armnn::LayerType::BatchToSpaceNd
@ BatchToSpaceNd
armnn::LstmDescriptor
An LstmDescriptor for the LstmLayer.
Definition: Descriptors.hpp:1081
armnn::StridedSliceDescriptor
A StridedSliceDescriptor for the StridedSliceLayer.
Definition: Descriptors.hpp:1282
armnn::MeanDescriptor::m_Axis
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
Definition: Descriptors.hpp:1169
armnn::LstmInputParamsInfo::GetInputToOutputWeights
const TensorInfo & GetInputToOutputWeights() const
Definition: LstmParams.hpp:133
armnn::LstmDescriptor::m_CifgEnabled
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
Definition: Descriptors.hpp:1125
armnn::RefLayerSupport::IsPreluSupported
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2670
armnn::RefLayerSupport::IsFillSupported
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1441
armnn::LogicalBinaryDescriptor
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
Definition: Descriptors.hpp:1497
armnn::LayerType::Reshape
@ Reshape
armnn::LayerType::SpaceToBatchNd
@ SpaceToBatchNd
armnn::LstmInputParamsInfo::GetOutputGateBias
const TensorInfo & GetOutputGateBias() const
Definition: LstmParams.hpp:177
armnn::LayerType::Fill
@ Fill
armnn::LayerType::L2Normalization
@ L2Normalization
armnn::LstmDescriptor::m_LayerNormEnabled
bool m_LayerNormEnabled
Enable/disable layer normalization.
Definition: Descriptors.hpp:1131
armnn::LstmInputParamsInfo::GetCellToOutputWeights
const TensorInfo & GetCellToOutputWeights() const
Definition: LstmParams.hpp:161
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::RefLayerSupport::IsMultiplicationSupported
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2016
armnn::LayerType::Minimum
@ Minimum
armnn::IsSupportedForDataTypeGeneric
bool IsSupportedForDataTypeGeneric(Optional< std::string & > reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
Definition: LayerSupportCommon.hpp:27
armnn::LayerType::UnidirectionalSequenceLstm
@ UnidirectionalSequenceLstm
armnn::RefLayerSupport::IsStridedSliceSupported
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2605
armnn::LayerType::Map
@ Map
armnn::LayerType::ReverseV2
@ ReverseV2
armnn::RefLayerSupport::IsMemCopySupported
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1950
armnn::OriginsDescriptor
An OriginsDescriptor for the ConcatLayer.
Definition: Descriptors.hpp:201
armnn::LstmInputParamsInfo::GetOutputLayerNormWeights
const TensorInfo & GetOutputLayerNormWeights() const
Definition: LstmParams.hpp:201
armnn::ActivationFunction::ReLu
@ ReLu
armnn::LayerType::MemCopy
@ MemCopy
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::ElementwiseUnaryDescriptor
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:129
armnn::TransposeConvolution2dDescriptor
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
Definition: Descriptors.hpp:1419
Types.hpp
armnn::LayerType::ArgMinMax
@ ArgMinMax
armnn::LayerType::Pad
@ Pad
armnn::RefLayerSupport::IsConstantSupported
bool IsConstantSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:935
armnn::TypesAreEqual
Definition: LayerSupportRules.hpp:72
armnn::LstmInputParamsInfo
Definition: LstmParams.hpp:63
armnn::LayerType::Rank
@ Rank
armnn::RefLayerSupport::IsChannelShuffleSupported
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:839
armnn::LayerType::Mean
@ Mean
armnn::LstmDescriptor::m_ProjectionEnabled
bool m_ProjectionEnabled
Enable/disable the projection layer.
Definition: Descriptors.hpp:1129
armnn::RefLayerSupport::IsShapeSupported
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2408
armnn::ActivationFunction::Square
@ Square
armnn::RefLayerSupport::IsTransposeSupported
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2801
armnn::LayerType::Input
@ Input
armnn::LayerType::Resize
@ Resize
armnn::DetectionPostProcessDescriptor
Definition: Descriptors.hpp:713
armnn::RefLayerSupport::IsConvertFp32ToFp16Supported
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:973
armnn::RefLayerSupport::IsGatherSupported
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1585
armnn::LstmInputParamsInfo::GetProjectionWeights
const TensorInfo & GetProjectionWeights() const
Definition: LstmParams.hpp:181
LayerSupportRules.hpp
armnn::RefLayerSupport::IsStackSupported
bool IsStackSupported(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2572
armnn::CheckSupportRule
bool CheckSupportRule(F rule, Optional< std::string & > reasonIfUnsupported, const char *reason)
Definition: LayerSupportRules.hpp:38
armnn::DataType::Signed64
@ Signed64
armnn::RefLayerSupport::IsLogSoftmaxSupported
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
Definition: RefLayerSupport.cpp:1714
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::Pooling2dDescriptor
A Pooling2dDescriptor for the Pooling2dLayer.
Definition: Descriptors.hpp:371
armnn::LayerType::FakeQuantization
@ FakeQuantization
armnn::LayerType::Maximum
@ Maximum
armnn::LayerType::Activation
@ Activation
armnn::DepthwiseConvolution2dDescriptor
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
Definition: Descriptors.hpp:659
armnn::TypeAnyOf
Definition: LayerSupportRules.hpp:90
armnn::LayerType::Lstm
@ Lstm
armnn::LayerType::Dequantize
@ Dequantize
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::ReduceDescriptor
A ReduceDescriptor for the REDUCE operators.
Definition: Descriptors.hpp:1517
armnn::RefLayerSupport::IsSoftmaxSupported
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2455
armnn::FakeQuantizationDescriptor
A FakeQuantizationDescriptor for the FakeQuantizationLayer.
Definition: Descriptors.hpp:906
armnn::LstmInputParamsInfo::m_ProjectionBias
const TensorInfo * m_ProjectionBias
Definition: LstmParams.hpp:105
armnn::RefLayerSupport::IsSliceSupported
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2426
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:483
armnn::LayerType::Unmap
@ Unmap
armnn::MeanDescriptor
A MeanDescriptor for the MeanLayer.
Definition: Descriptors.hpp:1151
armnn::LstmInputParamsInfo::GetForgetLayerNormWeights
const TensorInfo & GetForgetLayerNormWeights() const
Definition: LstmParams.hpp:193
armnn::RefLayerSupport::IsPadSupported
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2091
armnn::LayerType::QLstm
@ QLstm
armnn::OptionalReferenceSwitch< std::is_reference< T >::value, T >::value
const T & value() const
Definition: Optional.hpp:146
armnn::RefLayerSupport::IsGatherNdSupported
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1554
armnn::TileDescriptor
Definition: Descriptors.hpp:1619
armnn::RefLayerSupport::IsUnidirectionalSequenceLstmSupported
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2832
armnn::RefLayerSupport::IsInstanceNormalizationSupported
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1624
armnn::LstmInputParamsInfo::GetCellLayerNormWeights
const TensorInfo & GetCellLayerNormWeights() const
Definition: LstmParams.hpp:197
armnn::SoftmaxDescriptor
A SoftmaxDescriptor for the SoftmaxLayer.
Definition: Descriptors.hpp:177
armnn::ShapesAreSameTotalSize
Definition: LayerSupportRules.hpp:146
armnn::SpaceToDepthDescriptor
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
Definition: Descriptors.hpp:1054
armnn::OptionalBase::has_value
bool has_value() const noexcept
Definition: Optional.hpp:53
armnn::TensorNumDimensionsAreGreaterOrEqualTo
Definition: LayerSupportRules.hpp:189
armnn::RefLayerSupport::IsMinimumSupported
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1979
armnn::LayerType::Output
@ Output
armnn::LayerType::Constant
@ Constant
armnn::LstmInputParamsInfo::GetInputToForgetWeights
const TensorInfo & GetInputToForgetWeights() const
Definition: LstmParams.hpp:125
armnn::ActivationFunction::Sigmoid
@ Sigmoid