ArmNN
 22.08
RefLayerSupport.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "RefLayerSupport.hpp"
7 
8 #include <armnn/TypesUtils.hpp>
9 #include <armnn/Types.hpp>
13 
14 #include <LayerSupportCommon.hpp>
16 
17 #include <vector>
18 #include <array>
19 
20 namespace armnn
21 {
22 
23 namespace
24 {
25 
26 template<typename Float32Func, typename Uint8Func, typename ... Params>
27 bool IsSupportedForDataTypeRef(Optional<std::string&> reasonIfUnsupported,
28  DataType dataType,
29  Float32Func floatFuncPtr,
30  Uint8Func uint8FuncPtr,
31  Params&&... params)
32 {
33  return IsSupportedForDataTypeGeneric(reasonIfUnsupported,
34  dataType,
35  &FalseFunc<Params...>,
36  floatFuncPtr,
37  uint8FuncPtr,
38  &FalseFunc<Params...>,
39  &FalseFunc<Params...>,
40  std::forward<Params>(params)...);
41 }
42 
43 } // anonymous namespace
44 
45 namespace
46 {
47 
// Builds the diagnostic emitted when a tensor's rank does not match what a
// reference layer expects, e.g.:
//   "Reference batchToSpaceNd: Expected 4 dimensions but got 2 dimensions
//    instead, for the 'input' tensor."
//
// layerStr / tensorName are read-only, so they are taken by const reference
// (the previous non-const references forced callers to hold mutable lvalues).
// The produced message text is unchanged.
std::string CreateIncorrectDimensionsErrorMsg(unsigned int expected,
                                              unsigned int actual,
                                              const std::string& layerStr,
                                              const std::string& tensorName)
{
    return "Reference " + layerStr + ": Expected " + std::to_string(expected) +
           " dimensions but got " + std::to_string(actual) +
           " dimensions instead, for the '" + tensorName + "' tensor.";
}
58 
59 } // anonymous namespace
60 
62  const std::vector<TensorInfo>& infos,
63  const BaseDescriptor& descriptor,
64  const Optional<LstmInputParamsInfo>& lstmParamsInfo,
65  const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmInputParamsInfo,
66  Optional<std::string&> reasonIfUnsupported) const
67 {
68  switch (type)
69  {
71  return IsActivationSupported(infos[0],
72  infos[1],
73  *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
74  reasonIfUnsupported);
76  return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
78  return IsArgMinMaxSupported(infos[0],
79  infos[1],
80  *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
81  reasonIfUnsupported);
83  return IsBatchMatMulSupported(infos[0],
84  infos[1],
85  infos[2],
86  *(PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor)),
87  reasonIfUnsupported);
89  return IsBatchNormalizationSupported(infos[0],
90  infos[1],
91  infos[2],
92  infos[3],
93  infos[4],
94  infos[5],
95  *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
96  (&descriptor)),
97  reasonIfUnsupported);
99  return IsBatchToSpaceNdSupported(infos[0],
100  infos[1],
101  *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
102  reasonIfUnsupported);
104  return IsComparisonSupported(infos[0],
105  infos[1],
106  infos[2],
107  *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
108  reasonIfUnsupported);
109  case LayerType::Concat:
110  {
111  std::vector<const TensorInfo*> inputInfos;
112  for (uint32_t i = 0; i < (infos.size() - 1); i++)
113  {
114  inputInfos.push_back(&infos[i]);
115  }
116  return IsConcatSupported(inputInfos,
117  infos[infos.size() - 1],
118  *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
120  }
121  case LayerType::Constant:
122  return IsConstantSupported(infos[0], reasonIfUnsupported);
124  return IsConvertBf16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
126  return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
128  return IsConvertFp32ToBf16Supported(infos[0], infos[1], reasonIfUnsupported);
130  return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
132  {
133  if (infos.size() != 4)
134  {
135  throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
136  "TensorInfos should be of format: {input, output, weights, biases}.");
137  }
138 
139  auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
140  if (infos[3] == TensorInfo())
141  {
142  return IsConvolution2dSupported(infos[0],
143  infos[1],
144  desc,
145  infos[2],
146  EmptyOptional(),
147  reasonIfUnsupported);
148  }
149  else
150  {
151  return IsConvolution2dSupported(infos[0],
152  infos[1],
153  desc,
154  infos[2],
155  infos[3],
156  reasonIfUnsupported);
157  }
158  }
160  return IsDepthToSpaceSupported(infos[0],
161  infos[1],
162  *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
163  reasonIfUnsupported);
165  {
166  if (infos.size() != 4)
167  {
168  throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
169  "TensorInfos should be of format: {input, output, weights, biases}.");
170  }
171 
172  auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
173  if (infos[3] == TensorInfo())
174  {
175  return IsDepthwiseConvolutionSupported(infos[0],
176  infos[1],
177  desc,
178  infos[2],
179  EmptyOptional(),
180  reasonIfUnsupported);
181  }
182  else
183  {
184  return IsDepthwiseConvolutionSupported(infos[0],
185  infos[1],
186  desc,
187  infos[2],
188  infos[3],
189  reasonIfUnsupported);
190  }
191  }
193  return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
194  case LayerType::Division:
195  return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
197  return IsElementwiseUnarySupported(infos[0],
198  infos[1],
199  *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
200  reasonIfUnsupported);
201  case LayerType::Fill:
202  return IsFillSupported(infos[0],
203  infos[1],
204  *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
205  reasonIfUnsupported);
206  case LayerType::Floor:
207  return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
209  return IsFullyConnectedSupported(infos[0],
210  infos[1],
211  infos[2],
212  infos[3],
213  *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
214  reasonIfUnsupported);
215  case LayerType::Gather:
216  return IsGatherSupported(infos[0],
217  infos[1],
218  infos[2],
219  *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
220  reasonIfUnsupported);
221  case LayerType::GatherNd:
222  return IsGatherNdSupported(infos[0],
223  infos[1],
224  infos[2],
225  reasonIfUnsupported);
226  case LayerType::Input:
227  return IsInputSupported(infos[0], reasonIfUnsupported);
229  return IsInstanceNormalizationSupported(infos[0],
230  infos[1],
231  *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
232  (&descriptor)),
233  reasonIfUnsupported);
235  return IsL2NormalizationSupported(infos[0],
236  infos[1],
237  *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
238  reasonIfUnsupported);
240  return IsLogicalBinarySupported(infos[0],
241  infos[1],
242  infos[2],
243  *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
244  reasonIfUnsupported);
246  return IsLogSoftmaxSupported(infos[0],
247  infos[1],
248  *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
249  reasonIfUnsupported);
250  case LayerType::Lstm:
251  return IsLstmSupported(infos[0],
252  infos[1],
253  infos[2],
254  infos[3],
255  infos[4],
256  infos[5],
257  infos[6],
258  *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
259  lstmParamsInfo.value(),
261  case LayerType::QLstm:
262  return IsQLstmSupported(infos[0],
263  infos[1],
264  infos[2],
265  infos[3],
266  infos[4],
267  infos[5],
268  *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
269  lstmParamsInfo.value(),
271  case LayerType::Maximum:
272  return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
273  case LayerType::Mean:
274  return IsMeanSupported(infos[0],
275  infos[1],
276  *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
277  reasonIfUnsupported);
278  case LayerType::Minimum:
279  return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
281  return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
283  return IsNormalizationSupported(infos[0],
284  infos[1],
285  *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
286  reasonIfUnsupported);
287  case LayerType::Output:
288  return IsOutputSupported(infos[0], reasonIfUnsupported);
289  case LayerType::Pad:
290  return IsPadSupported(infos[0],
291  infos[1],
292  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
293  reasonIfUnsupported);
294  case LayerType::Permute:
295  return IsPermuteSupported(infos[0],
296  infos[1],
297  *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
298  reasonIfUnsupported);
300  return IsPooling2dSupported(infos[0],
301  infos[1],
302  *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
303  reasonIfUnsupported);
304  case LayerType::Prelu:
305  return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
306  case LayerType::Quantize:
307  return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
308  case LayerType::Reshape:
309  return IsReshapeSupported(infos[0],
310  infos[1],
311  *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
312  reasonIfUnsupported);
313  case LayerType::Resize:
314  return IsResizeSupported(infos[0],
315  infos[1],
316  *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
317  reasonIfUnsupported);
318  case LayerType::Reduce:
319  return IsReduceSupported(infos[0],
320  infos[1],
321  *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
322  reasonIfUnsupported);
323  case LayerType::Slice:
324  return IsSliceSupported(infos[0],
325  infos[1],
326  *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
327  reasonIfUnsupported);
328  case LayerType::Softmax:
329  return IsSoftmaxSupported(infos[0],
330  infos[1],
331  *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
332  reasonIfUnsupported);
334  return IsSpaceToBatchNdSupported(infos[0],
335  infos[1],
336  *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
337  reasonIfUnsupported);
339  return IsSpaceToDepthSupported(infos[0],
340  infos[1],
341  *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
342  reasonIfUnsupported);
343  case LayerType::Splitter:
344  {
345  std::vector<TensorInfo> outputInfos;
346  for (uint32_t i = 1; i < infos.size(); i++)
347  {
348  outputInfos.push_back(infos[i]);
349  }
350  return IsSplitterSupported(infos[0],
351  {outputInfos.begin(), outputInfos.end()},
352  *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
354  }
355  case LayerType::Stack:
356  {
357  std::vector<const TensorInfo*> inputInfos;
358  for (uint32_t i = 0; i < infos.size() - 1; i++)
359  {
360  inputInfos.push_back(&infos[i]);
361  }
362  return IsStackSupported(inputInfos,
363  infos[infos.size() - 1],
364  *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
366  }
368  return IsStridedSliceSupported(infos[0],
369  infos[1],
370  *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
371  reasonIfUnsupported);
373  return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
375  return IsTransposeSupported(infos[0],
376  infos[1],
377  *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
378  reasonIfUnsupported);
380  {
381  if (infos.size() != 4)
382  {
383  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
384  "TensorInfos should be of format: {input, output, weights, biases}.");
385  }
386 
387  auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
388  if (infos[3] == TensorInfo())
389  {
390  return IsTransposeConvolution2dSupported(infos[0],
391  infos[1],
392  desc,
393  infos[2],
394  EmptyOptional(),
395  reasonIfUnsupported);
396  }
397  else
398  {
399  return IsTransposeConvolution2dSupported(infos[0],
400  infos[1],
401  desc,
402  infos[2],
403  infos[3],
404  reasonIfUnsupported);
405  }
406  }
407  case LayerType::Cast:
408  return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
410  return IsChannelShuffleSupported(infos[0],
411  infos[1],
412  *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
413  reasonIfUnsupported);
415  {
416  if (infos.size() != 4)
417  {
418  throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
419  "TensorInfos should be of format: {input, output, weights, biases}.");
420  }
421 
422  auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
423  if (infos[3] == TensorInfo())
424  {
425  return IsConvolution3dSupported(infos[0],
426  infos[1],
427  desc,
428  infos[2],
429  EmptyOptional(),
430  reasonIfUnsupported);
431  }
432  else
433  {
434  return IsConvolution3dSupported(infos[0],
435  infos[1],
436  desc,
437  infos[2],
438  infos[3],
439  reasonIfUnsupported);
440  }
441  }
442  case LayerType::Debug:
443  return IsDebugSupported(infos[0], infos[1], reasonIfUnsupported);
445  return IsDetectionPostProcessSupported(infos[0],
446  infos[1],
447  infos[2],
448  infos[3],
449  infos[4],
450  infos[5],
451  infos[6],
452  *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>
453  (&descriptor)),
454  reasonIfUnsupported);
456  return IsFakeQuantizationSupported(infos[0],
457  *(PolymorphicDowncast<const FakeQuantizationDescriptor*>(&descriptor)),
458  reasonIfUnsupported);
459  case LayerType::MemCopy:
460  return IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
461  case LayerType::Rank:
462  return IsRankSupported(infos[0], infos[1], reasonIfUnsupported);
463  case LayerType::Shape:
464  return IsShapeSupported(infos[0], infos[1], reasonIfUnsupported);
466  {
467  if (infos.size() != 6)
468  {
469  throw InvalidArgumentException("Invalid number of UnidirectionalSequenceLstm TensorInfos. TensorInfos "
470  "should be of format: {input, outputStateIn, cellStateIn, "
471  "hiddenStateOutputVal, cellStateOutputVal, output}");
472  }
473  auto desc = *(PolymorphicDowncast<const UnidirectionalSequenceLstmDescriptor*>(&descriptor));
475  infos[1],
476  infos[2],
477  infos[3],
478  infos[4],
479  infos[5],
480  desc,
481  lstmParamsInfo.value(),
483  }
485  return IsPooling3dSupported(infos[0],
486  infos[1],
487  *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
488  reasonIfUnsupported);
489  case LayerType::Map:
490  return true;
491  case LayerType::Unmap:
492  return true;
494  return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
495  case LayerType::Merge:
496  return LayerSupportBase::IsMergeSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
499  infos[1],
500  infos[2],
501  infos[3],
502  infos[4],
503  quantizedLstmInputParamsInfo.value(),
505  default:
506  // layers not supported in neon by default:
507  // precompiled, standin, switch
508  return false;
509  }
510 }
511 
513  const TensorInfo& output,
516 {
517  bool supported = true;
518 
519  // Define supported types.
520  std::array<DataType,6> supportedTypes = {
527  };
528 
529  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
530  "Reference activation: input type not supported.");
531 
532  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
533  "Reference activation: output type not supported.");
534 
535  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
536  "Reference activation: input and output types mismatched.");
537 
538  supported &= CheckSupportRule(ShapesAreSameRank(input, output), reasonIfUnsupported,
539  "Reference activation: input and output shapes are of different rank.");
540 
541 
542  struct ActivationFunctionSupported : public Rule
543  {
544  ActivationFunctionSupported(const ActivationDescriptor& desc)
545  {
546  switch(desc.m_Function)
547  {
560  {
561  m_Res = true;
562  break;
563  }
564  default:
565  {
566  m_Res = false;
567  break;
568  }
569  }
570  }
571  };
572 
573  // Function is supported
574  supported &= CheckSupportRule(ActivationFunctionSupported(descriptor), reasonIfUnsupported,
575  "Reference activation: function not supported.");
576 
577  return supported;
578 }
579 
581  const TensorInfo& input1,
582  const TensorInfo& output,
584 {
585  bool supported = true;
586 
587  std::array<DataType,7> supportedTypes = {
595  };
596 
597  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
598  "Reference addition: input 0 is not a supported type.");
599 
600  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
601  "Reference addition: input 1 is not a supported type.");
602 
603  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
604  "Reference addition: output is not a supported type.");
605 
606  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
607  "Reference addition: input 0 and Input 1 types are mismatched");
608 
609  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
610  "Reference addition: input and output types are mismatched");
611 
612  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
613  "Reference addition: shapes are not suitable for implicit broadcast.");
614 
615  return supported;
616 }
617 
621 {
622  IgnoreUnused(descriptor);
623 
624  std::array<DataType, 8> supportedInputTypes =
625  {
634  };
635 
636  std::array<DataType,2> supportedOutputTypes = {
638  DataType::Signed64
639  };
640 
641  bool supported = true;
642 
643  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
644  "Reference ArgMinMax: input is not a supported type.");
645  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
646  "Reference ArgMinMax: output type not supported");
647 
648  return supported;
649 }
650 
652  const TensorInfo& inputY,
653  const TensorInfo& output,
656 {
657  IgnoreUnused(descriptor);
658 
659  std::array<DataType, 6> supportedTypes =
660  {
667  };
668 
669  bool supported = true;
670 
671  supported &= CheckSupportRule(TypeAnyOf(inputX, supportedTypes), reasonIfUnsupported,
672  "Reference batch matrix multiplication: input X is not a supported type");
673 
674  supported &= CheckSupportRule(TypeAnyOf(inputY, supportedTypes), reasonIfUnsupported,
675  "Reference batch matrix multiplication: input Y is not a supported type");
676 
677  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
678  "Reference batch matrix multiplication: output is not a supported type");
679 
680  supported &= CheckSupportRule(TypesAreEqual(inputX, inputY), reasonIfUnsupported,
681  "Reference batch matrix multiplication: input X and input Y types are mismatched");
682 
683  supported &= CheckSupportRule(TypesAreEqual(inputX, output), reasonIfUnsupported,
684  "Reference batch matrix multiplication: inputs and output types are mismatched");
685 
687  reasonIfUnsupported,
688  "Reference batch matrix multiplication: input X is not of rank 2 or greater");
689 
691  reasonIfUnsupported,
692  "Reference batch matrix multiplication: input Y is not of rank 2 or greater");
693 
694  return supported;
695 }
696 
698  const TensorInfo& output,
699  const TensorInfo& mean,
700  const TensorInfo& variance,
701  const TensorInfo& beta,
702  const TensorInfo& gamma,
705 {
706  IgnoreUnused(descriptor);
707 
708  std::array<DataType, 6> supportedTypes =
709  {
716  };
717 
718  bool supported = true;
719 
720  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
721  "Reference batch normalization: input is not a supported type.");
722 
723  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
724  "Reference batch normalization: output is not a supported type.");
725 
726  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
727  "Reference batch normalization: input and output types are mismatched");
728 
729  supported &= CheckSupportRule(TypeAnyOf(mean, supportedTypes), reasonIfUnsupported,
730  "Reference batch normalization: mean is not a supported type.");
731 
732  supported &= CheckSupportRule(TypeAnyOf(variance, supportedTypes), reasonIfUnsupported,
733  "Reference batch normalization: variance is not a supported type.");
734 
735  supported &= CheckSupportRule(TypeAnyOf(beta, supportedTypes), reasonIfUnsupported,
736  "Reference batch normalization: beta is not a supported type.");
737 
738  supported &= CheckSupportRule(TypeAnyOf(gamma, supportedTypes), reasonIfUnsupported,
739  "Reference batch normalization: gamma is not a supported type.");
740 
741  return supported;
742 }
743 
745  const TensorInfo& output,
748 {
749  IgnoreUnused(descriptor);
750 
751  bool supported = true;
752 
753  std::string batchToSpaceNdLayerStr = "batchToSpaceNd";
754  std::string inputTensorStr = "input";
755  std::string outputTensorStr = "output";
756 
757  // Define supported types.
758  std::array<DataType,6> supportedTypes =
759  {
766  };
767 
768  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
769  "Reference BatchToSpaceNd: input type not supported.");
770 
771  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
772  "Reference BatchToSpaceNd: output type not supported.");
773 
774  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
775  "Reference BatchToSpaceNd: input and output types mismatched.");
776 
777  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 4),
778  reasonIfUnsupported,
779  CreateIncorrectDimensionsErrorMsg(4,
780  output.GetNumDimensions(),
781  batchToSpaceNdLayerStr,
782  outputTensorStr).data());
783 
784  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(input, 4),
785  reasonIfUnsupported,
786  CreateIncorrectDimensionsErrorMsg(4,
787  input.GetNumDimensions(),
788  batchToSpaceNdLayerStr,
789  inputTensorStr).data());
790 
791  return supported;
792 }
793 
795  const TensorInfo& output,
797 {
798  std::array<DataType, 9> supportedInputTypes =
799  {
808  };
809 
810  bool supported = true;
811  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
812  "Reference cast: input is not a supported type");
813 
814 
815  supported &= CheckSupportRule(TypeAnyOf(output, supportedInputTypes), reasonIfUnsupported,
816  "Reference cast: output is not a supported type");
817 
818  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
819  "Reference cast: input and output shapes have different number of total elements");
820 
821  return supported;
822 }
823 
825  const TensorInfo& output,
828 {
829  IgnoreUnused(descriptor);
830  bool supported = true;
831 
832  // Define supported output and inputs types.
833  std::array<DataType, 7> supportedTypes =
834  {
842  };
843 
844  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
845  "Reference ChannelShuffle: input is not a supported type.");
846 
847  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
848  "Reference ChannelShuffle: output is not a supported type.");
849 
850  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
851  "Reference ChannelShuffle: input and output types are mismatched.");
852 
853  return supported;
854 }
855 
856 
858  const TensorInfo& input1,
859  const TensorInfo& output,
862 {
863  IgnoreUnused(descriptor);
864  std::array<DataType, 8> supportedInputTypes =
865  {
874  };
875 
876  bool supported = true;
877  supported &= CheckSupportRule(TypeAnyOf(input0, supportedInputTypes), reasonIfUnsupported,
878  "Reference comparison: input 0 is not a supported type");
879 
880  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
881  "Reference comparison: input 0 and Input 1 types are mismatched");
882 
883  supported &= CheckSupportRule(TypeIs(output, DataType::Boolean), reasonIfUnsupported,
884  "Reference comparison: output is not of type Boolean");
885 
886  return supported;
887 }
888 
889 bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
890  const TensorInfo& output,
893 {
894  IgnoreUnused(descriptor);
895 
896  bool supported = true;
897  std::array<DataType,7> supportedTypes =
898  {
906  };
907 
908  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
909  "Reference concatenation: output type not supported");
910  for (const TensorInfo* input : inputs)
911  {
912  ARMNN_ASSERT(input != nullptr);
913  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
914  "Reference concatenation: input type not supported");
915 
916  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
917  "Reference concatenation: input and output types mismatched.");
918  }
919 
920  return supported;
921 }
922 
925 {
926  std::array<DataType,8> supportedTypes =
927  {
936  };
937 
938  return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
939  "Reference constant: output is not a supported type.");
940 }
941 
943  const TensorInfo& output,
945 {
946  bool supported = true;
947 
948  supported &= CheckSupportRule(TypeIs(input, DataType::BFloat16), reasonIfUnsupported,
949  "Reference for ConvertBf16ToFp32 layer: input type not supported");
950 
951  supported &= CheckSupportRule(TypeIs(output, DataType::Float32), reasonIfUnsupported,
952  "Reference for ConvertBf16ToFp32 layer: output type not supported");
953 
954  return supported;
955 }
956 
958  const TensorInfo& output,
960 {
961  return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
962  input.GetDataType(),
963  &TrueFunc<>,
964  &FalseInputFuncF32<>,
965  &FalseFuncU8<>,
966  &FalseFuncI32<>,
967  &FalseFuncU8<>) &&
968  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
969  output.GetDataType(),
970  &FalseOutputFuncF16<>,
971  &TrueFunc<>,
972  &FalseFuncU8<>,
973  &FalseFuncI32<>,
974  &FalseFuncU8<>));
975 }
976 
978  const TensorInfo& output,
980 {
981  bool supported = true;
982 
983  supported &= CheckSupportRule(TypeIs(input, DataType::Float32), reasonIfUnsupported,
984  "Reference for ConvertFp32ToBf16 layer: input type not supported");
985 
986  supported &= CheckSupportRule(TypeIs(output, DataType::BFloat16), reasonIfUnsupported,
987  "Reference for ConvertFp32ToBf16 layer: output type not supported");
988 
989  return supported;
990 }
991 
993  const TensorInfo& output,
995 {
996  return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
997  input.GetDataType(),
998  &FalseInputFuncF16<>,
999  &TrueFunc<>,
1000  &FalseFuncU8<>,
1001  &FalseFuncI32<>,
1002  &FalseFuncU8<>) &&
1003  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
1004  output.GetDataType(),
1005  &TrueFunc<>,
1006  &FalseOutputFuncF32<>,
1007  &FalseFuncU8<>,
1008  &FalseFuncI32<>,
1009  &FalseFuncU8<>));
1010 }
1011 
1013  const TensorInfo& output,
1015  const TensorInfo& weights,
1018 {
1019  bool supported = true;
1020 
1021  // Define supported types.
1022  std::array<DataType,7> supportedTypes =
1023  {
1031  };
1032 
1033  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1034  "Reference Convolution2d: input is not a supported type.");
1035 
1036  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1037  "Reference Convolution2d: output is not a supported type.");
1038 
1039  // For Convolution2d, we allow to have BFloat16 input with Float32 output for optimization.
1040  if (input.GetDataType() == DataType::BFloat16)
1041  {
1042  if (output.GetDataType() != DataType::BFloat16 && output.GetDataType() != DataType::Float32)
1043  {
1044  reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n";
1045  supported = false;
1046  }
1047  }
1048  else
1049  {
1050  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1051  "Reference Convolution2d: input and output types mismatched.");
1052  }
1053 
1054  const DataType inputType = input.GetDataType();
1055  if (IsQuantized8BitType(inputType))
1056  {
1057  std::array<DataType, 3> supportedWeightTypes =
1058  {
1062  };
1063 
1064  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1065  "Reference Convolution2d: weights type not supported for quantized input.");
1066  }
1067  else
1068  {
1069  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1070  "Reference Convolution2d: weights is not a supported type.");
1071 
1072  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1073  "Reference Convolution2d: input and weights types mismatched.");
1074  }
1075 
1076  if (biases.has_value())
1077  {
1078  std::array<DataType,4> biasesSupportedTypes =
1079  {
1084  };
1085 
1086  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1087  "Reference Convolution2d: biases is not a supported type.");
1088  }
1089  IgnoreUnused(descriptor);
1090 
1091  return supported;
1092 }
1093 
1095  const TensorInfo& output,
1097  const TensorInfo& weights,
1100 {
1101  bool supported = true;
1102 
1103  // Define supported types.
1104  std::array<DataType,7> supportedTypes =
1105  {
1113  };
1114 
1115  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1116  "Reference Convolution3d: input is not a supported type.");
1117 
1118  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1119  "Reference Convolution3d: output is not a supported type.");
1120 
1121  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1122  "Reference Convolution3d: input and output types mismatched.");
1123 
1124  const DataType inputType = input.GetDataType();
1125  if (IsQuantized8BitType(inputType))
1126  {
1127  std::array<DataType, 3> supportedWeightTypes =
1128  {
1132  };
1133 
1134  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1135  "Reference Convolution3d: weights type not supported for quantized input.");
1136  }
1137  else
1138  {
1139  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1140  "Reference Convolution3d: weights is not a supported type.");
1141 
1142  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1143  "Reference Convolution3d: input and weights types mismatched.");
1144  }
1145 
1146  if (biases.has_value())
1147  {
1148  std::array<DataType,4> biasesSupportedTypes =
1149  {
1154  };
1155 
1156  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1157  "Reference Convolution3d: biases is not a supported type.");
1158  }
1159  IgnoreUnused(descriptor);
1160 
1161  return supported;
1162 }
1163 
1165  const TensorInfo& output,
1167 {
1168  bool supported = true;
1169 
1170  std::array<DataType, 8> supportedTypes =
1171  {
1180  };
1181 
1182  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1183  "Reference for Debug layer: input type not supported");
1184 
1185  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1186  "Reference for Debug layer: output type not supported");
1187 
1188  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1189  "Reference for Debug layer: input and output types are mismatched");
1190 
1191  return supported;
1192 }
1193 
// Ref-backend support check for DepthToSpace (presumably
// RefLayerSupport::IsDepthToSpaceSupported — TODO confirm; the signature and
// descriptor parameter lines were dropped by the doc-page extraction, as were
// the 6 DataType enumerators of the initializer below).
// Rule: input/output each one of 6 supported types, and types must match.
1195  const TensorInfo& output,
1198 {
1199  IgnoreUnused(descriptor);
1200  bool supported = true;
1201 
1202  std::array<DataType,6> supportedTypes =
1203  {
1210  };
1211 
1212  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1213  "Reference DepthToSpace: input type not supported");
1214 
1215  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1216  "Reference DepthToSpace: output type not supported");
1217 
1218  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1219  "Reference DepthToSpace: input and output types are mismatched");
1220 
1221  return supported;
1222 }
1223 
// Ref-backend support check for DepthwiseConvolution2d (presumably
// RefLayerSupport::IsDepthwiseConvolutionSupported — TODO confirm; signature,
// descriptor/biases parameter lines and all DataType enumerator lines were
// hyperlinks and are missing from this extraction).
// Rules: input/output in the supported set and equal; quantized-8-bit inputs
// take a narrower weight set, otherwise weights must match the input type;
// optional biases checked against their own 4-type set.
1225  const TensorInfo& output,
1227  const TensorInfo& weights,
1230 {
1231  IgnoreUnused(descriptor);
1232  bool supported = true;
1233 
1234  // Define supported types.
1235  std::array<DataType,7> supportedTypes =
1236  {
1244  };
1245 
1246  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1247  "Reference DepthwiseConvolution2d: input is not a supported type.");
1248 
1249  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1250  "Reference DepthwiseConvolution2d: output is not a supported type.");
1251 
1252  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1253  "Reference DepthwiseConvolution2d: input and output types mismatched.");
1254 
1255  const DataType inputType = input.GetDataType();
// Quantized 8-bit inputs allow per-axis/symmetric weight encodings that need
// not equal the input type, hence the separate branch.
1256  if (IsQuantized8BitType(inputType))
1257  {
1258  std::array<DataType, 3> supportedWeightTypes =
1259  {
1263  };
1264 
1265  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1266  "Reference DepthwiseConvolution2d: weights type not supported for "
1267  "quantized input.");
1268  }
1269  else
1270  {
1271  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1272  "Reference DepthwiseConvolution2d: weights is not a supported type.");
1273 
1274  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1275  "Reference DepthwiseConvolution2d: input and weights types mismatched.");
1276  }
1277 
1278  if (biases.has_value())
1279  {
1280  std::array<DataType,4> biasesSupportedTypes =
1281  {
1286  };
1287  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1288  "Reference DepthwiseConvolution2d: biases is not a supported type.");
1289  }
1290 
1291  return supported;
1292 
1293 }
1294 
// Ref-backend support check for Dequantize (presumably
// RefLayerSupport::IsDequantizeSupported — TODO confirm; signature line and
// most DataType enumerators were dropped by the extraction).
// Rules: input in a 5-type quantized set and NOT per-axis quantized; output
// in a 3-type float set; input/output element counts must match.
1296  const TensorInfo& output,
1298 {
1299  bool supported = true;
1300 
1301  std::array<DataType,5> supportedInputTypes = {
1307  };
1308 
1309  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
1310  "Reference for Dequantize layer: input type not supported.");
1311 
1312  supported &= CheckSupportRule(TypeNotPerAxisQuantized(input), reasonIfUnsupported,
1313  "Reference for Dequantize layer: per-axis quantized input not supported.");
1314 
1315  std::array<DataType,3> supportedOutputTypes = {
1318  DataType::Float16
1319  };
1320 
1321  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
1322  "Reference for Dequantize layer: output type not supported.");
1323 
// Shape equality is only checked by total element count, not dimension-by-
// dimension (ShapesAreSameTotalSize).
1324  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1325  "Reference for Dequantize layer: input/output shapes have different num total "
1326  "elements.");
1327 
1328  return supported;
1329 }
1330 
// Ref-backend support check for DetectionPostProcess (presumably
// RefLayerSupport::IsDetectionPostProcessSupported — TODO confirm; the
// signature/first-parameter lines and enumerator lines were dropped by the
// extraction). Only boxEncodings (input 0) and scores (input 1) are type-
// checked; all other tensors and the descriptor are explicitly ignored.
1332  const TensorInfo& scores,
1333  const TensorInfo& anchors,
1334  const TensorInfo& detectionBoxes,
1336  const TensorInfo& detectionScores,
1337  const TensorInfo& numDetections,
1340 {
1341  IgnoreUnused(anchors, detectionBoxes, detectionClasses, detectionScores, numDetections, descriptor);
1342 
1343  bool supported = true;
1344 
1345  std::array<DataType,6> supportedInputTypes =
1346  {
1353  };
1354 
1355  supported &= CheckSupportRule(TypeAnyOf(boxEncodings, supportedInputTypes), reasonIfUnsupported,
1356  "Reference DetectionPostProcess: input 0 is not a supported type.");
1357 
1358  supported &= CheckSupportRule(TypeAnyOf(scores, supportedInputTypes), reasonIfUnsupported,
1359  "Reference DetectionPostProcess: input 1 is not a supported type.");
1360 
1361  return supported;
1362 }
1363 
// Dilated depthwise convolution delegates entirely to the plain depthwise
// check (presumably RefLayerSupport::IsDilatedDepthwiseConvolutionSupported —
// TODO confirm; the signature/descriptor/biases lines were dropped by the
// extraction).
1365  const TensorInfo& output,
1367  const TensorInfo& weights,
1370 {
1371  return IsDepthwiseConvolutionSupported(input, output, descriptor, weights, biases, reasonIfUnsupported);
1372 }
1373 
// Ref-backend support check for elementwise Division (presumably
// RefLayerSupport::IsDivisionSupported — TODO confirm; signature line and the
// 7 DataType enumerators were dropped by the extraction).
// Rules: all three tensors in the supported set, all same type, and shapes
// broadcast-compatible.
1375  const TensorInfo& input1,
1376  const TensorInfo& output,
1378 {
1379  bool supported = true;
1380 
1381  std::array<DataType,7> supportedTypes = {
1389  };
1390 
1391  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1392  "Reference division: input 0 is not a supported type.");
1393 
1394  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1395  "Reference division: input 1 is not a supported type.");
1396 
1397  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1398  "Reference division: output is not a supported type.");
1399 
1400  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1401  "Reference division: input 0 and Input 1 types are mismatched");
1402 
1403  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1404  "Reference division: input and output types are mismatched");
1405 
1406  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1407  "Reference division: shapes are not suitable for implicit broadcast.");
1408 
1409  return supported;
1410 }
1411 
// Ref-backend support check for ElementwiseUnary (presumably
// RefLayerSupport::IsElementwiseUnarySupported — TODO confirm; signature and
// enumerator lines were dropped by the extraction).
// LogicalNot is restricted to the 1-type logical set (Boolean, per ArmNN
// convention — TODO confirm, the enumerator line is missing); every other
// unary op uses the general 7-type set. Input/output must match in type and
// total element count.
1413  const TensorInfo& output,
1416 {
// NOTE(review): IgnoreUnused(descriptor) is redundant — descriptor.m_Operation
// is read below; harmless but could be removed in the real source.
1417  IgnoreUnused(descriptor);
1418 
1419  std::array<DataType, 7> supportedTypes =
1420  {
1428  };
1429 
1430  std::array<DataType, 1> logicalSupportedTypes =
1431  {
1433  };
1434 
1435  bool supported = true;
1436 
1437  if (descriptor.m_Operation == UnaryOperation::LogicalNot)
1438  {
1439  supported &= CheckSupportRule(TypeAnyOf(input, logicalSupportedTypes), reasonIfUnsupported,
1440  "Reference elementwise unary: input type not supported");
1441 
1442  supported &= CheckSupportRule(TypeAnyOf(output, logicalSupportedTypes), reasonIfUnsupported,
1443  "Reference elementwise unary: output type not supported");
1444  }
1445  else
1446  {
1447  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1448  "Reference elementwise unary: input type not supported");
1449 
1450  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1451  "Reference elementwise unary: output type not supported");
1452  }
1453 
1454  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1455  "Reference elementwise unary: input and output types not matching");
1456 
1457  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1458  "Reference elementwise unary: input and output shapes"
1459  "have different number of total elements");
1460 
1461  return supported;
1462 }
1463 
// Ref-backend support check for FakeQuantization (presumably
// RefLayerSupport::IsFakeQuantizationSupported — TODO confirm; the entire
// signature and the single DataType enumerator were dropped by the
// extraction). Only the input type is validated, against a 1-type set.
1467 {
1468  IgnoreUnused(descriptor);
1469  bool supported = true;
1470 
1471  std::array<DataType,1> supportedTypes =
1472  {
1474  };
1475 
1476  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1477  "Reference fake quantization: input type not supported.");
1478 
1479  return supported;
1480 }
1481 
// Ref-backend support check for Fill (presumably
// RefLayerSupport::IsFillSupported — TODO confirm; signature line and the 3
// output-type enumerators were dropped by the extraction).
// Rules: input (the shape tensor) must be exactly Signed32; output must be in
// a 3-type set.
1483  const TensorInfo& output,
1484  const FillDescriptor& descriptor,
1486 {
1487  IgnoreUnused(descriptor);
// NOTE(review): IgnoreUnused(output) is redundant — output is checked below;
// harmless but misleading.
1488  IgnoreUnused(output);
1489 
1490  bool supported = true;
1491 
1492  std::array<DataType,3> supportedTypes =
1493  {
1497  };
1498 
1499  supported &= CheckSupportRule(TypeIs(input, DataType::Signed32), reasonIfUnsupported,
1500  "Reference Fill: input type not supported.");
1501 
1502  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1503  "Reference Fill: output type not supported.");
1504  return supported;
1505 }
1506 
// Ref-backend support check for Floor (presumably
// RefLayerSupport::IsFloorSupported — TODO confirm; signature line and the 3
// DataType enumerators were dropped by the extraction).
// Rules: input and output each in the 3-type set (no input==output equality
// check is performed here).
1508  const TensorInfo& output,
1510 {
// NOTE(review): IgnoreUnused(output) is redundant — output is checked below.
1511  IgnoreUnused(output);
1512  bool supported = true;
1513 
1514  std::array<DataType,3> supportedTypes =
1515  {
1519  };
1520 
1521  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1522  "Reference Floor: input type not supported.");
1523 
1524  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1525  "Reference Floor: output type not supported.");
1526 
1527  return supported;
1528 }
1529 
// Ref-backend support check for FullyConnected (presumably
// RefLayerSupport::IsFullyConnectedSupported — TODO confirm; signature line
// and all DataType enumerator lines were dropped by the extraction).
// Rules: input/output/weights in the supported set; BFloat16 input may pair
// with Float32 output as a special case, otherwise input==output; weights
// must match input; when bias is enabled it is checked against a 5-type set,
// must pair correctly with the weight type, and must be 1-D.
1531  const TensorInfo& output,
1532  const TensorInfo& weights,
1533  const TensorInfo& biases,
1536 {
1537  bool supported = true;
1538 
1539  // Define supported types.
1540  std::array<DataType,6> supportedTypes =
1541  {
1548  };
1549 
1550  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1551  "Reference Fully Connected: input type not supported.");
1552 
1553  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1554  "Reference Fully Connected: output type not supported.");
1555 
1556  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1557  "Reference Fully Connected: weights type not supported.");
1558 
1559  // For FullyConnected, we allow to have BFloat16 input with Float32 output for optimization.
1560  if (input.GetDataType() == DataType::BFloat16)
1561  {
1562  if (output.GetDataType() != DataType::BFloat16 && output.GetDataType() != DataType::Float32)
1563  {
// NOTE(review): reasonIfUnsupported.value() is dereferenced without a
// has_value() guard, unlike the CheckSupportRule paths — confirm Optional's
// behavior when no reason string was supplied by the caller.
1564  reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n";
1565  supported = false;
1566  }
1567  }
1568  else
1569  {
1570  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1571  "Reference Fully Connected: input and output types mismatched.");
1572  }
1573 
// NOTE(review): duplicate of the weights TypeAnyOf check at source line 1556
// (different message text only) — candidate for removal in the real source.
1574  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1575  "Reference Fully Connected: weights is not a supported type.");
1576 
1577  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1578  "Reference Fully Connected: input and weights types mismatched.");
1579 
1580  if (descriptor.m_BiasEnabled)
1581  {
1582  // Defined supported types for bias
1583  std::array<DataType, 5>
1584  supportedBiasTypes =
1585  {
1591  };
1592 
1593  supported &= CheckSupportRule(TypeAnyOf(biases, supportedBiasTypes), reasonIfUnsupported,
1594  "Reference Fully Connected: bias type not supported.");
1595 
1596  supported &= CheckSupportRule(BiasAndWeightsTypesMatch(biases, weights), reasonIfUnsupported,
1597  "Reference Fully Connected: bias and weight types mismatch.");
1598 
1599  supported &= CheckSupportRule(BiasAndWeightsTypesCompatible(weights, supportedBiasTypes), reasonIfUnsupported,
1600  "Reference Fully Connected: bias type inferred from weights is incompatible.");
1601 
1602  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(biases, 1U), reasonIfUnsupported,
1603  "Reference Fully Connected: bias must have 1 dimension.");
1604 
1605  }
1606 
1607  return supported;
1608 }
1609 
// Ref-backend support check for GatherNd (presumably
// RefLayerSupport::IsGatherNdSupported — TODO confirm; signature line and the
// 7 DataType enumerators were dropped by the extraction).
// Rules: data tensor (input0) and output in the supported set and equal;
// indices tensor (input1) must be Signed32.
1611  const armnn::TensorInfo& input1,
1612  const armnn::TensorInfo& output,
1614 {
1615  bool supported = true;
1616  std::array<DataType,7> supportedTypes =
1617  {
1625  };
1626 
1627  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1628  "Reference GatherNd: input type not supported");
1629 
1630  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1631  "Reference GatherNd: output type not supported");
1632 
1633  supported &= CheckSupportRule(TypeIs(input1, DataType::Signed32), reasonIfUnsupported,
1634  "Reference GatherNd: indices (input1) type not supported");
1635 
1636  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1637  "Reference GatherNd: input and output types not matching");
1638 
1639  return supported;
1640 }
1641 
// Ref-backend support check for Gather (presumably
// RefLayerSupport::IsGatherSupported — TODO confirm; signature/descriptor
// lines and the 7 DataType enumerators were dropped by the extraction).
// Rules: descriptor axis must be 0; data/output in the supported set and
// equal; indices (input1) must be Signed32.
1643  const armnn::TensorInfo& input1,
1644  const armnn::TensorInfo& output,
1647 {
1648  bool supported = true;
1649  std::array<DataType,7> supportedTypes =
1650  {
1658  };
1659 
1660  if (descriptor.m_Axis != 0)
1661  {
// NOTE(review): reasonIfUnsupported.value() dereferenced without has_value()
// guard, unlike the CheckSupportRule paths — confirm Optional semantics.
1662  reasonIfUnsupported.value() += std::string("Reference Gather: axis not supported\n");
1663  supported &= false;
1664  }
1665  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1666  "Reference Gather: input type not supported");
1667 
1668  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1669  "Reference Gather: output type not supported");
1670 
1671  supported &= CheckSupportRule(TypeIs(input1, DataType::Signed32), reasonIfUnsupported,
1672  "Reference Gather: indices (input1) type not supported");
1673 
1674  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1675  "Reference Gather: input and output types not matching");
1676 
1677  return supported;
1678 }
1679 
// Input layers are unconditionally supported on the reference backend
// (presumably RefLayerSupport::IsInputSupported — TODO confirm; the signature
// line naming the function was dropped by the extraction).
1681  Optional<std::string&> /*reasonIfUnsupported*/) const
1682 {
1683  return true;
1684 }
1685 
// Ref-backend support check for InstanceNormalization (presumably
// RefLayerSupport::IsInstanceNormalizationSupported — TODO confirm; signature
// and the 3 DataType enumerators were dropped by the extraction).
// Rules: input/output in the 3-type set, equal in type and total size.
1687  const TensorInfo& output,
1690 {
1691  IgnoreUnused(descriptor);
1692  // Define supported types
1693  std::array<DataType, 3> supportedTypes =
1694  {
1698  };
1699 
1700  bool supported = true;
1701 
1702  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1703  "Reference Instance Normalization: input type not supported.");
1704 
1705  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1706  "Reference Instance Normalization: output type not supported.");
1707 
1708  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1709  "Reference Instance Normalization: input and output types mismatched.");
1710 
1711  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1712  "Reference Instance Normalization: input and output shapes have different "
1713  "num total elements.");
1714 
1715  return supported;
1716 }
1717 
// Ref-backend support check for L2Normalization (presumably
// RefLayerSupport::IsL2NormalizationSupported — TODO confirm; signature and
// the 6 DataType enumerators were dropped by the extraction).
// Rules: input/output in the 6-type set, equal in type and total size.
1719  const TensorInfo& output,
1722 {
1723  IgnoreUnused(descriptor);
1724  // Define supported types
1725  std::array<DataType, 6> supportedTypes =
1726  {
1733  };
1734 
1735  bool supported = true;
1736 
1737  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1738  "Reference L2normalization: input type not supported.");
1739 
1740  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1741  "Reference L2normalization: output type not supported.");
1742 
1743  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1744  "Reference L2normalization: input and output types mismatched.");
1745 
1746  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1747  "Reference L2normalization: input and output shapes have different "
1748  "num total elements.");
1749 
1750  return supported;
1751 }
1752 
// Ref-backend support check for LogicalBinary (presumably
// RefLayerSupport::IsLogicalBinarySupported — TODO confirm; signature and the
// single DataType enumerator (Boolean, per ArmNN convention — TODO confirm)
// were dropped by the extraction).
// NOTE(review): only input0 is compared against output; input1/output type
// equality is not checked — confirm this is intentional.
1754  const TensorInfo& input1,
1755  const TensorInfo& output,
1758 {
1759  IgnoreUnused(descriptor);
1760 
1761  std::array<DataType, 1> supportedTypes =
1762  {
1764  };
1765 
1766  bool supported = true;
1767  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1768  "Reference LogicalBinary: input 0 type not supported");
1769  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1770  "Reference LogicalBinary: input 1 type not supported");
1771 
1772  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1773  "Reference LogicalBinary: input and output types do not match");
1774 
1775  return supported;
1776 }
1777 
// Ref-backend support check for LogSoftmax (presumably
// RefLayerSupport::IsLogSoftmaxSupported — TODO confirm; signature and the 3
// DataType enumerators were dropped by the extraction).
// Rules: input/output in the 3-type set and equal.
1779  const TensorInfo& output,
1782 {
1783  IgnoreUnused(descriptor);
1784 
1785  std::array<DataType, 3> supportedTypes =
1786  {
1790  };
1791 
1792  bool supported = true;
1793  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1794  "Reference LogSoftmax: input type not supported");
1795 
1796  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1797  "Reference LogSoftmax: output type not supported");
1798 
1799  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1800  "Reference LogSoftmax: input and output types do not match");
1801 
1802  return supported;
1803 }
1804 
// Ref-backend support check for Lstm (presumably
// RefLayerSupport::IsLstmSupported — TODO confirm; the signature/first-
// parameter lines and the 3 DataType enumerators were dropped by the
// extraction).
// Strategy: the input tensor's type anchors everything — every state tensor,
// output, and every weight/bias exposed by paramsInfo must have the same type
// as input. Optional parameter groups (CIFG, peephole, projection, layer
// norm) are only checked when the corresponding descriptor flag enables them.
1806  const TensorInfo& outputStateIn,
1807  const TensorInfo& cellStateIn,
1808  const TensorInfo& scratchBuffer,
1809  const TensorInfo& outputStateOut,
1810  const TensorInfo& cellStateOut,
1811  const TensorInfo& output,
1812  const LstmDescriptor& descriptor,
1815 {
// NOTE(review): both IgnoreUnused calls are redundant — descriptor and
// paramsInfo are read extensively below; harmless but misleading.
1816  IgnoreUnused(descriptor);
1817  IgnoreUnused(paramsInfo);
1818 
1819  bool supported = true;
1820 
1821  std::array<DataType,3> supportedTypes = {
1825  };
1826 
1827  // check inputs and outputs
1828  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1829  "Reference Lstm: input is not a supported type.");
1830  supported &= CheckSupportRule(TypesAreEqual(input, outputStateIn), reasonIfUnsupported,
1831  "Reference Lstm: input and outputStateIn types are mismatched");
1832  supported &= CheckSupportRule(TypesAreEqual(input, cellStateIn), reasonIfUnsupported,
1833  "Reference Lstm: input and cellStateIn types are mismatched");
1834  supported &= CheckSupportRule(TypesAreEqual(input, scratchBuffer), reasonIfUnsupported,
1835  "Reference Lstm: input and scratchBuffer types are mismatched");
1836  supported &= CheckSupportRule(TypesAreEqual(input, outputStateOut), reasonIfUnsupported,
1837  "Reference Lstm: input and outputStateOut types are mismatched");
1838  supported &= CheckSupportRule(TypesAreEqual(input, cellStateOut), reasonIfUnsupported,
1839  "Reference Lstm: input and cellStateOut types are mismatched");
1840 
1841  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1842  "Reference Lstm: input and output types are mismatched");
1843  // check layer parameters
1844  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToForgetWeights()), reasonIfUnsupported,
1845  "Reference Lstm: input and InputToForgetWeights types are mismatched");
1846  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToCellWeights()), reasonIfUnsupported,
1847  "Reference Lstm: input and InputToCellWeights types are mismatched");
1848  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToOutputWeights()), reasonIfUnsupported,
1849  "Reference Lstm: input and InputToOutputWeights types are mismatched");
1850  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToForgetWeights()), reasonIfUnsupported,
1851  "Reference Lstm: input and RecurrentToForgetWeights types are mismatched");
1852  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToCellWeights()), reasonIfUnsupported,
1853  "Reference Lstm: input and RecurrentToCellWeights types are mismatched");
1854  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToOutputWeights()), reasonIfUnsupported,
1855  "Reference Lstm: input and RecurrentToOutputWeights types are mismatched");
1856  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetGateBias()), reasonIfUnsupported,
1857  "Reference Lstm: input and ForgetGateBias types are mismatched");
1858  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellBias()), reasonIfUnsupported,
1859  "Reference Lstm: input and CellBias types are mismatched");
1860  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputGateBias()), reasonIfUnsupported,
1861  "Reference Lstm: input and OutputGateBias types are mismatched");
// CIFG disabled => the input-gate parameter group must be present and typed.
1862  if (!descriptor.m_CifgEnabled)
1863  {
1864  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToInputWeights()), reasonIfUnsupported,
1865  "Reference Lstm: input and InputToInputWeights types are mismatched");
1866  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToInputWeights()),
1867  reasonIfUnsupported,
1868  "Reference Lstm: input and RecurrentToInputWeights types are mismatched");
1869  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputGateBias()), reasonIfUnsupported,
1870  "Reference Lstm: input and InputGateBias types are mismatched");
1871  if (descriptor.m_PeepholeEnabled)
1872  {
1873  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToInputWeights()),
1874  reasonIfUnsupported,
1875  "Reference Lstm: input and CellToInputWeights types are mismatched");
1876  }
1877  }
1878  if (descriptor.m_PeepholeEnabled)
1879  {
1880  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToForgetWeights()), reasonIfUnsupported,
1881  "Reference Lstm: input and CellToForgetWeights types are mismatched");
1882  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToOutputWeights()), reasonIfUnsupported,
1883  "Reference Lstm: input and CellToOutputWeights types are mismatched");
1884  }
1885  if (descriptor.m_ProjectionEnabled)
1886  {
1887  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionWeights()), reasonIfUnsupported,
1888  "Reference Lstm: input and mProjectionWeights types are mismatched");
// Projection bias is optional even when projection is enabled.
1889  if (paramsInfo.m_ProjectionBias != nullptr)
1890  {
1891  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
1892  "Reference Lstm: input and ProjectionBias types are mismatched");
1893  }
1894  }
1895  if (descriptor.m_LayerNormEnabled)
1896  {
1897  if (!descriptor.m_CifgEnabled)
1898  {
1899  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputLayerNormWeights()),
1900  reasonIfUnsupported,
1901  "Reference Lstm: input and InputLayerNormWeights types are mismatched");
1902  }
1903  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetLayerNormWeights()),
1904  reasonIfUnsupported,
1905  "Reference Lstm: input and ForgetLayerNormWeights types are mismatched");
1906  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellLayerNormWeights()),
1907  reasonIfUnsupported,
1908  "Reference Lstm: input and CellLayerNormWeights types are mismatched");
1909  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputLayerNormWeights()),
1910  reasonIfUnsupported,
1911  "Reference Lstm: input and OutputLayerNormWeights types are mismatched");
1912  }
1913 
1914  return supported;
1915 }
1916 
// Ref-backend support check for elementwise Maximum (presumably
// RefLayerSupport::IsMaximumSupported — TODO confirm; signature line and the
// 7 DataType enumerators were dropped by the extraction).
// Rules: all three tensors in the supported set, all same type, and shapes
// broadcast-compatible.
1918  const TensorInfo& input1,
1919  const TensorInfo& output,
1921 {
1922  bool supported = true;
1923 
1924  std::array<DataType,7> supportedTypes = {
1932  };
1933 
1934  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1935  "Reference maximum: input 0 is not a supported type.");
1936 
1937  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1938  "Reference maximum: input 1 is not a supported type.");
1939 
1940  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1941  "Reference maximum: output is not a supported type.");
1942 
1943  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1944  "Reference maximum: input 0 and Input 1 types are mismatched");
1945 
1946  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1947  "Reference maximum: input and output types are mismatched");
1948 
1949  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1950  "Reference maximum: shapes are not suitable for implicit broadcast.");
1951 
1952  return supported;
1953 }
1954 
// Ref-backend support check for Mean (presumably
// RefLayerSupport::IsMeanSupported — TODO confirm; signature line and the 6
// DataType enumerators were dropped by the extraction).
// Rules: input in the supported set and equal to output in type; the expected
// output rank depends on the descriptor: keepDims => same rank as input;
// empty axis list => rank 1 (full reduction); otherwise rank = input rank
// minus number of reduced axes, clamped to a minimum of 1.
1956  const TensorInfo& output,
1957  const MeanDescriptor& descriptor,
1959 {
1960  bool supported = true;
1961  std::string meanLayerStr = "Mean";
1962  std::string outputTensorStr = "output";
1963 
1964  std::array<DataType,6> supportedTypes =
1965  {
1972  };
1973 
1974  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1975  "Reference Mean: input type not supported.");
1976 
1977  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1978  "Reference Mean: input and output types are mismatched");
1979 
1980  if (descriptor.m_KeepDims)
1981  {
1982  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, input.GetNumDimensions()),
1983  reasonIfUnsupported,
1984  CreateIncorrectDimensionsErrorMsg(input.GetNumDimensions(),
1985  output.GetNumDimensions(),
1986  meanLayerStr, outputTensorStr).data());
1987  }
1988  else if (descriptor.m_Axis.empty())
1989  {
1990  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1991  reasonIfUnsupported,
1992  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1993  meanLayerStr, outputTensorStr).data());
1994  }
1995  else
1996  {
1997  auto outputDim = input.GetNumDimensions() - armnn::numeric_cast<unsigned int>(descriptor.m_Axis.size());
1998 
1999  if (outputDim > 0)
2000  {
2001  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, outputDim),
2002  reasonIfUnsupported,
2003  CreateIncorrectDimensionsErrorMsg(outputDim, output.GetNumDimensions(),
2004  meanLayerStr, outputTensorStr).data());
2005  }
2006  else
2007  {
// All axes reduced: ArmNN represents the scalar result as a rank-1 tensor.
2008  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
2009  reasonIfUnsupported,
2010  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
2011  meanLayerStr, outputTensorStr).data());
2012  }
2013  }
2014 
2015  return supported;
2016 }
2017 
// Ref-backend support check for MemCopy (presumably
// RefLayerSupport::IsMemCopySupported — TODO confirm; signature line and the
// 7 DataType enumerators were dropped by the extraction).
// Rules: input/output in the 7-type set and equal.
2019  const TensorInfo &output,
2021 {
2022  bool supported = true;
2023 
2024  std::array<DataType,7> supportedTypes =
2025  {
2033  };
2034 
2035  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2036  "Reference MemCopy: input type not supported");
2037 
2038  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2039  "Reference MemCopy: output type not supported");
2040 
2041  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2042  "Reference MemCopy: input and output types are mismatched");
2043 
2044  return supported;
2045 }
2046 
// Ref-backend support check for elementwise Minimum (presumably
// RefLayerSupport::IsMinimumSupported — TODO confirm; signature line and the
// 7 DataType enumerators were dropped by the extraction).
// Rules: all three tensors in the supported set, all same type, and shapes
// broadcast-compatible.
2048  const TensorInfo& input1,
2049  const TensorInfo& output,
2051 {
2052  bool supported = true;
2053 
2054  std::array<DataType,7> supportedTypes = {
2062  };
2063 
2064  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2065  "Reference minimum: input 0 is not a supported type.");
2066 
2067  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2068  "Reference minimum: input 1 is not a supported type.");
2069 
2070  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2071  "Reference minimum: output is not a supported type.");
2072 
2073  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2074  "Reference minimum: input 0 and Input 1 types are mismatched");
2075 
2076  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2077  "Reference minimum: input and output types are mismatched");
2078 
2079  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2080  "Reference minimum: shapes are not suitable for implicit broadcast.");
2081 
2082  return supported;
2083 }
2084 
// Ref-backend support check for elementwise Multiplication (presumably
// RefLayerSupport::IsMultiplicationSupported — TODO confirm; signature line
// and the 7 DataType enumerators were dropped by the extraction).
// Rules: all three tensors in the supported set, all same type, and shapes
// broadcast-compatible.
2086  const TensorInfo& input1,
2087  const TensorInfo& output,
2089 {
2090  bool supported = true;
2091 
2092  std::array<DataType,7> supportedTypes = {
2100  };
2101 
2102  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2103  "Reference multiplication: input 0 is not a supported type.");
2104 
2105  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2106  "Reference multiplication: input 1 is not a supported type.");
2107 
2108  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2109  "Reference multiplication: output is not a supported type.");
2110 
2111  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2112  "Reference multiplication: input 0 and Input 1 types are mismatched");
2113 
2114  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2115  "Reference multiplication: input and output types are mismatched");
2116 
2117  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2118  "Reference multiplication: shapes are not suitable for implicit broadcast.");
2119 
2120  return supported;
2121 }
2122 
// Ref-backend support check for Normalization (presumably
// RefLayerSupport::IsNormalizationSupported — TODO confirm; signature line
// and the 6 DataType enumerators were dropped by the extraction).
// Rules: input/output in the 6-type set and same total element count (note:
// no TypesAreEqual(input, output) rule here, unlike sibling checks).
2124  const TensorInfo& output,
2127 {
2128  IgnoreUnused(descriptor);
2129 
2130  // Define supported types
2131  std::array<DataType, 6> supportedTypes =
2132  {
2139  };
2140 
2141  bool supported = true;
2142 
2143  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2144  "Reference normalization: input type not supported.");
2145 
2146  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2147  "Reference normalization: output type not supported.");
2148 
2149  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
2150  "Reference normalization: input and output shapes have different "
2151  "num total elements.");
2152 
2153  return supported;
2154 }
2155 
// Output layers are unconditionally supported on the reference backend
// (presumably RefLayerSupport::IsOutputSupported — TODO confirm; the
// signature line naming the function was dropped by the extraction).
2157  Optional<std::string&> /*reasonIfUnsupported*/) const
2158 {
2159  return true;
2160 }
2161 
// Ref-backend support check for Pad (presumably
// RefLayerSupport::IsPadSupported — TODO confirm; signature line and the 6
// DataType enumerators were dropped by the extraction).
// Rules: input/output in the 6-type set and equal.
2163  const TensorInfo& output,
2164  const PadDescriptor& descriptor,
2166 {
2167  IgnoreUnused(descriptor);
2168  bool supported = true;
2169 
2170  // Define supported output and inputs types.
2171  std::array<DataType,6> supportedTypes =
2172  {
2179  };
2180 
2181  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2182  "Reference pad: input is not a supported type.");
2183 
2184  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2185  "Reference pad: output is not a supported type.");
2186 
2187  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2188  "Reference pad: input and output types are mismatched.");
2189 
2190  return supported;
2191 }
2192 
// Ref-backend support check for Permute (presumably
// RefLayerSupport::IsPermuteSupported — TODO confirm; signature line and the
// 6 DataType enumerators were dropped by the extraction).
// Rules: input/output in the 6-type set and equal.
2194  const TensorInfo& output,
2197 {
2198  IgnoreUnused(descriptor);
2199  bool supported = true;
2200 
2201  // Define supported output and inputs types.
2202  std::array<DataType, 6> supportedTypes =
2203  {
2210  };
2211 
2212  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2213  "Reference permute: input is not a supported type.");
2214 
2215  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2216  "Reference permute: output is not a supported type.");
2217 
2218  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2219  "Reference permute: input and output types are mismatched.");
2220 
2221  return supported;
2222 }
2223 
// Ref-backend support check for Pooling2d (presumably
// RefLayerSupport::IsPooling2dSupported — TODO confirm; signature line and
// the 6 DataType enumerators were dropped by the extraction).
// Rules: input/output in the 6-type set and equal.
// NOTE(review): the runtime error strings misspell "pooling2d" as
// "poolind2d" — a user-facing typo worth fixing in the real source (string
// text is intentionally left untouched here).
2225  const TensorInfo& output,
2228 {
2229  IgnoreUnused(descriptor);
2230  bool supported = true;
2231 
2232  // Define supported output and inputs types.
2233  std::array<DataType,6> supportedTypes =
2234  {
2241  };
2242 
2243  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2244  "Reference poolind2d: input is not a supported type.");
2245 
2246  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2247  "Reference poolind2d: output is not a supported type.");
2248 
2249  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2250  "Reference poolind2d: input and output types are mismatched.");
2251 
2252  return supported;
2253 }
2254 
// Ref-backend support check for Pooling3d (presumably
// RefLayerSupport::IsPooling3dSupported — TODO confirm; signature line and
// the 6 DataType enumerators were dropped by the extraction).
// Rules: input/output in the 6-type set and equal.
// NOTE(review): runtime strings misspell "pooling3d" as "poolind3d" — same
// user-facing typo family as the 2d variant (left untouched here).
2256  const TensorInfo& output,
2259 {
2260  IgnoreUnused(descriptor);
2261  bool supported = true;
2262 
2263  // Define supported output and inputs types.
2264  std::array<DataType,6> supportedTypes =
2265  {
2272  };
2273 
2274  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2275  "Reference poolind3d: input is not a supported type.");
2276 
2277  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2278  "Reference poolind3d: output is not a supported type.");
2279 
2280  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2281  "Reference poolind3d: input and output types are mismatched.");
2282 
2283  return supported;
2284 }
2285 
2286 
// RefLayerSupport::IsQLstmSupported(input, previousOutputIn, previousCellStateIn,
//                                   outputStateOut, cellStateOut, output,
//                                   QLstmDescriptor, LstmInputParamsInfo,
//                                   reasonIfUnsupported)
// Unconditionally reports QLstm as supported: every argument is explicitly
// discarded via IgnoreUnused and no rule is evaluated. Any type/shape
// validation therefore happens (if at all) elsewhere, e.g. at workload
// creation time — TODO confirm.
2290  const TensorInfo& outputStateOut,
2291  const TensorInfo& cellStateOut,
2292  const TensorInfo& output,
2293  const QLstmDescriptor& descriptor,
2296 {
2297  IgnoreUnused(input);
2298  IgnoreUnused(previousOutputIn);
2299  IgnoreUnused(previousCellStateIn);
2300  IgnoreUnused(outputStateOut);
2301  IgnoreUnused(cellStateOut);
2302  IgnoreUnused(output);
2303  IgnoreUnused(descriptor);
2304  IgnoreUnused(paramsInfo);
2305 
2306  IgnoreUnused(reasonIfUnsupported);
2307 
2308  return true;
2309 }
2310 
// RefLayerSupport::IsQuantizeSupported(input, output, reasonIfUnsupported)
// A Quantize layer is supported when the input is one of seven types, the
// output is one of four quantized types (only QSymmS16 survives in this
// listing; the other entries were elided by the extractor), and both tensors
// carry the same total number of elements. Types are deliberately NOT
// required to match: quantize converts between types.
2312  const TensorInfo& output,
2314 {
2315  bool supported = true;
2316 
2317  // Define supported input types.
2318  std::array<DataType,7> supportedInputTypes = {
2326  };
2327 
2328  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
2329  "Reference quantize: input type not supported.");
2330 
2331  // Define supported output types.
2332  std::array<DataType,4> supportedOutputTypes = {
2336  DataType::QSymmS16
2337  };
2338  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
2339  "Reference quantize: output type not supported.");
2340 
// Shapes need only agree on element count, not on rank/layout.
2341  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
2342  "Reference quantize: input and output shapes have different num total elements.");
2343 
2344  return supported;
2345 }
2346 
// RefLayerSupport::IsRankSupported(input, output, reasonIfUnsupported)
// Rank accepts any input (input is explicitly ignored); only the OUTPUT type
// is constrained to the single entry of supportedOutputTypes (entry elided by
// the extractor; presumably Signed32 since rank is an integer — confirm).
// NOTE(review): the failure message says "input type not supported" although
// it is the output that is being checked.
2348  const TensorInfo& output,
2350 {
2351  IgnoreUnused(input);
2352  // Define supported output types.
2353  std::array<DataType,1> supportedOutputTypes =
2354  {
2356  };
2357 
2358  return CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
2359  "Reference rank: input type not supported.");
2360 }
2361 
// RefLayerSupport::IsReduceSupported(input, output, ReduceDescriptor,
//                                    reasonIfUnsupported)
// Reduce is supported when input and output are each one of seven data types
// (entries elided by the extractor) and both types match. The descriptor
// (axes, operation, keepDims) is ignored for the support decision.
2363  const TensorInfo& output,
2366 {
2367  IgnoreUnused(descriptor);
2368  bool supported = true;
2369  std::array<DataType,7> supportedTypes =
2370  {
2378  };
2379 
2380  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2381  "Reference Reduce: input type not supported");
2382 
2383  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2384  "Reference Reduce: output type not supported");
2385 
2386  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2387  "Reference Reduce: input and output types not matching");
2388 
2389  return supported;
2390 }
2391 
// RefLayerSupport::IsReshapeSupported(input, output, ReshapeDescriptor,
//                                     reasonIfUnsupported)
// Only the INPUT type is checked (against eight types, elided by the
// extractor); output and descriptor are explicitly ignored — reshape never
// changes the element type, so checking the input suffices.
2393  const TensorInfo& output,
2396 {
2397  IgnoreUnused(output);
2398  IgnoreUnused(descriptor);
2399  // Define supported output types.
2400  std::array<DataType,8> supportedOutputTypes =
2401  {
2410  };
2411 
2412  return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
2413  "Reference reshape: input type not supported.");
2414 }
2415 
// RefLayerSupport::IsResizeSupported(input, output, ResizeDescriptor,
//                                    reasonIfUnsupported)
// Resize is supported when input and output are each one of six data types
// (entries elided by the extractor) and both types match. The descriptor
// (method, target size, alignment flags) does not affect support.
2417  const TensorInfo& output,
2420 {
2421  IgnoreUnused(descriptor);
2422  bool supported = true;
2423  std::array<DataType,6> supportedTypes =
2424  {
2431  };
2432 
2433  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2434  "Reference Resize: input type not supported");
2435 
2436  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2437  "Reference Resize: output type not supported");
2438 
2439  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2440  "Reference Resize: input and output types not matching");
2441 
2442  return supported;
2443 }
2444 
// RefLayerSupport::IsShapeSupported(input, output, reasonIfUnsupported)
// Shape accepts any input type (input is explicitly ignored); only the OUTPUT
// is constrained to the single entry of supportedTypes (entry elided by the
// extractor; presumably Signed32 for a shape vector — confirm).
2446  const TensorInfo& output,
2448 {
2449  IgnoreUnused(input);
2450  bool supported = true;
2451 
2452  std::array<DataType, 1> supportedTypes =
2453  {
2455  };
2456 
2457  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2458  "Reference Shape: output type not supported");
2459 
2460  return supported;
2461 }
2462 
// RefLayerSupport::IsSliceSupported(input, output, SliceDescriptor,
//                                   reasonIfUnsupported)
// Slice is supported when input and output are each one of five data types
// (entries elided by the extractor) and both types match. Begin/size values
// in the descriptor are not validated here.
2464  const TensorInfo& output,
2465  const SliceDescriptor& descriptor,
2467 {
2468  IgnoreUnused(descriptor);
2469  bool supported = true;
2470 
2471  std::array<DataType, 5> supportedTypes =
2472  {
2478  };
2479 
2480  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2481  "Reference Slice: input type not supported");
2482 
2483  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2484  "Reference Slice: output type not supported");
2485 
2486  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2487  "Reference Slice: input and output types are mismatched");
2488 
2489  return supported;
2490 }
2491 
2493  const TensorInfo& output,
2496 {
2497  IgnoreUnused(descriptor);
2498  bool supported = true;
2499  std::array<DataType,7> supportedTypes =
2500  {
2508  };
2509 
2510  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2511  "Reference Softmax: output type not supported");
2512 
2513  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2514  "Reference Softmax: input type not supported");
2515 
2516  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2517  "Reference Softmax: input type not supported");
2518 
2519  return supported;
2520 }
2521 
// RefLayerSupport::IsSpaceToBatchNdSupported(input, output,
//                                            SpaceToBatchNdDescriptor,
//                                            reasonIfUnsupported)
// Supported when input and output are each one of six data types (entries
// elided by the extractor) and both types match; the descriptor (block shape,
// padding) is ignored for the support decision.
2523  const TensorInfo& output,
2526 {
2527  IgnoreUnused(descriptor);
2528  bool supported = true;
2529  std::array<DataType,6> supportedTypes =
2530  {
2537  };
2538 
2539  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2540  "Reference SpaceToBatchNd: input type not supported");
2541 
2542  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2543  "Reference SpaceToBatchNd: output type not supported");
2544 
2545  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2546  "Reference SpaceToBatchNd: input and output types are mismatched");
2547 
2548  return supported;
2549 }
2550 
// RefLayerSupport::IsSpaceToDepthSupported(input, output,
//                                          SpaceToDepthDescriptor,
//                                          reasonIfUnsupported)
// Supported when input and output are each one of six data types (entries
// elided by the extractor) and both types match; the descriptor (block size,
// data layout) is ignored for the support decision.
2552  const TensorInfo& output,
2555 {
2556 
2557  IgnoreUnused(descriptor);
2558  bool supported = true;
2559 
2560  std::array<DataType,6> supportedTypes =
2561  {
2568  };
2569 
2570  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2571  "Reference SpaceToDepth: input type not supported");
2572 
2573  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2574  "Reference SpaceToDepth: output type not supported");
2575 
2576  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2577  "Reference SpaceToDepth: input and output types are mismatched");
2578 
2579  return supported;
2580 }
2581 
2583  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
2584  const ViewsDescriptor& descriptor,
2586 {
2587  IgnoreUnused(descriptor);
2588  bool supported = true;
2589  std::array<DataType,6> supportedTypes =
2590  {
2597  };
2598 
2599  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2600  "Reference splitter: output type not supported");
2601  for (const TensorInfo& output : outputs)
2602  {
2603  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2604  "Reference splitter: input type not supported");
2605 
2606  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2607  "Reference splitter: input and output types mismatched.");
2608  }
2609 
2610  return supported;
2611 }
2612 
// Checks whether the reference backend can execute a Stack layer: the output
// and every (non-null) input must be one of seven data types (entries elided
// by the extractor) and each input's type must equal the output's. The
// descriptor (axis, numInputs, inputShape) is ignored for the support
// decision.
2613 bool RefLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
2614  const TensorInfo& output,
2615  const StackDescriptor& descriptor,
2617 {
2618  IgnoreUnused(descriptor);
2619 
2620  bool supported = true;
2621  std::array<DataType,7> supportedTypes =
2622  {
2630  };
2631 
2632  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2633  "Reference stack: output type not supported");
2634  for (const TensorInfo* input : inputs)
2635  {
// Null input pointers are a programming error, not an unsupported layer.
2636  ARMNN_ASSERT(input != nullptr);
2637  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
2638  "Reference stack: input type not supported");
2639 
2640  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
2641  "Reference stack: input and output types mismatched.");
2642  }
2643 
2644  return supported;
2645 }
2646 
// RefLayerSupport::IsStridedSliceSupported(input, output,
//                                          StridedSliceDescriptor,
//                                          reasonIfUnsupported)
// Supported when input and output are each one of five data types (entries
// elided by the extractor) and both types match; begin/end/stride masks in
// the descriptor are not validated here.
2648  const TensorInfo& output,
2651 {
2652  IgnoreUnused(descriptor);
2653  bool supported = true;
2654 
2655  std::array<DataType,5> supportedTypes =
2656  {
2662  };
2663 
2664  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2665  "Reference StridedSlice: input type not supported");
2666 
2667  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2668  "Reference StridedSlice: output type not supported");
2669 
2670  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2671  "Reference StridedSlice: input and output types are mismatched");
2672 
2673  return supported;
2674 }
2675 
// RefLayerSupport::IsSubtractionSupported(input0, input1, output,
//                                         reasonIfUnsupported)
// Element-wise subtraction is supported when all three tensors are one of
// seven data types (entries elided by the extractor), input0/input1 and
// input0/output types match, and the two input shapes are broadcast-
// compatible with the output shape.
2677  const TensorInfo& input1,
2678  const TensorInfo& output,
2680 {
2681  bool supported = true;
2682 
2683  std::array<DataType,7> supportedTypes = {
2691  };
2692 
2693  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2694  "Reference subtraction: input 0 is not a supported type.");
2695 
2696  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2697  "Reference subtraction: input 1 is not a supported type.");
2698 
2699  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2700  "Reference subtraction: output is not a supported type.");
2701 
2702  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2703  "Reference subtraction: input 0 and Input 1 types are mismatched");
2704 
2705  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2706  "Reference subtraction: input and output types are mismatched");
2707 
// Implicit (NumPy-style) broadcasting of the two inputs must produce the
// declared output shape.
2708  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2709  "Reference subtraction: shapes are not suitable for implicit broadcast.");
2710 
2711  return supported;
2712 }
2713 
// RefLayerSupport::IsPreluSupported(input, alpha, output, reasonIfUnsupported)
// PReLU is supported when input, alpha and output are each one of six data
// types (entries elided by the extractor), all three types match, and the
// input/alpha shapes are broadcast-compatible with the output shape.
2715  const TensorInfo& alpha,
2716  const TensorInfo& output,
2718 {
2719  bool supported = true;
2720 
2721  std::array<DataType, 6> supportedTypes
2722  {
2729  };
2730 
2731  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2732  "PReLU: input is not a supported type.");
2733 
2734  supported &= CheckSupportRule(TypeAnyOf(alpha, supportedTypes), reasonIfUnsupported,
2735  "PReLU: alpha is not a supported type.");
2736 
2737  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2738  "PReLU: output is not a supported type.");
2739 
// Three-argument overload: checks input, alpha and output all share one type.
2740  supported &= CheckSupportRule(TypesAreEqual(input, alpha, output), reasonIfUnsupported,
2741  "PReLU: input, alpha and output types are mismatched");
2742 
2743  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input, alpha, output), reasonIfUnsupported,
2744  "PReLU: shapes are not suitable for implicit broadcast");
2745 
2746  return supported;
2747 }
2748 
// RefLayerSupport::IsTransposeConvolution2dSupported(input, output,
//     TransposeConvolution2dDescriptor, weights, Optional<TensorInfo> biases,
//     reasonIfUnsupported)
// Validates data types for a transpose convolution: input/output must be one
// of seven types (entries elided by the extractor) and match each other.
// Weight rules depend on the input type: quantized 8-bit inputs allow one of
// three (elided) weight types; otherwise the weights must be a generally
// supported type equal to the input type. Biases, when present, must be one
// of four (elided) bias types. The descriptor itself is ignored.
2750  const TensorInfo& output,
2752  const TensorInfo& weights,
2755 {
2756  IgnoreUnused(descriptor);
2757  bool supported = true;
2758 
2759  std::array<DataType,7> supportedTypes =
2760  {
2768  };
2769 
2770  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2771  "Reference TransposeConvolution2d: input is not a supported type.");
2772 
2773  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2774  "Reference TransposeConvolution2d: output is not a supported type.");
2775 
2776  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2777  "Reference TransposeConvolution2d: input and output types mismatched.");
2778 
2779 
// Quantized 8-bit inputs admit a dedicated (e.g. per-axis) weight type list;
// all other input types require weights of the same type as the input.
2780  const DataType inputType = input.GetDataType();
2781  if (IsQuantized8BitType(inputType))
2782  {
2783  std::array<DataType, 3> supportedWeightTypes =
2784  {
2788  };
2789 
2790  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
2791  "Reference TransposeConvolution2d: weights type not supported for "
2792  "quantized input.");
2793  }
2794  else
2795  {
2796  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
2797  "Reference TransposeConvolution2d: weights is not a supported type.");
2798 
2799  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
2800  "Reference TransposeConvolution2d: input and weights types mismatched.");
2801  }
2802 
// Biases are optional; validate only when the caller supplied one.
2803  if (biases.has_value())
2804  {
2805  std::array<DataType,4> biasesSupportedTypes =
2806  {
2811  };
2812  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
2813  "Reference TransposeConvolution2d: biases is not a supported type.");
2814  }
2815 
2816  return supported;
2817 }
2818 
// RefLayerSupport::IsTransposeSupported(input, output, TransposeDescriptor,
//                                       reasonIfUnsupported)
// Supported when input and output are each one of six data types (entries
// elided by the extractor) and both types match; the permutation in the
// descriptor does not affect support.
2820  const TensorInfo& output,
2823 {
2824  IgnoreUnused(descriptor);
2825  bool supported = true;
2826 
2827  // Define supported output and inputs types.
2828  std::array<DataType, 6> supportedTypes =
2829  {
2836  };
2837 
2838  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2839  "Reference transpose: input is not a supported type.");
2840 
2841  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2842  "Reference transpose: output is not a supported type.");
2843 
2844  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2845  "Reference transpose: input and output types are mismatched.");
2846 
2847  return supported;
2848 }
2849 
// RefLayerSupport::IsUnidirectionalSequenceLstmSupported(input, outputStateIn,
//     cellStateIn, outputStateOut, cellStateOut, output,
//     UnidirectionalSequenceLstmDescriptor, LstmInputParamsInfo,
//     reasonIfUnsupported)
// Validates data types for a unidirectional-sequence LSTM. Only input/output
// activations and the weight/bias tensors in paramsInfo are checked; the four
// state tensors are explicitly ignored. Which optional weight sets are
// validated follows the descriptor flags: CIFG (input gate), peephole,
// projection and layer normalisation. Two of the three type lists have their
// entries elided by the extractor; only QAsymmS8 (weights) and the bias list
// tail survive.
2851  const TensorInfo& input,
2852  const TensorInfo& outputStateIn,
2853  const TensorInfo& cellStateIn,
2854  const TensorInfo& outputStateOut,
2855  const TensorInfo& cellStateOut,
2856  const TensorInfo& output,
2860 {
2861  IgnoreUnused(descriptor);
2862  IgnoreUnused(paramsInfo);
2863  IgnoreUnused(outputStateIn);
2864  IgnoreUnused(cellStateIn);
2865  IgnoreUnused(outputStateOut);
2866  IgnoreUnused(cellStateOut);
2867  bool supported = true;
2868 
// Activation (input/output) types — entries elided.
2869  std::array<DataType, 2> supportedTypes =
2870  {
2873  };
2874 
// Weight tensor types — first entry elided, second is QAsymmS8.
2875  std::array<DataType, 2> supportedWeightTypes =
2876  {
2878  DataType::QAsymmS8
2879  };
2880 
// Bias tensor types — entries elided.
2881  std::array<DataType, 3> supportedBiasTypes =
2882  {
2886  };
2887 
2888  // check inputs and outputs
2889  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2890  "Reference UnidirectionalSequenceLstm: input is not a supported type.");
2891  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2892  "Reference UnidirectionalSequenceLstm: output is not a supported type.");
2893 
2894  // check layer parameters
2895  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToForgetWeights(), supportedWeightTypes),
2896  reasonIfUnsupported,
2897  "Reference UnidirectionalSequenceLstm: InputToForgetWeights "
2898  "is not a supported type.");
2899  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToCellWeights(), supportedWeightTypes),
2900  reasonIfUnsupported,
2901  "Reference UnidirectionalSequenceLstm: InputToCellWeights is not a supported type.");
2902  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToOutputWeights(), supportedWeightTypes),
2903  reasonIfUnsupported,
2904  "Reference UnidirectionalSequenceLstm: InputToOutputWeights "
2905  "is not a supported type.");
2906  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToForgetWeights(), supportedWeightTypes),
2907  reasonIfUnsupported,
2908  "Reference UnidirectionalSequenceLstm: RecurrentToForgetWeights "
2909  "is not a supported type.");
2910  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToCellWeights(), supportedWeightTypes),
2911  reasonIfUnsupported,
2912  "Reference UnidirectionalSequenceLstm: RecurrentToCellWeights "
2913  "is not a supported type.");
2914  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToOutputWeights(), supportedWeightTypes),
2915  reasonIfUnsupported,
2916  "Reference UnidirectionalSequenceLstm: RecurrentToOutputWeights "
2917  "is not a supported type.");
2918 
2919  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetForgetGateBias(), supportedBiasTypes), reasonIfUnsupported,
2920  "Reference UnidirectionalSequenceLstm: ForgetGateBias is not a supported type.");
2921  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellBias(), supportedBiasTypes), reasonIfUnsupported,
2922  "Reference UnidirectionalSequenceLstm: CellBias is not a supported type.");
2923  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetOutputGateBias(), supportedBiasTypes), reasonIfUnsupported,
2924  "Reference UnidirectionalSequenceLstm: OutputGateBias is not a supported type.");
// Input-gate tensors exist only when CIFG (coupled input-forget gate) is off.
2925  if (!descriptor.m_CifgEnabled)
2926  {
2927  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToInputWeights(), supportedWeightTypes),
2928  reasonIfUnsupported,
2929  "Reference UnidirectionalSequenceLstm: InputToInputWeights "
2930  "is not a supported type.");
2931  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToInputWeights(), supportedWeightTypes),
2932  reasonIfUnsupported,
2933  "Reference UnidirectionalSequenceLstm: RecurrentToInputWeights "
2934  "is not a supported type.");
2935  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputGateBias(), supportedBiasTypes), reasonIfUnsupported,
2936  "Reference UnidirectionalSequenceLstm: InputGateBias is not a supported type.");
// CellToInput peephole weights only exist with both CIFG off and peephole on.
2937  if (descriptor.m_PeepholeEnabled)
2938  {
2939  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToInputWeights(), supportedWeightTypes),
2940  reasonIfUnsupported,
2941  "Reference UnidirectionalSequenceLstm: CellToInputWeights "
2942  "is not a supported type.");
2943  }
2944  }
2945  if (descriptor.m_PeepholeEnabled)
2946  {
2947  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToForgetWeights(), supportedWeightTypes),
2948  reasonIfUnsupported,
2949  "Reference UnidirectionalSequenceLstm: CellToForgetWeights "
2950  "is not a supported type.");
2951  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToOutputWeights(), supportedWeightTypes),
2952  reasonIfUnsupported,
2953  "Reference UnidirectionalSequenceLstm: CellToOutputWeights "
2954  "is not a supported type.");
2955  }
2956  if (descriptor.m_ProjectionEnabled)
2957  {
2958  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetProjectionWeights(), supportedWeightTypes),
2959  reasonIfUnsupported,
2960  "Reference UnidirectionalSequenceLstm: ProjectionWeights "
2961  "is not a supported type.");
// The projection bias is optional even when projection is enabled.
2962  if (paramsInfo.m_ProjectionBias != nullptr)
2963  {
2964  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
2965  "Reference UnidirectionalSequenceLstm: input and ProjectionBias types "
2966  "are mismatched");
2967  }
2968  }
2969  if (descriptor.m_LayerNormEnabled)
2970  {
2971  if (!descriptor.m_CifgEnabled)
2972  {
2973  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputLayerNormWeights(), supportedWeightTypes),
2974  reasonIfUnsupported,
2975  "Reference UnidirectionalSequenceLstm: InputLayerNormWeights "
2976  "is not a supported type.");
2977  }
2978  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetForgetLayerNormWeights(), supportedWeightTypes),
2979  reasonIfUnsupported,
2980  "Reference UnidirectionalSequenceLstm: ForgetLayerNormWeights "
2981  "is not a supported type.");
2982  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellLayerNormWeights(), supportedWeightTypes),
2983  reasonIfUnsupported,
2984  "Reference UnidirectionalSequenceLstm: CellLayerNormWeights "
2985  "is not a supported type.");
2986  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetOutputLayerNormWeights(), supportedWeightTypes),
2987  reasonIfUnsupported,
2988  "Reference UnidirectionalSequenceLstm: OutputLayerNormWeights "
2989  "is not a supported type.");
2990  }
2991 
2992  return supported;
2993 }
2994 
2995 } // namespace armnn
bool m_ProjectionEnabled
Enable/disable the projection layer.
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ViewsDescriptor for the SplitterLayer.
const TensorInfo & GetRecurrentToCellWeights() const
Definition: LstmParams.hpp:145
const TensorInfo const TensorInfo & anchors
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo & paramsInfo
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & GetCellBias() const
Definition: LstmParams.hpp:173
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & output
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
A ReshapeDescriptor for the ReshapeLayer.
const TensorInfo & GetRecurrentToInputWeights() const
Definition: LstmParams.hpp:137
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & GetCellLayerNormWeights() const
Definition: LstmParams.hpp:197
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
const TensorInfo & GetRecurrentToOutputWeights() const
Definition: LstmParams.hpp:149
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & gamma
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const std::vector< std::reference_wrapper< TensorInfo > > & outputs
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsStackSupported(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A Convolution2dDescriptor for the Convolution2dLayer.
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertBf16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & GetCellToInputWeights() const
Definition: LstmParams.hpp:153
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
const TensorInfo & scores
bool IsBatchMatMulSupported(const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & detectionClasses
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
const TensorInfo & GetCellToForgetWeights() const
Definition: LstmParams.hpp:157
const TensorInfo const ActivationDescriptor & descriptor
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFakeQuantizationSupported(const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConcatSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
const TensorInfo & GetForgetLayerNormWeights() const
Definition: LstmParams.hpp:193
const TensorInfo & outputStateIn
const TensorInfo const TensorInfo & previousCellStateIn
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & numDetections
const TensorInfo & GetCellToOutputWeights() const
Definition: LstmParams.hpp:161
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ResizeBilinearDescriptor for the ResizeBilinearLayer.
const TensorInfo & alpha
Base class for all descriptors.
Definition: Descriptors.hpp:22
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
A StackDescriptor for the StackLayer.
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:285
bool IsOutputSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & GetInputToCellWeights() const
Definition: LstmParams.hpp:129
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A PadDescriptor for the PadLayer.
bool IsConstantSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
DataType
Definition: Types.hpp:48
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo const TensorInfo & cellStateIn
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An LstmDescriptor for the LstmLayer.
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept...
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & GetInputToOutputWeights() const
Definition: LstmParams.hpp:133
A L2NormalizationDescriptor for the L2NormalizationLayer.
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
DataType GetDataType() const
Definition: Tensor.hpp:198
An OriginsDescriptor for the ConcatLayer.
A ReduceDescriptor for the REDUCE operators.
bool has_value() const noexcept
Definition: Optional.hpp:53
A FullyConnectedDescriptor for the FullyConnectedLayer.
bool IsRankSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool m_BiasEnabled
Enable/disable bias.
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & outputStateOut
A FakeQuantizationDescriptor for the FakeQuantizationLayer.
const TensorInfo * m_ProjectionBias
Definition: LstmParams.hpp:105
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & cellStateOut
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A GatherDescriptor for the GatherLayer.
bool m_PeepholeEnabled
Enable/disable peephole.
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & beta
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A QLstmDescriptor for the QLstmLayer.
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
bool IsConvertFp32ToBf16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
min(a, max(b, input)) ReLu1 & ReLu6.
const TensorInfo & GetRecurrentToForgetWeights() const
Definition: LstmParams.hpp:141
bool IsDebugSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A SliceDescriptor for the SliceLayer.
A Convolution3dDescriptor for the Convolution3dLayer.
const TensorInfo & previousOutputIn
A BatchMatMulDescriptor for the BatchMatMul operator.
A Pooling3dDescriptor for the Pooling3dLayer.
bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
const TensorInfo & GetInputToInputWeights() const
Definition: LstmParams.hpp:121
const TensorInfo & GetOutputLayerNormWeights() const
Definition: LstmParams.hpp:201
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
bool IsLayerSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &, Optional< std::string &> reasonIfUnsupported) const override
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & detectionScores
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
int32_t m_Axis
The axis in params to gather indices from.
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & GetForgetGateBias() const
Definition: LstmParams.hpp:169
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
A MeanDescriptor for the MeanLayer.
const TensorInfo const TensorInfo const TensorInfo & detectionBoxes
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool m_LayerNormEnabled
Enable/disable layer normalization.
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
const TensorInfo & GetInputGateBias() const
Definition: LstmParams.hpp:165
A TransposeDescriptor for the TransposeLayer.
const TensorInfo & GetProjectionWeights() const
Definition: LstmParams.hpp:181
A StridedSliceDescriptor for the StridedSliceLayer.
const TensorInfo & GetInputToForgetWeights() const
Definition: LstmParams.hpp:125
bool IsInputSupported(const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & input1
const TensorInfo & GetInputLayerNormWeights() const
Definition: LstmParams.hpp:189
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A Pooling2dDescriptor for the Pooling2dLayer.
A NormalizationDescriptor for the NormalizationLayer.
const TensorInfo & GetOutputGateBias() const
Definition: LstmParams.hpp:177
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo const TensorInfo const TensorInfo & scratchBuffer
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
A ChannelShuffleDescriptor for the ChannelShuffle operator.
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & GetProjectionBias() const
Definition: LstmParams.hpp:185
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195
bool IsSupportedForDataTypeGeneric(Optional< std::string &> reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
A SoftmaxDescriptor for the SoftmaxLayer.
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:59
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A FillDescriptor for the FillLayer.
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo const TensorInfo & mean
A PermuteDescriptor for the PermuteLayer.
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below...
Definition: Types.hpp:468