ArmNN 22.05 — RefLayerSupport.cpp (Doxygen source listing).
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "RefLayerSupport.hpp"
7 
8 #include <armnn/TypesUtils.hpp>
9 #include <armnn/Types.hpp>
13 
14 #include <LayerSupportCommon.hpp>
16 
17 #include <vector>
18 #include <array>
19 
20 namespace armnn
21 {
22 
23 namespace
24 {
25 
26 template<typename Float32Func, typename Uint8Func, typename ... Params>
27 bool IsSupportedForDataTypeRef(Optional<std::string&> reasonIfUnsupported,
28  DataType dataType,
29  Float32Func floatFuncPtr,
30  Uint8Func uint8FuncPtr,
31  Params&&... params)
32 {
33  return IsSupportedForDataTypeGeneric(reasonIfUnsupported,
34  dataType,
35  &FalseFunc<Params...>,
36  floatFuncPtr,
37  uint8FuncPtr,
38  &FalseFunc<Params...>,
39  &FalseFunc<Params...>,
40  std::forward<Params>(params)...);
41 }
42 
43 } // anonymous namespace
44 
namespace
{

// Builds the standard "wrong tensor rank" failure message reported by the
// TensorNumDimensionsAreCorrect support rules.
//
// @param expected   Number of dimensions the reference layer requires.
// @param actual     Number of dimensions the given tensor actually has.
// @param layerStr   Human-readable layer name, e.g. "batchToSpaceNd".
// @param tensorName Role of the offending tensor, e.g. "input" or "output".
// @return The formatted error message.
//
// Inputs are read-only, so they are taken by const reference (the original
// took mutable std::string& for no reason); the message text is unchanged.
std::string CreateIncorrectDimensionsErrorMsg(unsigned int expected,
                                              unsigned int actual,
                                              const std::string& layerStr,
                                              const std::string& tensorName)
{
    return "Reference " + layerStr + ": Expected " + std::to_string(expected) +
           " dimensions but got " + std::to_string(actual) +
           " dimensions instead, for the '" + tensorName + "' tensor.";
}

} // anonymous namespace
60 
// RefLayerSupport::IsLayerSupported (fragment).
// Dispatches a support query for a given layer type to the matching
// Is<Layer>Supported overload, downcasting the BaseDescriptor to the
// layer-specific descriptor type. The 'infos' vector carries the layer's
// tensor infos in a layer-specific order (typically inputs first, then
// outputs, then weights/biases where applicable).
// NOTE(review): this is a Doxygen-listing scrape — the signature's first
// line (function name + LayerType 'type' parameter), most
// "case LayerType::X:" labels and several trailing "reasonIfUnsupported);"
// argument lines were hyperlinks and were dropped by extraction, so the
// fragment is not compilable as-is; code is otherwise reproduced verbatim.
                                  const std::vector<TensorInfo>& infos,
                                  const BaseDescriptor& descriptor,
                                  const Optional<LstmInputParamsInfo>& lstmParamsInfo,
                                  const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmInputParamsInfo,
                                  Optional<std::string&> reasonIfUnsupported) const
{
    switch (type)
    {
        // NOTE(review): a case label precedes each of the following return
        // statements in the real source.
        return IsActivationSupported(infos[0],
                                     infos[1],
                                     *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        return IsArgMinMaxSupported(infos[0],
                                    infos[1],
                                    *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
                                    reasonIfUnsupported);
        return IsBatchNormalizationSupported(infos[0],
                                             infos[1],
                                             infos[2],
                                             infos[3],
                                             infos[4],
                                             infos[5],
                                             *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
                                                 (&descriptor)),
                                             reasonIfUnsupported);
        return IsBatchToSpaceNdSupported(infos[0],
                                         infos[1],
                                         *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
                                         reasonIfUnsupported);
        return IsComparisonSupported(infos[0],
                                     infos[1],
                                     infos[2],
                                     *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        case LayerType::Concat:
        {
            // Last element of 'infos' is the output; everything before it is an input.
            std::vector<const TensorInfo*> inputInfos;
            for (uint32_t i = 0; i < (infos.size() - 1); i++)
            {
                inputInfos.push_back(&infos[i]);
            }
            return IsConcatSupported(inputInfos,
                                     infos[infos.size() - 1],
                                     *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
        }
        case LayerType::Constant:
            return IsConstantSupported(infos[0], reasonIfUnsupported);
        return IsConvertBf16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
        return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
        return IsConvertFp32ToBf16Supported(infos[0], infos[1], reasonIfUnsupported);
        return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
            // A default-constructed TensorInfo in slot 3 signals "no bias".
            if (infos[3] == TensorInfo())
            {
                return IsConvolution2dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                EmptyOptional(),
                                                reasonIfUnsupported);
            }
            else
            {
                return IsConvolution2dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                infos[3],
                                                reasonIfUnsupported);
            }
        }
        return IsDepthToSpaceSupported(infos[0],
                                       infos[1],
                                       *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
                                       reasonIfUnsupported);
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
            // A default-constructed TensorInfo in slot 3 signals "no bias".
            if (infos[3] == TensorInfo())
            {
                return IsDepthwiseConvolutionSupported(infos[0],
                                                       infos[1],
                                                       desc,
                                                       infos[2],
                                                       EmptyOptional(),
                                                       reasonIfUnsupported);
            }
            else
            {
                return IsDepthwiseConvolutionSupported(infos[0],
                                                       infos[1],
                                                       desc,
                                                       infos[2],
                                                       infos[3],
                                                       reasonIfUnsupported);
            }
        }
        return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Division:
            return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        return IsElementwiseUnarySupported(infos[0],
                                           infos[1],
                                           *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
                                           reasonIfUnsupported);
        case LayerType::Fill:
            return IsFillSupported(infos[0],
                                   infos[1],
                                   *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
                                   reasonIfUnsupported);
        case LayerType::Floor:
            return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
        return IsFullyConnectedSupported(infos[0],
                                         infos[1],
                                         infos[2],
                                         infos[3],
                                         *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
                                         reasonIfUnsupported);
        case LayerType::Gather:
            return IsGatherSupported(infos[0],
                                     infos[1],
                                     infos[2],
                                     *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        case LayerType::GatherNd:
            return IsGatherNdSupported(infos[0],
                                       infos[1],
                                       infos[2],
                                       reasonIfUnsupported);
        case LayerType::Input:
            return IsInputSupported(infos[0], reasonIfUnsupported);
        return IsInstanceNormalizationSupported(infos[0],
                                                infos[1],
                                                *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
                                                    (&descriptor)),
                                                reasonIfUnsupported);
        return IsL2NormalizationSupported(infos[0],
                                          infos[1],
                                          *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
                                          reasonIfUnsupported);
        return IsLogicalBinarySupported(infos[0],
                                        infos[1],
                                        infos[2],
                                        *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        return IsLogSoftmaxSupported(infos[0],
                                     infos[1],
                                     *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        case LayerType::Lstm:
            return IsLstmSupported(infos[0],
                                   infos[1],
                                   infos[2],
                                   infos[3],
                                   infos[4],
                                   infos[5],
                                   infos[6],
                                   *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
                                   lstmParamsInfo.value(),
        case LayerType::QLstm:
            return IsQLstmSupported(infos[0],
                                    infos[1],
                                    infos[2],
                                    infos[3],
                                    infos[4],
                                    infos[5],
                                    *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
                                    lstmParamsInfo.value(),
        case LayerType::Maximum:
            return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::Mean:
            return IsMeanSupported(infos[0],
                                   infos[1],
                                   *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
                                   reasonIfUnsupported);
        case LayerType::Minimum:
            return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        return IsNormalizationSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::Output:
            return IsOutputSupported(infos[0], reasonIfUnsupported);
        case LayerType::Pad:
            return IsPadSupported(infos[0],
                                  infos[1],
                                  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
                                  reasonIfUnsupported);
        case LayerType::Permute:
            return IsPermuteSupported(infos[0],
                                      infos[1],
                                      *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
                                      reasonIfUnsupported);
        return IsPooling2dSupported(infos[0],
                                    infos[1],
                                    *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
                                    reasonIfUnsupported);
        case LayerType::Prelu:
            return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::Quantize:
            return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Reshape:
            return IsReshapeSupported(infos[0],
                                      infos[1],
                                      *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
                                      reasonIfUnsupported);
        case LayerType::Resize:
            return IsResizeSupported(infos[0],
                                     infos[1],
                                     *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        case LayerType::Reduce:
            return IsReduceSupported(infos[0],
                                     infos[1],
                                     *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        case LayerType::Slice:
            return IsSliceSupported(infos[0],
                                    infos[1],
                                    *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
                                    reasonIfUnsupported);
        case LayerType::Softmax:
            return IsSoftmaxSupported(infos[0],
                                      infos[1],
                                      *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
                                      reasonIfUnsupported);
        return IsSpaceToBatchNdSupported(infos[0],
                                         infos[1],
                                         *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
                                         reasonIfUnsupported);
        return IsSpaceToDepthSupported(infos[0],
                                       infos[1],
                                       *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
                                       reasonIfUnsupported);
        case LayerType::Splitter:
        {
            // First element of 'infos' is the input; everything after it is an output.
            std::vector<TensorInfo> outputInfos;
            for (uint32_t i = 1; i < infos.size(); i++)
            {
                outputInfos.push_back(infos[i]);
            }
            return IsSplitterSupported(infos[0],
                                       {outputInfos.begin(), outputInfos.end()},
                                       *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
        }
        case LayerType::Stack:
        {
            // Last element of 'infos' is the output; everything before it is an input.
            std::vector<const TensorInfo*> inputInfos;
            for (uint32_t i = 0; i < infos.size() - 1; i++)
            {
                inputInfos.push_back(&infos[i]);
            }
            return IsStackSupported(inputInfos,
                                    infos[infos.size() - 1],
                                    *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
        }
        return IsStridedSliceSupported(infos[0],
                                       infos[1],
                                       *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
                                       reasonIfUnsupported);
        return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        return IsTransposeSupported(infos[0],
                                    infos[1],
                                    *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
                                    reasonIfUnsupported);
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
            // A default-constructed TensorInfo in slot 3 signals "no bias".
            if (infos[3] == TensorInfo())
            {
                return IsTransposeConvolution2dSupported(infos[0],
                                                         infos[1],
                                                         desc,
                                                         infos[2],
                                                         EmptyOptional(),
                                                         reasonIfUnsupported);
            }
            else
            {
                return IsTransposeConvolution2dSupported(infos[0],
                                                         infos[1],
                                                         desc,
                                                         infos[2],
                                                         infos[3],
                                                         reasonIfUnsupported);
            }
        }
        case LayerType::Cast:
            return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
        return IsChannelShuffleSupported(infos[0],
                                         infos[1],
                                         *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
                                         reasonIfUnsupported);
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
            // A default-constructed TensorInfo in slot 3 signals "no bias".
            if (infos[3] == TensorInfo())
            {
                return IsConvolution3dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                EmptyOptional(),
                                                reasonIfUnsupported);
            }
            else
            {
                return IsConvolution3dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                infos[3],
                                                reasonIfUnsupported);
            }
        }
        case LayerType::Debug:
            return IsDebugSupported(infos[0], infos[1], reasonIfUnsupported);
        return IsDetectionPostProcessSupported(infos[0],
                                               infos[1],
                                               infos[2],
                                               infos[3],
                                               infos[4],
                                               infos[5],
                                               infos[6],
                                               *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>
                                                   (&descriptor)),
                                               reasonIfUnsupported);
        return IsFakeQuantizationSupported(infos[0],
                                           *(PolymorphicDowncast<const FakeQuantizationDescriptor*>(&descriptor)),
                                           reasonIfUnsupported);
        case LayerType::MemCopy:
            return IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Rank:
            return IsRankSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Shape:
            return IsShapeSupported(infos[0], infos[1], reasonIfUnsupported);
        {
            if (infos.size() != 6)
            {
                throw InvalidArgumentException("Invalid number of UnidirectionalSequenceLstm TensorInfos. TensorInfos "
                                               "should be of format: {input, outputStateIn, cellStateIn, "
                                               "hiddenStateOutputVal, cellStateOutputVal, output}");
            }
            auto desc = *(PolymorphicDowncast<const UnidirectionalSequenceLstmDescriptor*>(&descriptor));
            // NOTE(review): the call line naming the Is...Supported overload
            // was dropped by extraction before these argument lines.
                                                      infos[1],
                                                      infos[2],
                                                      infos[3],
                                                      infos[4],
                                                      infos[5],
                                                      desc,
                                                      lstmParamsInfo.value(),
        }
        return IsPooling3dSupported(infos[0],
                                    infos[1],
                                    *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
                                    reasonIfUnsupported);
        case LayerType::Map:
            return true;
        case LayerType::Unmap:
            return true;
        return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Merge:
            return LayerSupportBase::IsMergeSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        // NOTE(review): QuantizedLstm case label and call line dropped by extraction.
                                            infos[1],
                                            infos[2],
                                            infos[3],
                                            infos[4],
                                            quantizedLstmInputParamsInfo.value(),
        default:
            // layers not supported in neon by default:
            // precompiled, standin, switch
            // NOTE(review): the comment above says "neon" but this is the Ref
            // backend — looks like an upstream copy/paste; confirm against armnn.
            return false;
    }
}
505 
// RefLayerSupport::IsActivationSupported (fragment) — checks input/output data
// types, type equality and rank equality, then checks that the requested
// activation function is one the reference backend implements.
// NOTE(review): Doxygen-scrape gaps — the signature's first lines, the
// DataType entries of 'supportedTypes' and the supported ActivationFunction
// case labels were hyperlinks and are missing from this text.
                                           const TensorInfo& output,
{
    bool supported = true;

    // Define supported types.
    std::array<DataType,6> supportedTypes = {
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference activation: input type not supported.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference activation: output type not supported.");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference activation: input and output types mismatched.");

    supported &= CheckSupportRule(ShapesAreSameRank(input, output), reasonIfUnsupported,
                                  "Reference activation: input and output shapes are of different rank.");


    // Local rule: m_Res is set from whether desc.m_Function is a handled case.
    struct ActivationFunctionSupported : public Rule
    {
        ActivationFunctionSupported(const ActivationDescriptor& desc)
        {
            switch(desc.m_Function)
            {
                // NOTE(review): the accepted ActivationFunction case labels
                // preceding this block were dropped by extraction.
                {
                    m_Res = true;
                    break;
                }
                default:
                {
                    m_Res = false;
                    break;
                }
            }
        }
    };

    // Function is supported
    supported &= CheckSupportRule(ActivationFunctionSupported(descriptor), reasonIfUnsupported,
                                  "Reference activation: function not supported.");

    return supported;
}
573 
// RefLayerSupport::IsAdditionSupported (fragment) — both inputs and the output
// must share one supported type, and the input shapes must be
// broadcast-compatible with the output shape.
// NOTE(review): signature first line and the DataType list entries were
// dropped by the Doxygen-listing extraction.
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
{
    bool supported = true;

    std::array<DataType,7> supportedTypes = {
    };

    supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
                                  "Reference addition: input 0 is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
                                  "Reference addition: input 1 is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference addition: output is not a supported type.");

    supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
                                  "Reference addition: input 0 and Input 1 types are mismatched");

    supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
                                  "Reference addition: input and output types are mismatched");

    supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
                                  "Reference addition: shapes are not suitable for implicit broadcast.");

    return supported;
}
611 
// RefLayerSupport::IsArgMinMaxSupported body (fragment) — input may be any of
// the listed types; output must be an integral index type.
// NOTE(review): the whole signature and most DataType entries were dropped by
// the Doxygen-listing extraction.
{
    IgnoreUnused(descriptor);

    std::array<DataType, 8> supportedInputTypes =
    {
    };

    // Output holds indices; Signed64 is the only entry surviving extraction.
    std::array<DataType,2> supportedOutputTypes = {
        DataType::Signed64
    };

    bool supported = true;

    supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
                                  "Reference ArgMinMax: input is not a supported type.");
    supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
                                  "Reference ArgMinMax: output type not supported");

    return supported;
}
644 
// RefLayerSupport::IsBatchNormalizationSupported (fragment) — input, output
// and all four statistics tensors (mean/variance/beta/gamma) must use one of
// the supported types, and input/output types must match.
// NOTE(review): signature first line and DataType list entries dropped by the
// Doxygen-listing extraction.
                                                     const TensorInfo& output,
                                                     const TensorInfo& mean,
                                                     const TensorInfo& variance,
                                                     const TensorInfo& beta,
                                                     const TensorInfo& gamma,
{
    IgnoreUnused(descriptor);

    std::array<DataType, 6> supportedTypes =
    {
    };

    bool supported = true;

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference batch normalization: input is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference batch normalization: output is not a supported type.");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference batch normalization: input and output types are mismatched");

    supported &= CheckSupportRule(TypeAnyOf(mean, supportedTypes), reasonIfUnsupported,
                                  "Reference batch normalization: mean is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(variance, supportedTypes), reasonIfUnsupported,
                                  "Reference batch normalization: variance is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(beta, supportedTypes), reasonIfUnsupported,
                                  "Reference batch normalization: beta is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(gamma, supportedTypes), reasonIfUnsupported,
                                  "Reference batch normalization: gamma is not a supported type.");

    return supported;
}
691 
// RefLayerSupport::IsBatchToSpaceNdSupported (fragment) — type checks plus a
// hard rank-4 requirement on both input and output, using
// CreateIncorrectDimensionsErrorMsg for the failure text.
// NOTE(review): signature first line and DataType list entries dropped by the
// Doxygen-listing extraction.
                                                 const TensorInfo& output,
{
    IgnoreUnused(descriptor);

    bool supported = true;

    // Names used only to build the dimension-mismatch error messages below.
    std::string batchToSpaceNdLayerStr = "batchToSpaceNd";
    std::string inputTensorStr = "input";
    std::string outputTensorStr = "output";

    // Define supported types.
    std::array<DataType,6> supportedTypes =
    {
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference BatchToSpaceNd: input type not supported.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference BatchToSpaceNd: output type not supported.");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference BatchToSpaceNd: input and output types mismatched.");

    supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 4),
                                  reasonIfUnsupported,
                                  CreateIncorrectDimensionsErrorMsg(4,
                                                                    output.GetNumDimensions(),
                                                                    batchToSpaceNdLayerStr,
                                                                    outputTensorStr).data());

    supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(input, 4),
                                  reasonIfUnsupported,
                                  CreateIncorrectDimensionsErrorMsg(4,
                                                                    input.GetNumDimensions(),
                                                                    batchToSpaceNdLayerStr,
                                                                    inputTensorStr).data());

    return supported;
}
741 
// RefLayerSupport::IsCastSupported (fragment) — input and output may each be
// any supported type (no equality rule: Cast converts); total element counts
// must match.
// NOTE(review): signature first line and DataType list entries dropped by the
// Doxygen-listing extraction.
                                        const TensorInfo& output,
{
    std::array<DataType, 9> supportedInputTypes =
    {
    };

    bool supported = true;
    supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
                                  "Reference cast: input is not a supported type");


    supported &= CheckSupportRule(TypeAnyOf(output, supportedInputTypes), reasonIfUnsupported,
                                  "Reference cast: output is not a supported type");

    supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
                                  "Reference cast: input and output shapes have different number of total elements");

    return supported;
}
771 
// RefLayerSupport::IsChannelShuffleSupported (fragment) — standard
// input/output type membership and equality checks.
// NOTE(review): signature first lines and DataType list entries dropped by the
// Doxygen-listing extraction.
                                                 const TensorInfo& output,
{
    IgnoreUnused(descriptor);
    bool supported = true;

    // Define supported output and inputs types.
    std::array<DataType, 7> supportedTypes =
    {
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference ChannelShuffle: input is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference ChannelShuffle: output is not a supported type.");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference ChannelShuffle: input and output types are mismatched.");

    return supported;
}
803 
804 
// RefLayerSupport::IsComparisonSupported (fragment) — inputs must share a
// supported type; the output of a comparison is always Boolean.
// NOTE(review): signature first line and DataType list entries dropped by the
// Doxygen-listing extraction.
                                              const TensorInfo& input1,
                                              const TensorInfo& output,
{
    IgnoreUnused(descriptor);
    std::array<DataType, 8> supportedInputTypes =
    {
    };

    bool supported = true;
    supported &= CheckSupportRule(TypeAnyOf(input0, supportedInputTypes), reasonIfUnsupported,
                                  "Reference comparison: input 0 is not a supported type");

    supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
                                  "Reference comparison: input 0 and Input 1 types are mismatched");

    supported &= CheckSupportRule(TypeIs(output, DataType::Boolean), reasonIfUnsupported,
                                  "Reference comparison: output is not of type Boolean");

    return supported;
}
836 
// RefLayerSupport::IsConcatSupported (fragment) — the output and every input
// must use one of the supported types, and each input's type must match the
// output's.
// NOTE(review): the descriptor/reasonIfUnsupported parameter lines and the
// DataType list entries were dropped by the Doxygen-listing extraction.
bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                        const TensorInfo& output,
{
    IgnoreUnused(descriptor);

    bool supported = true;
    std::array<DataType,7> supportedTypes =
    {
    };

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference concatenation: output type not supported");
    for (const TensorInfo* input : inputs)
    {
        ARMNN_ASSERT(input != nullptr);
        supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
                                      "Reference concatenation: input type not supported");

        supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
                                      "Reference concatenation: input and output types mismatched.");
    }

    return supported;
}
870 
// RefLayerSupport::IsConstantSupported body (fragment) — a Constant layer is
// supported iff its output type is in the (extraction-stripped) list.
// NOTE(review): the whole signature and DataType entries were dropped by the
// Doxygen-listing extraction.
{
    std::array<DataType,8> supportedTypes =
    {
    };

    return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                            "Reference constant: output is not a supported type.");
}
889 
// RefLayerSupport::IsConvertBf16ToFp32Supported (fragment) — a strict
// BFloat16 -> Float32 conversion: input must be BFloat16, output Float32.
// NOTE(review): signature first line dropped by the Doxygen-listing extraction.
                                                     const TensorInfo& output,
{
    bool supported = true;

    supported &= CheckSupportRule(TypeIs(input, DataType::BFloat16), reasonIfUnsupported,
                                  "Reference for ConvertBf16ToFp32 layer: input type not supported");

    supported &= CheckSupportRule(TypeIs(output, DataType::Float32), reasonIfUnsupported,
                                  "Reference for ConvertBf16ToFp32 layer: output type not supported");

    return supported;
}
904 
// RefLayerSupport::IsConvertFp16ToFp32Supported (fragment) — uses the generic
// per-data-type dispatcher twice: the input query accepts the Float16 slot
// (TrueFunc first) and the output query accepts the Float32 slot; every other
// slot is a False* rejector that appends a reason string.
// NOTE(review): signature first lines dropped by the Doxygen-listing extraction.
                                                     const TensorInfo& output,
{
    return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                          input.GetDataType(),
                                          &TrueFunc<>,
                                          &FalseInputFuncF32<>,
                                          &FalseFuncU8<>,
                                          &FalseFuncI32<>,
                                          &FalseFuncU8<>) &&
            IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                          output.GetDataType(),
                                          &FalseOutputFuncF16<>,
                                          &TrueFunc<>,
                                          &FalseFuncU8<>,
                                          &FalseFuncI32<>,
                                          &FalseFuncU8<>));
}
924 
// RefLayerSupport::IsConvertFp32ToBf16Supported (fragment) — a strict
// Float32 -> BFloat16 conversion: input must be Float32, output BFloat16.
// NOTE(review): signature first line dropped by the Doxygen-listing extraction.
                                                     const TensorInfo& output,
{
    bool supported = true;

    supported &= CheckSupportRule(TypeIs(input, DataType::Float32), reasonIfUnsupported,
                                  "Reference for ConvertFp32ToBf16 layer: input type not supported");

    supported &= CheckSupportRule(TypeIs(output, DataType::BFloat16), reasonIfUnsupported,
                                  "Reference for ConvertFp32ToBf16 layer: output type not supported");

    return supported;
}
939 
// RefLayerSupport::IsConvertFp32ToFp16Supported (fragment) — mirror image of
// the Fp16->Fp32 check: input query accepts only the Float32 slot, output
// query accepts only the Float16 slot, all other slots reject with a reason.
// NOTE(review): signature first lines dropped by the Doxygen-listing extraction.
                                                     const TensorInfo& output,
{
    return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                          input.GetDataType(),
                                          &FalseInputFuncF16<>,
                                          &TrueFunc<>,
                                          &FalseFuncU8<>,
                                          &FalseFuncI32<>,
                                          &FalseFuncU8<>) &&
            IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                          output.GetDataType(),
                                          &TrueFunc<>,
                                          &FalseOutputFuncF32<>,
                                          &FalseFuncU8<>,
                                          &FalseFuncI32<>,
                                          &FalseFuncU8<>));
}
959 
// RefLayerSupport::IsConvolution2dSupported (fragment) — type checks for
// input/output/weights/optional biases, with two special rules: a BFloat16
// input may pair with a Float32 output, and quantized 8-bit inputs restrict
// the weight types to a dedicated quantized list.
// NOTE(review): signature first lines and all DataType list entries dropped
// by the Doxygen-listing extraction.
                                                const TensorInfo& output,
                                                const TensorInfo& weights,
{
    bool supported = true;

    // Define supported types.
    std::array<DataType,7> supportedTypes =
    {
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference Convolution2d: input is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference Convolution2d: output is not a supported type.");

    // For Convolution2d, we allow to have BFloat16 input with Float32 output for optimization.
    if (input.GetDataType() == DataType::BFloat16)
    {
        if (output.GetDataType() != DataType::BFloat16 && output.GetDataType() != DataType::Float32)
        {
            reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n";
            supported = false;
        }
    }
    else
    {
        supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                      "Reference Convolution2d: input and output types mismatched.");
    }

    const DataType inputType = input.GetDataType();
    if (IsQuantized8BitType(inputType))
    {
        // Quantized input narrows the acceptable weight types.
        std::array<DataType, 3> supportedWeightTypes =
        {
        };

        supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
                                      "Reference Convolution2d: weights type not supported for quantized input.");
    }
    else
    {
        supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
                                      "Reference Convolution2d: weights is not a supported type.");

        supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
                                      "Reference Convolution2d: input and weights types mismatched.");
    }

    if (biases.has_value())
    {
        std::array<DataType,4> biasesSupportedTypes =
        {
        };

        supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
                                      "Reference Convolution2d: biases is not a supported type.");
    }
    IgnoreUnused(descriptor);

    return supported;
}
1041 
// RefLayerSupport::IsConvolution3dSupported (fragment) — same structure as the
// 2d check (input/output/weights/optional biases type rules, quantized-input
// weight restriction) but without the BFloat16->Float32 special case.
// NOTE(review): signature first lines and all DataType list entries dropped
// by the Doxygen-listing extraction.
                                                const TensorInfo& output,
                                                const TensorInfo& weights,
{
    bool supported = true;

    // Define supported types.
    std::array<DataType,7> supportedTypes =
    {
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference Convolution3d: input is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference Convolution3d: output is not a supported type.");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference Convolution3d: input and output types mismatched.");

    const DataType inputType = input.GetDataType();
    if (IsQuantized8BitType(inputType))
    {
        // Quantized input narrows the acceptable weight types.
        std::array<DataType, 3> supportedWeightTypes =
        {
        };

        supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
                                      "Reference Convolution3d: weights type not supported for quantized input.");
    }
    else
    {
        supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
                                      "Reference Convolution3d: weights is not a supported type.");

        supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
                                      "Reference Convolution3d: input and weights types mismatched.");
    }

    if (biases.has_value())
    {
        std::array<DataType,4> biasesSupportedTypes =
        {
        };

        supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
                                      "Reference Convolution3d: biases is not a supported type.");
    }
    IgnoreUnused(descriptor);

    return supported;
}
1111 
// RefLayerSupport::IsDebugSupported (fragment) — standard input/output type
// membership and equality checks for the Debug pass-through layer.
// NOTE(review): signature first line and DataType list entries dropped by the
// Doxygen-listing extraction.
                                        const TensorInfo& output,
{
    bool supported = true;

    std::array<DataType, 8> supportedTypes =
    {
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference for Debug layer: input type not supported");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference for Debug layer: output type not supported");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference for Debug layer: input and output types are mismatched");

    return supported;
}
1141 
// RefLayerSupport::IsDepthToSpaceSupported (fragment) — standard input/output
// type membership and equality checks; the descriptor is not consulted.
// NOTE(review): signature first lines and DataType list entries dropped by the
// Doxygen-listing extraction.
                                               const TensorInfo& output,
{
    IgnoreUnused(descriptor);
    bool supported = true;

    std::array<DataType,6> supportedTypes =
    {
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference DepthToSpace: input type not supported");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference DepthToSpace: output type not supported");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference DepthToSpace: input and output types are mismatched");

    return supported;
}
1171 
// RefLayerSupport::IsDepthwiseConvolutionSupported (fragment) — mirrors the
// Convolution2d check: input/output/weights/optional biases type rules, with
// a narrower weight-type list when the input is a quantized 8-bit type.
// NOTE(review): signature first lines and all DataType list entries dropped
// by the Doxygen-listing extraction.
                                                       const TensorInfo& output,
                                                       const TensorInfo& weights,
{
    IgnoreUnused(descriptor);
    bool supported = true;

    // Define supported types.
    std::array<DataType,7> supportedTypes =
    {
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference DepthwiseConvolution2d: input is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference DepthwiseConvolution2d: output is not a supported type.");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference DepthwiseConvolution2d: input and output types mismatched.");

    const DataType inputType = input.GetDataType();
    if (IsQuantized8BitType(inputType))
    {
        // Quantized input narrows the acceptable weight types.
        std::array<DataType, 3> supportedWeightTypes =
        {
        };

        supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
                                      "Reference DepthwiseConvolution2d: weights type not supported for "
                                      "quantized input.");
    }
    else
    {
        supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
                                      "Reference DepthwiseConvolution2d: weights is not a supported type.");

        supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
                                      "Reference DepthwiseConvolution2d: input and weights types mismatched.");
    }

    if (biases.has_value())
    {
        std::array<DataType,4> biasesSupportedTypes =
        {
        };
        supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
                                      "Reference DepthwiseConvolution2d: biases is not a supported type.");
    }

    return supported;

}
1242 
// Support check for the Reference Dequantize workload: input must be a
// supported quantized type (and not per-axis quantized), output must be a
// float type, and the two shapes must contain the same number of elements.
// NOTE(review): extraction dropped the signature and most DataType entries
// (orig. 1250-1254, 1264-1265) — restore from upstream.
1244  const TensorInfo& output,
1246 {
1247  bool supported = true;
1248 
1249  std::array<DataType,5> supportedInputTypes = {
1255  };
1256 
1257  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
1258  "Reference for Dequantize layer: input type not supported.");
1259 
1260  supported &= CheckSupportRule(TypeNotPerAxisQuantized(input), reasonIfUnsupported,
1261  "Reference for Dequantize layer: per-axis quantized input not supported.");
1262 
1263  std::array<DataType,3> supportedOutputTypes = {
1266  DataType::Float16
1267  };
1268 
1269  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
1270  "Reference for Dequantize layer: output type not supported.");
1271 
1272  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1273  "Reference for Dequantize layer: input/output shapes have different num total "
1274  "elements.");
1275 
1276  return supported;
1277 }
1278 
// Support check for the Reference DetectionPostProcess workload.
// Only the types of the first two inputs (boxEncodings, scores) are validated;
// anchors and the four outputs are deliberately ignored via IgnoreUnused.
// NOTE(review): extraction dropped the signature and the DataType entries
// (orig. 1295-1300) — restore from upstream.
1280  const TensorInfo& scores,
1281  const TensorInfo& anchors,
1282  const TensorInfo& detectionBoxes,
1284  const TensorInfo& detectionScores,
1285  const TensorInfo& numDetections,
1288 {
1289  IgnoreUnused(anchors, detectionBoxes, detectionClasses, detectionScores, numDetections, descriptor);
1290 
1291  bool supported = true;
1292 
1293  std::array<DataType,6> supportedInputTypes =
1294  {
1301  };
1302 
1303  supported &= CheckSupportRule(TypeAnyOf(boxEncodings, supportedInputTypes), reasonIfUnsupported,
1304  "Reference DetectionPostProcess: input 0 is not a supported type.");
1305 
1306  supported &= CheckSupportRule(TypeAnyOf(scores, supportedInputTypes), reasonIfUnsupported,
1307  "Reference DetectionPostProcess: input 1 is not a supported type.");
1308 
1309  return supported;
1310 }
1311 
// Dilated depthwise convolution has the same type constraints as the regular
// depthwise convolution, so this simply delegates to that check.
1313  const TensorInfo& output,
1315  const TensorInfo& weights,
1318 {
1319  return IsDepthwiseConvolutionSupported(input, output, descriptor, weights, biases, reasonIfUnsupported);
1320 }
1321 
// Support check for the Reference Division workload: both inputs and the
// output must share one supported type, and the input shapes must be
// broadcast-compatible with the output shape.
// NOTE(review): extraction dropped the signature and the DataType entries
// (orig. 1330-1336) — restore from upstream.
1323  const TensorInfo& input1,
1324  const TensorInfo& output,
1326 {
1327  bool supported = true;
1328 
1329  std::array<DataType,7> supportedTypes = {
1337  };
1338 
1339  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1340  "Reference division: input 0 is not a supported type.");
1341 
1342  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1343  "Reference division: input 1 is not a supported type.");
1344 
1345  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1346  "Reference division: output is not a supported type.");
1347 
1348  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1349  "Reference division: input 0 and Input 1 types are mismatched");
1350 
1351  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1352  "Reference division: input and output types are mismatched");
1353 
1354  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1355  "Reference division: shapes are not suitable for implicit broadcast.");
1356 
1357  return supported;
1358 }
1359 
// Support check for the Reference ElementwiseUnary workload.
// LogicalNot is restricted to the (single) logical type; all other unary
// operations use the general supported-type list. Input and output must match
// in type and total element count.
// NOTE(review): `IgnoreUnused(descriptor)` is redundant here — descriptor is
// read below via m_Operation. Extraction dropped the signature and DataType
// entries (orig. 1369-1375, 1380) — restore from upstream.
1361  const TensorInfo& output,
1364 {
1365  IgnoreUnused(descriptor);
1366 
1367  std::array<DataType, 7> supportedTypes =
1368  {
1376  };
1377 
1378  std::array<DataType, 1> logicalSupportedTypes =
1379  {
1381  };
1382 
1383  bool supported = true;
1384 
1385  if (descriptor.m_Operation == UnaryOperation::LogicalNot)
1386  {
1387  supported &= CheckSupportRule(TypeAnyOf(input, logicalSupportedTypes), reasonIfUnsupported,
1388  "Reference elementwise unary: input type not supported");
1389 
1390  supported &= CheckSupportRule(TypeAnyOf(output, logicalSupportedTypes), reasonIfUnsupported,
1391  "Reference elementwise unary: output type not supported");
1392  }
1393  else
1394  {
1395  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1396  "Reference elementwise unary: input type not supported");
1397 
1398  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1399  "Reference elementwise unary: output type not supported");
1400  }
1401 
1402  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1403  "Reference elementwise unary: input and output types not matching");
1404 
1405  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1406  "Reference elementwise unary: input and output shapes"
1407  "have different number of total elements");
1408 
1409  return supported;
1410 }
1411 
// Support check for the Reference FakeQuantization workload: only validates
// the input against a single supported DataType (entry elided by extraction,
// orig. 1421 — restore from upstream); the output is not checked here.
1415 {
1416  IgnoreUnused(descriptor);
1417  bool supported = true;
1418 
1419  std::array<DataType,1> supportedTypes =
1420  {
1422  };
1423 
1424  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1425  "Reference fake quantization: input type not supported.");
1426 
1427  return supported;
1428 }
1429 
// Support check for the Reference Fill workload.
// The input is required to be exactly Signed32 (presumably it carries the
// output shape — TODO confirm against the Fill layer contract); the output
// may be any of the three supported types (entries elided by extraction,
// orig. 1442-1444 — restore from upstream).
1431  const TensorInfo& output,
1432  const FillDescriptor& descriptor,
1434 {
1435  IgnoreUnused(descriptor);
1436  IgnoreUnused(output);
1437 
1438  bool supported = true;
1439 
1440  std::array<DataType,3> supportedTypes =
1441  {
1445  };
1446 
1447  supported &= CheckSupportRule(TypeIs(input, DataType::Signed32), reasonIfUnsupported,
1448  "Reference Fill: input type not supported.");
1449 
1450  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1451  "Reference Fill: output type not supported.");
1452  return supported;
1453 }
1454 
// Support check for the Reference Floor workload: input and output must each
// be one of three supported types (entries elided by extraction, orig.
// 1464-1466 — restore from upstream). No type-equality rule is applied.
1456  const TensorInfo& output,
1458 {
1459  IgnoreUnused(output);
1460  bool supported = true;
1461 
1462  std::array<DataType,3> supportedTypes =
1463  {
1467  };
1468 
1469  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1470  "Reference Floor: input type not supported.");
1471 
1472  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1473  "Reference Floor: output type not supported.");
1474 
1475  return supported;
1476 }
1477 
// Support check for the Reference FullyConnected workload.
// Validates input/output/weights types; BFloat16 input is special-cased to
// also allow a Float32 output. When biases are enabled, the bias type must be
// supported, match the weight type, and the bias tensor must be 1-D.
// NOTE(review): the weights type is checked twice with different messages
// ("weights type not supported." and "weights is not a supported type.") —
// one of the two rules is redundant.
// Extraction dropped the signature and DataType entries (orig. 1490-1495,
// 1534-1538) — restore from upstream.
1479  const TensorInfo& output,
1480  const TensorInfo& weights,
1481  const TensorInfo& biases,
1484 {
1485  bool supported = true;
1486 
1487  // Define supported types.
1488  std::array<DataType,6> supportedTypes =
1489  {
1496  };
1497 
1498  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1499  "Reference Fully Connected: input type not supported.");
1500 
1501  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1502  "Reference Fully Connected: output type not supported.");
1503 
1504  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1505  "Reference Fully Connected: weights type not supported.");
1506 
1507  // For FullyConnected, we allow to have BFloat16 input with Float32 output for optimization.
1508  if (input.GetDataType() == DataType::BFloat16)
1509  {
1510  if (output.GetDataType() != DataType::BFloat16 && output.GetDataType() != DataType::Float32)
1511  {
1512  reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n";
1513  supported = false;
1514  }
1515  }
1516  else
1517  {
1518  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1519  "Reference Fully Connected: input and output types mismatched.");
1520  }
1521 
1522  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1523  "Reference Fully Connected: weights is not a supported type.");
1524 
1525  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1526  "Reference Fully Connected: input and weights types mismatched.");
1527 
1528  if (descriptor.m_BiasEnabled)
1529  {
1530  // Defined supported types for bias
1531  std::array<DataType, 5>
1532  supportedBiasTypes =
1533  {
1539  };
1540 
1541  supported &= CheckSupportRule(TypeAnyOf(biases, supportedBiasTypes), reasonIfUnsupported,
1542  "Reference Fully Connected: bias type not supported.");
1543 
1544  supported &= CheckSupportRule(BiasAndWeightsTypesMatch(biases, weights), reasonIfUnsupported,
1545  "Reference Fully Connected: bias and weight types mismatch.");
1546 
1547  supported &= CheckSupportRule(BiasAndWeightsTypesCompatible(weights, supportedBiasTypes), reasonIfUnsupported,
1548  "Reference Fully Connected: bias type inferred from weights is incompatible.");
1549 
1550  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(biases, 1U), reasonIfUnsupported,
1551  "Reference Fully Connected: bias must have 1 dimension.");
1552 
1553  }
1554 
1555  return supported;
1556 }
1557 
// Support check for the Reference GatherNd workload: data input (input0) and
// output must share a supported type; the indices tensor (input1) must be
// Signed32. DataType entries elided by extraction (orig. 1566-1572).
1559  const armnn::TensorInfo& input1,
1560  const armnn::TensorInfo& output,
1562 {
1563  bool supported = true;
1564  std::array<DataType,7> supportedTypes =
1565  {
1573  };
1574 
1575  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1576  "Reference GatherNd: input type not supported");
1577 
1578  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1579  "Reference GatherNd: output type not supported");
1580 
1581  supported &= CheckSupportRule(TypeIs(input1, DataType::Signed32), reasonIfUnsupported,
1582  "Reference GatherNd: indices (input1) type not supported");
1583 
1584  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1585  "Reference GatherNd: input and output types not matching");
1586 
1587  return supported;
1588 }
1589 
// Support check for the Reference Gather workload: like GatherNd, but also
// rejects any descriptor axis other than 0.
// NOTE(review): `supported &= false;` works (it forces false) but reads oddly;
// `supported = false;` would express the intent directly.
// DataType entries elided by extraction (orig. 1599-1605).
1591  const armnn::TensorInfo& input1,
1592  const armnn::TensorInfo& output,
1595 {
1596  bool supported = true;
1597  std::array<DataType,7> supportedTypes =
1598  {
1606  };
1607 
1608  if (descriptor.m_Axis != 0)
1609  {
1610  reasonIfUnsupported.value() += std::string("Reference Gather: axis not supported\n");
1611  supported &= false;
1612  }
1613  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1614  "Reference Gather: input type not supported");
1615 
1616  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1617  "Reference Gather: output type not supported");
1618 
1619  supported &= CheckSupportRule(TypeIs(input1, DataType::Signed32), reasonIfUnsupported,
1620  "Reference Gather: indices (input1) type not supported");
1621 
1622  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1623  "Reference Gather: input and output types not matching");
1624 
1625  return supported;
1626 }
1627 
// Input layers are always supported by the Reference backend — no type or
// shape constraints apply.
1629  Optional<std::string&> /*reasonIfUnsupported*/) const
1630 {
1631  return true;
1632 }
1633 
// Support check for the Reference InstanceNormalization workload: input and
// output must share one of three supported types (entries elided by
// extraction, orig. 1643-1645) and have the same total element count.
1635  const TensorInfo& output,
1638 {
1639  IgnoreUnused(descriptor);
1640  // Define supported types
1641  std::array<DataType, 3> supportedTypes =
1642  {
1646  };
1647 
1648  bool supported = true;
1649 
1650  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1651  "Reference Instance Normalization: input type not supported.");
1652 
1653  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1654  "Reference Instance Normalization: output type not supported.");
1655 
1656  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1657  "Reference Instance Normalization: input and output types mismatched.");
1658 
1659  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1660  "Reference Instance Normalization: input and output shapes have different "
1661  "num total elements.");
1662 
1663  return supported;
1664 }
1665 
// Support check for the Reference L2Normalization workload: input and output
// must share one of six supported types (entries elided by extraction, orig.
// 1675-1680) and have the same total element count.
1667  const TensorInfo& output,
1670 {
1671  IgnoreUnused(descriptor);
1672  // Define supported types
1673  std::array<DataType, 6> supportedTypes =
1674  {
1681  };
1682 
1683  bool supported = true;
1684 
1685  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1686  "Reference L2normalization: input type not supported.");
1687 
1688  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1689  "Reference L2normalization: output type not supported.");
1690 
1691  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1692  "Reference L2normalization: input and output types mismatched.");
1693 
1694  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1695  "Reference L2normalization: input and output shapes have different "
1696  "num total elements.");
1697 
1698  return supported;
1699 }
1700 
// Support check for the Reference LogicalBinary workload: both inputs must be
// the single logical type (entry elided by extraction, orig. 1711).
// NOTE(review): only input0 is compared against the output type; input1 vs
// output is implied transitively through the input checks, not asserted.
1702  const TensorInfo& input1,
1703  const TensorInfo& output,
1706 {
1707  IgnoreUnused(descriptor);
1708 
1709  std::array<DataType, 1> supportedTypes =
1710  {
1712  };
1713 
1714  bool supported = true;
1715  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1716  "Reference LogicalBinary: input 0 type not supported");
1717  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1718  "Reference LogicalBinary: input 1 type not supported");
1719 
1720  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1721  "Reference LogicalBinary: input and output types do not match");
1722 
1723  return supported;
1724 }
1725 
// Support check for the Reference LogSoftmax workload: input and output must
// share one of three supported types (entries elided by extraction, orig.
// 1735-1737).
1727  const TensorInfo& output,
1730 {
1731  IgnoreUnused(descriptor);
1732 
1733  std::array<DataType, 3> supportedTypes =
1734  {
1738  };
1739 
1740  bool supported = true;
1741  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1742  "Reference LogSoftmax: input type not supported");
1743 
1744  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1745  "Reference LogSoftmax: output type not supported");
1746 
1747  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1748  "Reference LogSoftmax: input and output types do not match");
1749 
1750  return supported;
1751 }
1752 
// Support check for the Reference Lstm workload.
// The input type anchors everything: every state tensor, output tensor and
// every weight/bias in paramsInfo must match the input's type. Optional
// parameter groups (CIFG, peephole, projection, layer-norm) are only checked
// when the corresponding descriptor flag enables them.
// NOTE(review): `IgnoreUnused(descriptor)`/`IgnoreUnused(paramsInfo)` are
// redundant — both are used extensively below.
// Extraction dropped the signature and DataType entries (orig. 1770-1772).
1754  const TensorInfo& outputStateIn,
1755  const TensorInfo& cellStateIn,
1756  const TensorInfo& scratchBuffer,
1757  const TensorInfo& outputStateOut,
1758  const TensorInfo& cellStateOut,
1759  const TensorInfo& output,
1760  const LstmDescriptor& descriptor,
1763 {
1764  IgnoreUnused(descriptor);
1765  IgnoreUnused(paramsInfo);
1766 
1767  bool supported = true;
1768 
1769  std::array<DataType,3> supportedTypes = {
1773  };
1774 
1775  // check inputs and outputs
1776  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1777  "Reference Lstm: input is not a supported type.");
1778  supported &= CheckSupportRule(TypesAreEqual(input, outputStateIn), reasonIfUnsupported,
1779  "Reference Lstm: input and outputStateIn types are mismatched");
1780  supported &= CheckSupportRule(TypesAreEqual(input, cellStateIn), reasonIfUnsupported,
1781  "Reference Lstm: input and cellStateIn types are mismatched");
1782  supported &= CheckSupportRule(TypesAreEqual(input, scratchBuffer), reasonIfUnsupported,
1783  "Reference Lstm: input and scratchBuffer types are mismatched");
1784  supported &= CheckSupportRule(TypesAreEqual(input, outputStateOut), reasonIfUnsupported,
1785  "Reference Lstm: input and outputStateOut types are mismatched");
1786  supported &= CheckSupportRule(TypesAreEqual(input, cellStateOut), reasonIfUnsupported,
1787  "Reference Lstm: input and cellStateOut types are mismatched");
1788 
1789  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1790  "Reference Lstm: input and output types are mismatched");
1791  // check layer parameters
1792  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToForgetWeights()), reasonIfUnsupported,
1793  "Reference Lstm: input and InputToForgetWeights types are mismatched");
1794  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToCellWeights()), reasonIfUnsupported,
1795  "Reference Lstm: input and InputToCellWeights types are mismatched");
1796  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToOutputWeights()), reasonIfUnsupported,
1797  "Reference Lstm: input and InputToOutputWeights types are mismatched");
1798  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToForgetWeights()), reasonIfUnsupported,
1799  "Reference Lstm: input and RecurrentToForgetWeights types are mismatched");
1800  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToCellWeights()), reasonIfUnsupported,
1801  "Reference Lstm: input and RecurrentToCellWeights types are mismatched");
1802  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToOutputWeights()), reasonIfUnsupported,
1803  "Reference Lstm: input and RecurrentToOutputWeights types are mismatched");
1804  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetGateBias()), reasonIfUnsupported,
1805  "Reference Lstm: input and ForgetGateBias types are mismatched");
1806  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellBias()), reasonIfUnsupported,
1807  "Reference Lstm: input and CellBias types are mismatched");
1808  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputGateBias()), reasonIfUnsupported,
1809  "Reference Lstm: input and OutputGateBias types are mismatched");
1810  if (!descriptor.m_CifgEnabled)
1811  {
1812  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToInputWeights()), reasonIfUnsupported,
1813  "Reference Lstm: input and InputToInputWeights types are mismatched");
1814  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToInputWeights()),
1815  reasonIfUnsupported,
1816  "Reference Lstm: input and RecurrentToInputWeights types are mismatched");
1817  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputGateBias()), reasonIfUnsupported,
1818  "Reference Lstm: input and InputGateBias types are mismatched");
1819  if (descriptor.m_PeepholeEnabled)
1820  {
1821  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToInputWeights()),
1822  reasonIfUnsupported,
1823  "Reference Lstm: input and CellToInputWeights types are mismatched");
1824  }
1825  }
1826  if (descriptor.m_PeepholeEnabled)
1827  {
1828  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToForgetWeights()), reasonIfUnsupported,
1829  "Reference Lstm: input and CellToForgetWeights types are mismatched");
1830  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToOutputWeights()), reasonIfUnsupported,
1831  "Reference Lstm: input and CellToOutputWeights types are mismatched");
1832  }
1833  if (descriptor.m_ProjectionEnabled)
1834  {
1835  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionWeights()), reasonIfUnsupported,
1836  "Reference Lstm: input and mProjectionWeights types are mismatched");
1837  if (paramsInfo.m_ProjectionBias != nullptr)
1838  {
1839  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
1840  "Reference Lstm: input and ProjectionBias types are mismatched");
1841  }
1842  }
1843  if (descriptor.m_LayerNormEnabled)
1844  {
1845  if (!descriptor.m_CifgEnabled)
1846  {
1847  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputLayerNormWeights()),
1848  reasonIfUnsupported,
1849  "Reference Lstm: input and InputLayerNormWeights types are mismatched");
1850  }
1851  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetLayerNormWeights()),
1852  reasonIfUnsupported,
1853  "Reference Lstm: input and ForgetLayerNormWeights types are mismatched");
1854  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellLayerNormWeights()),
1855  reasonIfUnsupported,
1856  "Reference Lstm: input and CellLayerNormWeights types are mismatched");
1857  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputLayerNormWeights()),
1858  reasonIfUnsupported,
1859  "Reference Lstm: input and OutputLayerNormWeights types are mismatched");
1860  }
1861 
1862  return supported;
1863 }
1864 
// Support check for the Reference Maximum workload: same rule set as the
// other binary elementwise ops — matching supported types on both inputs and
// the output, plus broadcast-compatible shapes. DataType entries elided by
// extraction (orig. 1873-1879).
1866  const TensorInfo& input1,
1867  const TensorInfo& output,
1869 {
1870  bool supported = true;
1871 
1872  std::array<DataType,7> supportedTypes = {
1880  };
1881 
1882  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1883  "Reference maximum: input 0 is not a supported type.");
1884 
1885  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1886  "Reference maximum: input 1 is not a supported type.");
1887 
1888  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1889  "Reference maximum: output is not a supported type.");
1890 
1891  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1892  "Reference maximum: input 0 and Input 1 types are mismatched");
1893 
1894  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1895  "Reference maximum: input and output types are mismatched");
1896 
1897  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1898  "Reference maximum: shapes are not suitable for implicit broadcast.");
1899 
1900  return supported;
1901 }
1902 
// Support check for the Reference Mean workload.
// Beyond type checks, validates the expected output rank:
//  - m_KeepDims: output rank must equal input rank;
//  - no axes given: output collapses to rank 1;
//  - otherwise: rank = input rank minus number of reduced axes (min 1).
// DataType entries elided by extraction (orig. 1914-1919).
1904  const TensorInfo& output,
1905  const MeanDescriptor& descriptor,
1907 {
1908  bool supported = true;
1909  std::string meanLayerStr = "Mean";
1910  std::string outputTensorStr = "output";
1911 
1912  std::array<DataType,6> supportedTypes =
1913  {
1920  };
1921 
1922  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1923  "Reference Mean: input type not supported.");
1924 
1925  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1926  "Reference Mean: input and output types are mismatched");
1927 
1928  if (descriptor.m_KeepDims)
1929  {
1930  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, input.GetNumDimensions()),
1931  reasonIfUnsupported,
1932  CreateIncorrectDimensionsErrorMsg(input.GetNumDimensions(),
1933  output.GetNumDimensions(),
1934  meanLayerStr, outputTensorStr).data());
1935  }
1936  else if (descriptor.m_Axis.empty())
1937  {
1938  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1939  reasonIfUnsupported,
1940  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1941  meanLayerStr, outputTensorStr).data());
1942  }
1943  else
1944  {
1945  auto outputDim = input.GetNumDimensions() - armnn::numeric_cast<unsigned int>(descriptor.m_Axis.size());
1946 
1947  if (outputDim > 0)
1948  {
1949  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, outputDim),
1950  reasonIfUnsupported,
1951  CreateIncorrectDimensionsErrorMsg(outputDim, output.GetNumDimensions(),
1952  meanLayerStr, outputTensorStr).data());
1953  }
1954  else
1955  {
1956  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1957  reasonIfUnsupported,
1958  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1959  meanLayerStr, outputTensorStr).data());
1960  }
1961  }
1962 
1963  return supported;
1964 }
1965 
// Support check for the Reference MemCopy workload: input and output must
// share one of seven supported types (entries elided by extraction, orig.
// 1974-1980).
1967  const TensorInfo &output,
1969 {
1970  bool supported = true;
1971 
1972  std::array<DataType,7> supportedTypes =
1973  {
1981  };
1982 
1983  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1984  "Reference MemCopy: input type not supported");
1985 
1986  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1987  "Reference MemCopy: output type not supported");
1988 
1989  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1990  "Reference MemCopy: input and output types are mismatched");
1991 
1992  return supported;
1993 }
1994 
// Support check for the Reference Minimum workload: mirrors the Maximum
// rules — matching supported types on both inputs and the output, plus
// broadcast-compatible shapes. DataType entries elided by extraction
// (orig. 2003-2009).
1996  const TensorInfo& input1,
1997  const TensorInfo& output,
1999 {
2000  bool supported = true;
2001 
2002  std::array<DataType,7> supportedTypes = {
2010  };
2011 
2012  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2013  "Reference minimum: input 0 is not a supported type.");
2014 
2015  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2016  "Reference minimum: input 1 is not a supported type.");
2017 
2018  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2019  "Reference minimum: output is not a supported type.");
2020 
2021  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2022  "Reference minimum: input 0 and Input 1 types are mismatched");
2023 
2024  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2025  "Reference minimum: input and output types are mismatched");
2026 
2027  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2028  "Reference minimum: shapes are not suitable for implicit broadcast.");
2029 
2030  return supported;
2031 }
2032 
// Support check for the Reference Multiplication workload: same binary
// elementwise rule set (type membership, type equality, broadcast-compatible
// shapes). DataType entries elided by extraction (orig. 2041-2047).
2034  const TensorInfo& input1,
2035  const TensorInfo& output,
2037 {
2038  bool supported = true;
2039 
2040  std::array<DataType,7> supportedTypes = {
2048  };
2049 
2050  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2051  "Reference multiplication: input 0 is not a supported type.");
2052 
2053  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2054  "Reference multiplication: input 1 is not a supported type.");
2055 
2056  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2057  "Reference multiplication: output is not a supported type.");
2058 
2059  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2060  "Reference multiplication: input 0 and Input 1 types are mismatched");
2061 
2062  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2063  "Reference multiplication: input and output types are mismatched");
2064 
2065  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2066  "Reference multiplication: shapes are not suitable for implicit broadcast.");
2067 
2068  return supported;
2069 }
2070 
// Support check for the Reference Normalization workload.
// NOTE(review): unlike sibling checks, no TypesAreEqual(input, output) rule
// is applied here — confirm whether that is intentional.
// DataType entries elided by extraction (orig. 2081-2086).
2072  const TensorInfo& output,
2075 {
2076  IgnoreUnused(descriptor);
2077 
2078  // Define supported types
2079  std::array<DataType, 6> supportedTypes =
2080  {
2087  };
2088 
2089  bool supported = true;
2090 
2091  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2092  "Reference normalization: input type not supported.");
2093 
2094  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2095  "Reference normalization: output type not supported.");
2096 
2097  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
2098  "Reference normalization: input and output shapes have different "
2099  "num total elements.");
2100 
2101  return supported;
2102 }
2103 
// Output layers are always supported by the Reference backend — no type or
// shape constraints apply.
2105  Optional<std::string&> /*reasonIfUnsupported*/) const
2106 {
2107  return true;
2108 }
2109 
// Support check for the Reference Pad workload: input and output must share
// one of six supported types (entries elided by extraction, orig. 2121-2126).
2111  const TensorInfo& output,
2112  const PadDescriptor& descriptor,
2114 {
2115  IgnoreUnused(descriptor);
2116  bool supported = true;
2117 
2118  // Define supported output and inputs types.
2119  std::array<DataType,6> supportedTypes =
2120  {
2127  };
2128 
2129  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2130  "Reference pad: input is not a supported type.");
2131 
2132  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2133  "Reference pad: output is not a supported type.");
2134 
2135  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2136  "Reference pad: input and output types are mismatched.");
2137 
2138  return supported;
2139 }
2140 
// Support check for the Reference Permute workload: input and output must
// share one of six supported types (entries elided by extraction,
// orig. 2152-2157).
2142  const TensorInfo& output,
2145 {
2146  IgnoreUnused(descriptor);
2147  bool supported = true;
2148 
2149  // Define supported output and inputs types.
2150  std::array<DataType, 6> supportedTypes =
2151  {
2158  };
2159 
2160  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2161  "Reference permute: input is not a supported type.");
2162 
2163  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2164  "Reference permute: output is not a supported type.");
2165 
2166  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2167  "Reference permute: input and output types are mismatched.");
2168 
2169  return supported;
2170 }
2171 
// Support check for the Reference Pooling2d workload: input and output must
// share one of six supported types (entries elided by extraction,
// orig. 2183-2188).
// NOTE(review): the runtime error strings spell the layer "poolind2d"
// (typo for "pooling2d"); left unchanged here since they are emitted text.
2173  const TensorInfo& output,
2176 {
2177  IgnoreUnused(descriptor);
2178  bool supported = true;
2179 
2180  // Define supported output and inputs types.
2181  std::array<DataType,6> supportedTypes =
2182  {
2189  };
2190 
2191  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2192  "Reference poolind2d: input is not a supported type.");
2193 
2194  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2195  "Reference poolind2d: output is not a supported type.");
2196 
2197  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2198  "Reference poolind2d: input and output types are mismatched.");
2199 
2200  return supported;
2201 }
2202 
// Support check for the Reference Pooling3d workload: same rules as
// Pooling2d (entries elided by extraction, orig. 2214-2219).
// NOTE(review): the runtime error strings spell the layer "poolind3d"
// (typo for "pooling3d"); left unchanged here since they are emitted text.
2204  const TensorInfo& output,
2207 {
2208  IgnoreUnused(descriptor);
2209  bool supported = true;
2210 
2211  // Define supported output and inputs types.
2212  std::array<DataType,6> supportedTypes =
2213  {
2220  };
2221 
2222  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2223  "Reference poolind3d: input is not a supported type.");
2224 
2225  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2226  "Reference poolind3d: output is not a supported type.");
2227 
2228  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2229  "Reference poolind3d: input and output types are mismatched.");
2230 
2231  return supported;
2232 }
2233 
2234 
// Support check for the Reference QLstm workload: currently unconditional —
// every argument is ignored and the function always reports supported.
// Extraction dropped the signature and leading parameters (orig. 2235-2237).
2238  const TensorInfo& outputStateOut,
2239  const TensorInfo& cellStateOut,
2240  const TensorInfo& output,
2241  const QLstmDescriptor& descriptor,
2244 {
2245  IgnoreUnused(input);
2246  IgnoreUnused(previousOutputIn);
2247  IgnoreUnused(previousCellStateIn);
2248  IgnoreUnused(outputStateOut);
2249  IgnoreUnused(cellStateOut);
2250  IgnoreUnused(output);
2251  IgnoreUnused(descriptor);
2252  IgnoreUnused(paramsInfo);
2253 
2254  IgnoreUnused(reasonIfUnsupported);
2255 
2256  return true;
2257 }
2258 
// Support check for the Reference Quantize workload: the input may be any of
// seven supported types (entries elided by extraction, orig. 2267-2273), the
// output must be one of the four quantized types, and the shapes must carry
// the same total element count.
2260  const TensorInfo& output,
2262 {
2263  bool supported = true;
2264 
2265  // Define supported input types.
2266  std::array<DataType,7> supportedInputTypes = {
2274  };
2275 
2276  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
2277  "Reference quantize: input type not supported.");
2278 
2279  // Define supported output types.
2280  std::array<DataType,4> supportedOutputTypes = {
2284  DataType::QSymmS16
2285  };
2286  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
2287  "Reference quantize: output type not supported.");
2288 
2289  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
2290  "Reference quantize: input and output shapes have different num total elements.");
2291 
2292  return supported;
2293 }
2294 
2296  const TensorInfo& output,
2298 {
2299  IgnoreUnused(input);
2300  // Define supported output types.
2301  std::array<DataType,1> supportedOutputTypes =
2302  {
2304  };
2305 
2306  return CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
2307  "Reference rank: input type not supported.");
2308 }
2309 
2311  const TensorInfo& output,
2314 {
2315  IgnoreUnused(descriptor);
2316  bool supported = true;
2317  std::array<DataType,7> supportedTypes =
2318  {
2326  };
2327 
2328  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2329  "Reference Reduce: input type not supported");
2330 
2331  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2332  "Reference Reduce: output type not supported");
2333 
2334  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2335  "Reference Reduce: input and output types not matching");
2336 
2337  return supported;
2338 }
2339 
2341  const TensorInfo& output,
2344 {
2345  IgnoreUnused(output);
2346  IgnoreUnused(descriptor);
2347  // Define supported output types.
2348  std::array<DataType,8> supportedOutputTypes =
2349  {
2358  };
2359 
2360  return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
2361  "Reference reshape: input type not supported.");
2362 }
2363 
2365  const TensorInfo& output,
2368 {
2369  IgnoreUnused(descriptor);
2370  bool supported = true;
2371  std::array<DataType,6> supportedTypes =
2372  {
2379  };
2380 
2381  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2382  "Reference Resize: input type not supported");
2383 
2384  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2385  "Reference Resize: output type not supported");
2386 
2387  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2388  "Reference Resize: input and output types not matching");
2389 
2390  return supported;
2391 }
2392 
2394  const TensorInfo& output,
2396 {
2397  IgnoreUnused(input);
2398  bool supported = true;
2399 
2400  std::array<DataType, 1> supportedTypes =
2401  {
2403  };
2404 
2405  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2406  "Reference Shape: output type not supported");
2407 
2408  return supported;
2409 }
2410 
2412  const TensorInfo& output,
2413  const SliceDescriptor& descriptor,
2415 {
2416  IgnoreUnused(descriptor);
2417  bool supported = true;
2418 
2419  std::array<DataType, 5> supportedTypes =
2420  {
2426  };
2427 
2428  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2429  "Reference Slice: input type not supported");
2430 
2431  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2432  "Reference Slice: output type not supported");
2433 
2434  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2435  "Reference Slice: input and output types are mismatched");
2436 
2437  return supported;
2438 }
2439 
2441  const TensorInfo& output,
2444 {
2445  IgnoreUnused(descriptor);
2446  bool supported = true;
2447  std::array<DataType,7> supportedTypes =
2448  {
2456  };
2457 
2458  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2459  "Reference Softmax: output type not supported");
2460 
2461  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2462  "Reference Softmax: input type not supported");
2463 
2464  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2465  "Reference Softmax: input type not supported");
2466 
2467  return supported;
2468 }
2469 
2471  const TensorInfo& output,
2474 {
2475  IgnoreUnused(descriptor);
2476  bool supported = true;
2477  std::array<DataType,6> supportedTypes =
2478  {
2485  };
2486 
2487  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2488  "Reference SpaceToBatchNd: input type not supported");
2489 
2490  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2491  "Reference SpaceToBatchNd: output type not supported");
2492 
2493  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2494  "Reference SpaceToBatchNd: input and output types are mismatched");
2495 
2496  return supported;
2497 }
2498 
2500  const TensorInfo& output,
2503 {
2504 
2505  IgnoreUnused(descriptor);
2506  bool supported = true;
2507 
2508  std::array<DataType,6> supportedTypes =
2509  {
2516  };
2517 
2518  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2519  "Reference SpaceToDepth: input type not supported");
2520 
2521  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2522  "Reference SpaceToDepth: output type not supported");
2523 
2524  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2525  "Reference SpaceToDepth: input and output types are mismatched");
2526 
2527  return supported;
2528 }
2529 
2531  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
2532  const ViewsDescriptor& descriptor,
2534 {
2535  IgnoreUnused(descriptor);
2536  bool supported = true;
2537  std::array<DataType,6> supportedTypes =
2538  {
2545  };
2546 
2547  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2548  "Reference splitter: output type not supported");
2549  for (const TensorInfo& output : outputs)
2550  {
2551  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2552  "Reference splitter: input type not supported");
2553 
2554  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2555  "Reference splitter: input and output types mismatched.");
2556  }
2557 
2558  return supported;
2559 }
2560 
2561 bool RefLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
2562  const TensorInfo& output,
2563  const StackDescriptor& descriptor,
2565 {
2566  IgnoreUnused(descriptor);
2567 
2568  bool supported = true;
2569  std::array<DataType,7> supportedTypes =
2570  {
2578  };
2579 
2580  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2581  "Reference stack: output type not supported");
2582  for (const TensorInfo* input : inputs)
2583  {
2584  ARMNN_ASSERT(input != nullptr);
2585  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
2586  "Reference stack: input type not supported");
2587 
2588  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
2589  "Reference stack: input and output types mismatched.");
2590  }
2591 
2592  return supported;
2593 }
2594 
2596  const TensorInfo& output,
2599 {
2600  IgnoreUnused(descriptor);
2601  bool supported = true;
2602 
2603  std::array<DataType,5> supportedTypes =
2604  {
2610  };
2611 
2612  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2613  "Reference StridedSlice: input type not supported");
2614 
2615  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2616  "Reference StridedSlice: output type not supported");
2617 
2618  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2619  "Reference StridedSlice: input and output types are mismatched");
2620 
2621  return supported;
2622 }
2623 
2625  const TensorInfo& input1,
2626  const TensorInfo& output,
2628 {
2629  bool supported = true;
2630 
2631  std::array<DataType,7> supportedTypes = {
2639  };
2640 
2641  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2642  "Reference subtraction: input 0 is not a supported type.");
2643 
2644  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2645  "Reference subtraction: input 1 is not a supported type.");
2646 
2647  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2648  "Reference subtraction: output is not a supported type.");
2649 
2650  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2651  "Reference subtraction: input 0 and Input 1 types are mismatched");
2652 
2653  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2654  "Reference subtraction: input and output types are mismatched");
2655 
2656  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2657  "Reference subtraction: shapes are not suitable for implicit broadcast.");
2658 
2659  return supported;
2660 }
2661 
2663  const TensorInfo& alpha,
2664  const TensorInfo& output,
2666 {
2667  bool supported = true;
2668 
2669  std::array<DataType, 6> supportedTypes
2670  {
2677  };
2678 
2679  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2680  "PReLU: input is not a supported type.");
2681 
2682  supported &= CheckSupportRule(TypeAnyOf(alpha, supportedTypes), reasonIfUnsupported,
2683  "PReLU: alpha is not a supported type.");
2684 
2685  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2686  "PReLU: output is not a supported type.");
2687 
2688  supported &= CheckSupportRule(TypesAreEqual(input, alpha, output), reasonIfUnsupported,
2689  "PReLU: input, alpha and output types are mismatched");
2690 
2691  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input, alpha, output), reasonIfUnsupported,
2692  "PReLU: shapes are not suitable for implicit broadcast");
2693 
2694  return supported;
2695 }
2696 
2698  const TensorInfo& output,
2700  const TensorInfo& weights,
2703 {
2704  IgnoreUnused(descriptor);
2705  bool supported = true;
2706 
2707  std::array<DataType,7> supportedTypes =
2708  {
2716  };
2717 
2718  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2719  "Reference TransposeConvolution2d: input is not a supported type.");
2720 
2721  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2722  "Reference TransposeConvolution2d: output is not a supported type.");
2723 
2724  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2725  "Reference TransposeConvolution2d: input and output types mismatched.");
2726 
2727 
2728  const DataType inputType = input.GetDataType();
2729  if (IsQuantized8BitType(inputType))
2730  {
2731  std::array<DataType, 3> supportedWeightTypes =
2732  {
2736  };
2737 
2738  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
2739  "Reference TransposeConvolution2d: weights type not supported for "
2740  "quantized input.");
2741  }
2742  else
2743  {
2744  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
2745  "Reference TransposeConvolution2d: weights is not a supported type.");
2746 
2747  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
2748  "Reference TransposeConvolution2d: input and weights types mismatched.");
2749  }
2750 
2751  if (biases.has_value())
2752  {
2753  std::array<DataType,4> biasesSupportedTypes =
2754  {
2759  };
2760  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
2761  "Reference TransposeConvolution2d: biases is not a supported type.");
2762  }
2763 
2764  return supported;
2765 }
2766 
2768  const TensorInfo& output,
2771 {
2772  IgnoreUnused(descriptor);
2773  bool supported = true;
2774 
2775  // Define supported output and inputs types.
2776  std::array<DataType, 6> supportedTypes =
2777  {
2784  };
2785 
2786  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2787  "Reference transpose: input is not a supported type.");
2788 
2789  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2790  "Reference transpose: output is not a supported type.");
2791 
2792  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2793  "Reference transpose: input and output types are mismatched.");
2794 
2795  return supported;
2796 }
2797 
2799  const TensorInfo& input,
2800  const TensorInfo& outputStateIn,
2801  const TensorInfo& cellStateIn,
2802  const TensorInfo& outputStateOut,
2803  const TensorInfo& cellStateOut,
2804  const TensorInfo& output,
2808 {
2809  IgnoreUnused(descriptor);
2810  IgnoreUnused(paramsInfo);
2811  IgnoreUnused(outputStateIn);
2812  IgnoreUnused(cellStateIn);
2813  IgnoreUnused(outputStateOut);
2814  IgnoreUnused(cellStateOut);
2815  bool supported = true;
2816 
2817  std::array<DataType, 2> supportedTypes =
2818  {
2821  };
2822 
2823  std::array<DataType, 2> supportedWeightTypes =
2824  {
2826  DataType::QAsymmS8
2827  };
2828 
2829  std::array<DataType, 3> supportedBiasTypes =
2830  {
2834  };
2835 
2836  // check inputs and outputs
2837  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2838  "Reference UnidirectionalSequenceLstm: input is not a supported type.");
2839  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2840  "Reference UnidirectionalSequenceLstm: output is not a supported type.");
2841 
2842  // check layer parameters
2843  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToForgetWeights(), supportedWeightTypes),
2844  reasonIfUnsupported,
2845  "Reference UnidirectionalSequenceLstm: InputToForgetWeights "
2846  "is not a supported type.");
2847  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToCellWeights(), supportedWeightTypes),
2848  reasonIfUnsupported,
2849  "Reference UnidirectionalSequenceLstm: InputToCellWeights is not a supported type.");
2850  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToOutputWeights(), supportedWeightTypes),
2851  reasonIfUnsupported,
2852  "Reference UnidirectionalSequenceLstm: InputToOutputWeights "
2853  "is not a supported type.");
2854  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToForgetWeights(), supportedWeightTypes),
2855  reasonIfUnsupported,
2856  "Reference UnidirectionalSequenceLstm: RecurrentToForgetWeights "
2857  "is not a supported type.");
2858  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToCellWeights(), supportedWeightTypes),
2859  reasonIfUnsupported,
2860  "Reference UnidirectionalSequenceLstm: RecurrentToCellWeights "
2861  "is not a supported type.");
2862  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToOutputWeights(), supportedWeightTypes),
2863  reasonIfUnsupported,
2864  "Reference UnidirectionalSequenceLstm: RecurrentToOutputWeights "
2865  "is not a supported type.");
2866 
2867  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetForgetGateBias(), supportedBiasTypes), reasonIfUnsupported,
2868  "Reference UnidirectionalSequenceLstm: ForgetGateBias is not a supported type.");
2869  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellBias(), supportedBiasTypes), reasonIfUnsupported,
2870  "Reference UnidirectionalSequenceLstm: CellBias is not a supported type.");
2871  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetOutputGateBias(), supportedBiasTypes), reasonIfUnsupported,
2872  "Reference UnidirectionalSequenceLstm: OutputGateBias is not a supported type.");
2873  if (!descriptor.m_CifgEnabled)
2874  {
2875  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToInputWeights(), supportedWeightTypes),
2876  reasonIfUnsupported,
2877  "Reference UnidirectionalSequenceLstm: InputToInputWeights "
2878  "is not a supported type.");
2879  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToInputWeights(), supportedWeightTypes),
2880  reasonIfUnsupported,
2881  "Reference UnidirectionalSequenceLstm: RecurrentToInputWeights "
2882  "is not a supported type.");
2883  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputGateBias(), supportedBiasTypes), reasonIfUnsupported,
2884  "Reference UnidirectionalSequenceLstm: InputGateBias is not a supported type.");
2885  if (descriptor.m_PeepholeEnabled)
2886  {
2887  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToInputWeights(), supportedWeightTypes),
2888  reasonIfUnsupported,
2889  "Reference UnidirectionalSequenceLstm: CellToInputWeights "
2890  "is not a supported type.");
2891  }
2892  }
2893  if (descriptor.m_PeepholeEnabled)
2894  {
2895  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToForgetWeights(), supportedWeightTypes),
2896  reasonIfUnsupported,
2897  "Reference UnidirectionalSequenceLstm: CellToForgetWeights "
2898  "is not a supported type.");
2899  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToOutputWeights(), supportedWeightTypes),
2900  reasonIfUnsupported,
2901  "Reference UnidirectionalSequenceLstm: CellToOutputWeights "
2902  "is not a supported type.");
2903  }
2904  if (descriptor.m_ProjectionEnabled)
2905  {
2906  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetProjectionWeights(), supportedWeightTypes),
2907  reasonIfUnsupported,
2908  "Reference UnidirectionalSequenceLstm: ProjectionWeights "
2909  "is not a supported type.");
2910  if (paramsInfo.m_ProjectionBias != nullptr)
2911  {
2912  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
2913  "Reference UnidirectionalSequenceLstm: input and ProjectionBias types "
2914  "are mismatched");
2915  }
2916  }
2917  if (descriptor.m_LayerNormEnabled)
2918  {
2919  if (!descriptor.m_CifgEnabled)
2920  {
2921  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputLayerNormWeights(), supportedWeightTypes),
2922  reasonIfUnsupported,
2923  "Reference UnidirectionalSequenceLstm: InputLayerNormWeights "
2924  "is not a supported type.");
2925  }
2926  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetForgetLayerNormWeights(), supportedWeightTypes),
2927  reasonIfUnsupported,
2928  "Reference UnidirectionalSequenceLstm: ForgetLayerNormWeights "
2929  "is not a supported type.");
2930  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellLayerNormWeights(), supportedWeightTypes),
2931  reasonIfUnsupported,
2932  "Reference UnidirectionalSequenceLstm: CellLayerNormWeights "
2933  "is not a supported type.");
2934  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetOutputLayerNormWeights(), supportedWeightTypes),
2935  reasonIfUnsupported,
2936  "Reference UnidirectionalSequenceLstm: OutputLayerNormWeights "
2937  "is not a supported type.");
2938  }
2939 
2940  return supported;
2941 }
2942 
2943 } // namespace armnn
bool m_ProjectionEnabled
Enable/disable the projection layer.
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ViewsDescriptor for the SplitterLayer.
const TensorInfo & GetRecurrentToCellWeights() const
Definition: LstmParams.hpp:145
const TensorInfo const TensorInfo & anchors
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo & paramsInfo
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & GetCellBias() const
Definition: LstmParams.hpp:173
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & output
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
A ReshapeDescriptor for the ReshapeLayer.
const TensorInfo & GetRecurrentToInputWeights() const
Definition: LstmParams.hpp:137
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & GetCellLayerNormWeights() const
Definition: LstmParams.hpp:197
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
const TensorInfo & GetRecurrentToOutputWeights() const
Definition: LstmParams.hpp:149
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & gamma
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const std::vector< std::reference_wrapper< TensorInfo > > & outputs
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsStackSupported(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A Convolution2dDescriptor for the Convolution2dLayer.
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertBf16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & GetCellToInputWeights() const
Definition: LstmParams.hpp:153
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
const TensorInfo & scores
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & detectionClasses
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
const TensorInfo & GetCellToForgetWeights() const
Definition: LstmParams.hpp:157
const TensorInfo const ActivationDescriptor & descriptor
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFakeQuantizationSupported(const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConcatSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
const TensorInfo & GetForgetLayerNormWeights() const
Definition: LstmParams.hpp:193
const TensorInfo & outputStateIn
const TensorInfo const TensorInfo & previousCellStateIn
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & numDetections
const TensorInfo & GetCellToOutputWeights() const
Definition: LstmParams.hpp:161
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ResizeBilinearDescriptor for the ResizeBilinearLayer.
const TensorInfo & alpha
Base class for all descriptors.
Definition: Descriptors.hpp:22
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
A StackDescriptor for the StackLayer.
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:285
bool IsOutputSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & GetInputToCellWeights() const
Definition: LstmParams.hpp:129
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A PadDescriptor for the PadLayer.
bool IsConstantSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
DataType
Definition: Types.hpp:48
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo const TensorInfo & cellStateIn
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An LstmDescriptor for the LstmLayer.
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept...
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & GetInputToOutputWeights() const
Definition: LstmParams.hpp:133
A L2NormalizationDescriptor for the L2NormalizationLayer.
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
DataType GetDataType() const
Definition: Tensor.hpp:198
An OriginsDescriptor for the ConcatLayer.
A ReduceDescriptor for the REDUCE operators.
bool has_value() const noexcept
Definition: Optional.hpp:53
A FullyConnectedDescriptor for the FullyConnectedLayer.
bool IsRankSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool m_BiasEnabled
Enable/disable bias.
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & outputStateOut
A FakeQuantizationDescriptor for the FakeQuantizationLayer.
const TensorInfo * m_ProjectionBias
Definition: LstmParams.hpp:105
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & cellStateOut
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A GatherDescriptor for the GatherLayer.
bool m_PeepholeEnabled
Enable/disable peephole.
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & beta
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A QLstmDescriptor for the QLstmLayer.
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
bool IsConvertFp32ToBf16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
min(a, max(b, input)) ReLu1 & ReLu6.
const TensorInfo & GetRecurrentToForgetWeights() const
Definition: LstmParams.hpp:141
bool IsDebugSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A SliceDescriptor for the SliceLayer.
A Convolution3dDescriptor for the Convolution3dLayer.
const TensorInfo & previousOutputIn
A Pooling3dDescriptor for the Pooling3dLayer.
bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
const TensorInfo & GetInputToInputWeights() const
Definition: LstmParams.hpp:121
const TensorInfo & GetOutputLayerNormWeights() const
Definition: LstmParams.hpp:201
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
bool IsLayerSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &, Optional< std::string &> reasonIfUnsupported) const override
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & detectionScores
EmptyOptional is used to initialize the Optional class in case we want to have a default value for an Optional.
Definition: Optional.hpp:32
int32_t m_Axis
The axis in params to gather indices from.
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & GetForgetGateBias() const
Definition: LstmParams.hpp:169
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
A MeanDescriptor for the MeanLayer.
const TensorInfo const TensorInfo const TensorInfo & detectionBoxes
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool m_LayerNormEnabled
Enable/disable layer normalization.
std::enable_if_t< std::is_unsigned< Source >::value && std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
const TensorInfo & GetInputGateBias() const
Definition: LstmParams.hpp:165
A TransposeDescriptor for the TransposeLayer.
const TensorInfo & GetProjectionWeights() const
Definition: LstmParams.hpp:181
A StridedSliceDescriptor for the StridedSliceLayer.
const TensorInfo & GetInputToForgetWeights() const
Definition: LstmParams.hpp:125
bool IsInputSupported(const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & input1
const TensorInfo & GetInputLayerNormWeights() const
Definition: LstmParams.hpp:189
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A Pooling2dDescriptor for the Pooling2dLayer.
A NormalizationDescriptor for the NormalizationLayer.
const TensorInfo & GetOutputGateBias() const
Definition: LstmParams.hpp:177
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo const TensorInfo const TensorInfo & scratchBuffer
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
A ChannelShuffleDescriptor for the ChannelShuffle operator.
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & GetProjectionBias() const
Definition: LstmParams.hpp:185
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195
bool IsSupportedForDataTypeGeneric(Optional< std::string &> reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
A SoftmaxDescriptor for the SoftmaxLayer.
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:59
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A FillDescriptor for the FillLayer.
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo const TensorInfo & mean
A PermuteDescriptor for the PermuteLayer.
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:467