// ArmNN 22.02 — RefLayerSupport.cpp (Doxygen source listing; page-navigation
// chrome removed. NOTE: the extraction dropped many original lines — visible
// as gaps in the embedded line numbering below.)
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "RefLayerSupport.hpp"
7 
8 #include <armnn/TypesUtils.hpp>
9 #include <armnn/Types.hpp>
13 
14 #include <LayerSupportCommon.hpp>
16 
17 #include <vector>
18 #include <array>
19 
20 namespace armnn
21 {
22 
23 namespace
24 {
25 
26 template<typename Float32Func, typename Uint8Func, typename ... Params>
27 bool IsSupportedForDataTypeRef(Optional<std::string&> reasonIfUnsupported,
28  DataType dataType,
29  Float32Func floatFuncPtr,
30  Uint8Func uint8FuncPtr,
31  Params&&... params)
32 {
33  return IsSupportedForDataTypeGeneric(reasonIfUnsupported,
34  dataType,
35  &FalseFunc<Params...>,
36  floatFuncPtr,
37  uint8FuncPtr,
38  &FalseFunc<Params...>,
39  &FalseFunc<Params...>,
40  std::forward<Params>(params)...);
41 }
42 
43 } // anonymous namespace
44 
45 namespace
46 {
47 
// Builds the standard "wrong number of dimensions" diagnostic used by the
// rank-checking support rules in this file.
//
// \param expected   Number of dimensions the reference layer requires.
// \param actual     Number of dimensions the tensor actually has.
// \param layerStr   Human-readable layer name, e.g. "batchToSpaceNd".
// \param tensorName Name of the offending tensor, e.g. "input" or "output".
// \return Message of the form:
//         "Reference <layer>: Expected E dimensions but got A dimensions
//          instead, for the '<tensor>' tensor."
std::string CreateIncorrectDimensionsErrorMsg(unsigned int expected,
                                              unsigned int actual,
                                              const std::string& layerStr,
                                              const std::string& tensorName)
{
    // The strings are only read, so take them by const reference instead of
    // the original mutable std::string& (all callers pass lvalues, so this
    // is backward compatible within this translation unit).
    std::string errorMsg = "Reference " + layerStr + ": Expected " + std::to_string(expected) +
                           " dimensions but got" + " " + std::to_string(actual) +
                           " dimensions instead, for the '" + tensorName + "' tensor.";

    return errorMsg;
}
58 
59 } // anonymous namespace
60 
62  const std::vector<TensorInfo>& infos,
63  const BaseDescriptor& descriptor,
64  const Optional<LstmInputParamsInfo>& lstmParamsInfo,
65  const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmInputParamsInfo,
66  Optional<std::string&> reasonIfUnsupported) const
67 {
68  switch (type)
69  {
71  return IsActivationSupported(infos[0],
72  infos[1],
73  *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
74  reasonIfUnsupported);
76  return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
78  return IsArgMinMaxSupported(infos[0],
79  infos[1],
80  *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
81  reasonIfUnsupported);
83  return IsBatchNormalizationSupported(infos[0],
84  infos[1],
85  infos[2],
86  infos[3],
87  infos[4],
88  infos[5],
89  *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
90  (&descriptor)),
91  reasonIfUnsupported);
93  return IsBatchToSpaceNdSupported(infos[0],
94  infos[1],
95  *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
96  reasonIfUnsupported);
98  return IsComparisonSupported(infos[0],
99  infos[1],
100  infos[2],
101  *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
102  reasonIfUnsupported);
103  case LayerType::Concat:
104  {
105  std::vector<const TensorInfo*> inputInfos;
106  for (uint32_t i = 0; i < (infos.size() - 1); i++)
107  {
108  inputInfos.push_back(&infos[i]);
109  }
110  return IsConcatSupported(inputInfos,
111  infos[infos.size() - 1],
112  *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
114  }
115  case LayerType::Constant:
116  return IsConstantSupported(infos[0], reasonIfUnsupported);
118  return IsConvertBf16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
120  return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
122  return IsConvertFp32ToBf16Supported(infos[0], infos[1], reasonIfUnsupported);
124  return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
126  {
127  if (infos.size() != 4)
128  {
129  throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
130  "TensorInfos should be of format: {input, output, weights, biases}.");
131  }
132 
133  auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
134  if (infos[3] == TensorInfo())
135  {
136  return IsConvolution2dSupported(infos[0],
137  infos[1],
138  desc,
139  infos[2],
140  EmptyOptional(),
141  reasonIfUnsupported);
142  }
143  else
144  {
145  return IsConvolution2dSupported(infos[0],
146  infos[1],
147  desc,
148  infos[2],
149  infos[3],
150  reasonIfUnsupported);
151  }
152  }
154  return IsDepthToSpaceSupported(infos[0],
155  infos[1],
156  *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
157  reasonIfUnsupported);
159  {
160  if (infos.size() != 4)
161  {
162  throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
163  "TensorInfos should be of format: {input, output, weights, biases}.");
164  }
165 
166  auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
167  if (infos[3] == TensorInfo())
168  {
169  return IsDepthwiseConvolutionSupported(infos[0],
170  infos[1],
171  desc,
172  infos[2],
173  EmptyOptional(),
174  reasonIfUnsupported);
175  }
176  else
177  {
178  return IsDepthwiseConvolutionSupported(infos[0],
179  infos[1],
180  desc,
181  infos[2],
182  infos[3],
183  reasonIfUnsupported);
184  }
185  }
187  return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
188  case LayerType::Division:
189  return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
191  return IsElementwiseUnarySupported(infos[0],
192  infos[1],
193  *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
194  reasonIfUnsupported);
195  case LayerType::Fill:
196  return IsFillSupported(infos[0],
197  infos[1],
198  *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
199  reasonIfUnsupported);
200  case LayerType::Floor:
201  return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
203  return IsFullyConnectedSupported(infos[0],
204  infos[1],
205  infos[2],
206  infos[3],
207  *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
208  reasonIfUnsupported);
209  case LayerType::Gather:
210  return IsGatherSupported(infos[0],
211  infos[1],
212  infos[2],
213  *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
214  reasonIfUnsupported);
215  case LayerType::Input:
216  return IsInputSupported(infos[0], reasonIfUnsupported);
218  return IsInstanceNormalizationSupported(infos[0],
219  infos[1],
220  *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
221  (&descriptor)),
222  reasonIfUnsupported);
224  return IsL2NormalizationSupported(infos[0],
225  infos[1],
226  *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
227  reasonIfUnsupported);
229  return IsLogicalBinarySupported(infos[0],
230  infos[1],
231  infos[2],
232  *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
233  reasonIfUnsupported);
235  return IsLogSoftmaxSupported(infos[0],
236  infos[1],
237  *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
238  reasonIfUnsupported);
239  case LayerType::Lstm:
240  return IsLstmSupported(infos[0],
241  infos[1],
242  infos[2],
243  infos[3],
244  infos[4],
245  infos[5],
246  infos[6],
247  *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
248  lstmParamsInfo.value(),
250  case LayerType::QLstm:
251  return IsQLstmSupported(infos[0],
252  infos[1],
253  infos[2],
254  infos[3],
255  infos[4],
256  infos[5],
257  *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
258  lstmParamsInfo.value(),
260  case LayerType::Maximum:
261  return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
262  case LayerType::Mean:
263  return IsMeanSupported(infos[0],
264  infos[1],
265  *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
266  reasonIfUnsupported);
267  case LayerType::Minimum:
268  return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
270  return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
272  return IsNormalizationSupported(infos[0],
273  infos[1],
274  *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
275  reasonIfUnsupported);
276  case LayerType::Output:
277  return IsOutputSupported(infos[0], reasonIfUnsupported);
278  case LayerType::Pad:
279  return IsPadSupported(infos[0],
280  infos[1],
281  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
282  reasonIfUnsupported);
283  case LayerType::Permute:
284  return IsPermuteSupported(infos[0],
285  infos[1],
286  *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
287  reasonIfUnsupported);
289  return IsPooling2dSupported(infos[0],
290  infos[1],
291  *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
292  reasonIfUnsupported);
293  case LayerType::Prelu:
294  return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
295  case LayerType::Quantize:
296  return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
297  case LayerType::Reshape:
298  return IsReshapeSupported(infos[0],
299  infos[1],
300  *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
301  reasonIfUnsupported);
302  case LayerType::Resize:
303  return IsResizeSupported(infos[0],
304  infos[1],
305  *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
306  reasonIfUnsupported);
307  case LayerType::Reduce:
308  return IsReduceSupported(infos[0],
309  infos[1],
310  *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
311  reasonIfUnsupported);
312  case LayerType::Slice:
313  return IsSliceSupported(infos[0],
314  infos[1],
315  *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
316  reasonIfUnsupported);
317  case LayerType::Softmax:
318  return IsSoftmaxSupported(infos[0],
319  infos[1],
320  *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
321  reasonIfUnsupported);
323  return IsSpaceToBatchNdSupported(infos[0],
324  infos[1],
325  *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
326  reasonIfUnsupported);
328  return IsSpaceToDepthSupported(infos[0],
329  infos[1],
330  *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
331  reasonIfUnsupported);
332  case LayerType::Splitter:
333  {
334  std::vector<TensorInfo> outputInfos;
335  for (uint32_t i = 1; i < infos.size(); i++)
336  {
337  outputInfos.push_back(infos[i]);
338  }
339  return IsSplitterSupported(infos[0],
340  {outputInfos.begin(), outputInfos.end()},
341  *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
343  }
344  case LayerType::Stack:
345  {
346  std::vector<const TensorInfo*> inputInfos;
347  for (uint32_t i = 0; i < infos.size() - 1; i++)
348  {
349  inputInfos.push_back(&infos[i]);
350  }
351  return IsStackSupported(inputInfos,
352  infos[infos.size() - 1],
353  *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
355  }
357  return IsStridedSliceSupported(infos[0],
358  infos[1],
359  *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
360  reasonIfUnsupported);
362  return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
364  return IsTransposeSupported(infos[0],
365  infos[1],
366  *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
367  reasonIfUnsupported);
369  {
370  if (infos.size() != 4)
371  {
372  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
373  "TensorInfos should be of format: {input, output, weights, biases}.");
374  }
375 
376  auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
377  if (infos[3] == TensorInfo())
378  {
379  return IsTransposeConvolution2dSupported(infos[0],
380  infos[1],
381  desc,
382  infos[2],
383  EmptyOptional(),
384  reasonIfUnsupported);
385  }
386  else
387  {
388  return IsTransposeConvolution2dSupported(infos[0],
389  infos[1],
390  desc,
391  infos[2],
392  infos[3],
393  reasonIfUnsupported);
394  }
395  }
396  case LayerType::Cast:
397  return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
399  return IsChannelShuffleSupported(infos[0],
400  infos[1],
401  *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
402  reasonIfUnsupported);
404  {
405  if (infos.size() != 4)
406  {
407  throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
408  "TensorInfos should be of format: {input, output, weights, biases}.");
409  }
410 
411  auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
412  if (infos[3] == TensorInfo())
413  {
414  return IsConvolution3dSupported(infos[0],
415  infos[1],
416  desc,
417  infos[2],
418  EmptyOptional(),
419  reasonIfUnsupported);
420  }
421  else
422  {
423  return IsConvolution3dSupported(infos[0],
424  infos[1],
425  desc,
426  infos[2],
427  infos[3],
428  reasonIfUnsupported);
429  }
430  }
431  case LayerType::Debug:
432  return IsDebugSupported(infos[0], infos[1], reasonIfUnsupported);
434  return IsDetectionPostProcessSupported(infos[0],
435  infos[1],
436  infos[2],
437  infos[3],
438  infos[4],
439  infos[5],
440  infos[6],
441  *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>
442  (&descriptor)),
443  reasonIfUnsupported);
445  return IsFakeQuantizationSupported(infos[0],
446  *(PolymorphicDowncast<const FakeQuantizationDescriptor*>(&descriptor)),
447  reasonIfUnsupported);
448  case LayerType::MemCopy:
449  return IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
450  case LayerType::Rank:
451  return IsRankSupported(infos[0], infos[1], reasonIfUnsupported);
452  case LayerType::Shape:
453  return IsShapeSupported(infos[0], infos[1], reasonIfUnsupported);
455  {
456  if (infos.size() != 6)
457  {
458  throw InvalidArgumentException("Invalid number of UnidirectionalSequenceLstm TensorInfos. TensorInfos "
459  "should be of format: {input, outputStateIn, cellStateIn, "
460  "hiddenStateOutputVal, cellStateOutputVal, output}");
461  }
462  auto desc = *(PolymorphicDowncast<const UnidirectionalSequenceLstmDescriptor*>(&descriptor));
463 
464  bool isHiddenStateOutputOptional = (infos[4] == TensorInfo());
465  bool isCellStateOutput = (infos[5] == TensorInfo());
466  if (isHiddenStateOutputOptional && isCellStateOutput)
467  {
469  infos[1],
470  infos[2],
471  infos[3],
472  EmptyOptional(),
473  EmptyOptional(),
474  desc,
475  lstmParamsInfo.value(),
477  }
478  else if (isHiddenStateOutputOptional)
479  {
481  infos[1],
482  infos[2],
483  infos[3],
484  EmptyOptional(),
485  infos[5],
486  desc,
487  lstmParamsInfo.value(),
489  }
490  else if (isCellStateOutput)
491  {
493  infos[1],
494  infos[2],
495  infos[3],
496  infos[4],
497  EmptyOptional(),
498  desc,
499  lstmParamsInfo.value(),
501  }
502  else
503  {
505  infos[1],
506  infos[2],
507  infos[3],
508  infos[4],
509  infos[5],
510  desc,
511  lstmParamsInfo.value(),
513  }
514  }
516  return IsPooling3dSupported(infos[0],
517  infos[1],
518  *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
519  reasonIfUnsupported);
520  case LayerType::Map:
521  return true;
522  case LayerType::Unmap:
523  return true;
525  return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
526  case LayerType::Merge:
527  return LayerSupportBase::IsMergeSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
530  infos[1],
531  infos[2],
532  infos[3],
533  infos[4],
534  quantizedLstmInputParamsInfo.value(),
536  default:
537  // layers not supported in neon by default:
538  // precompiled, standin, switch
539  return false;
540  }
541 }
542 
544  const TensorInfo& output,
547 {
548  bool supported = true;
549 
550  // Define supported types.
551  std::array<DataType,6> supportedTypes = {
558  };
559 
560  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
561  "Reference activation: input type not supported.");
562 
563  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
564  "Reference activation: output type not supported.");
565 
566  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
567  "Reference activation: input and output types mismatched.");
568 
569  supported &= CheckSupportRule(ShapesAreSameRank(input, output), reasonIfUnsupported,
570  "Reference activation: input and output shapes are of different rank.");
571 
572 
573  struct ActivationFunctionSupported : public Rule
574  {
575  ActivationFunctionSupported(const ActivationDescriptor& desc)
576  {
577  switch(desc.m_Function)
578  {
591  {
592  m_Res = true;
593  break;
594  }
595  default:
596  {
597  m_Res = false;
598  break;
599  }
600  }
601  }
602  };
603 
604  // Function is supported
605  supported &= CheckSupportRule(ActivationFunctionSupported(descriptor), reasonIfUnsupported,
606  "Reference activation: function not supported.");
607 
608  return supported;
609 }
610 
612  const TensorInfo& input1,
613  const TensorInfo& output,
615 {
616  bool supported = true;
617 
618  std::array<DataType,7> supportedTypes = {
626  };
627 
628  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
629  "Reference addition: input 0 is not a supported type.");
630 
631  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
632  "Reference addition: input 1 is not a supported type.");
633 
634  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
635  "Reference addition: output is not a supported type.");
636 
637  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
638  "Reference addition: input 0 and Input 1 types are mismatched");
639 
640  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
641  "Reference addition: input and output types are mismatched");
642 
643  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
644  "Reference addition: shapes are not suitable for implicit broadcast.");
645 
646  return supported;
647 }
648 
652 {
653  IgnoreUnused(descriptor);
654 
655  std::array<DataType, 8> supportedInputTypes =
656  {
665  };
666 
667  std::array<DataType,2> supportedOutputTypes = {
669  DataType::Signed64
670  };
671 
672  bool supported = true;
673 
674  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
675  "Reference ArgMinMax: input is not a supported type.");
676  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
677  "Reference ArgMinMax: output type not supported");
678 
679  return supported;
680 }
681 
683  const TensorInfo& output,
684  const TensorInfo& mean,
685  const TensorInfo& variance,
686  const TensorInfo& beta,
687  const TensorInfo& gamma,
690 {
691  IgnoreUnused(descriptor);
692 
693  std::array<DataType, 6> supportedTypes =
694  {
701  };
702 
703  bool supported = true;
704 
705  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
706  "Reference batch normalization: input is not a supported type.");
707 
708  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
709  "Reference batch normalization: output is not a supported type.");
710 
711  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
712  "Reference batch normalization: input and output types are mismatched");
713 
714  supported &= CheckSupportRule(TypeAnyOf(mean, supportedTypes), reasonIfUnsupported,
715  "Reference batch normalization: mean is not a supported type.");
716 
717  supported &= CheckSupportRule(TypeAnyOf(variance, supportedTypes), reasonIfUnsupported,
718  "Reference batch normalization: variance is not a supported type.");
719 
720  supported &= CheckSupportRule(TypeAnyOf(beta, supportedTypes), reasonIfUnsupported,
721  "Reference batch normalization: beta is not a supported type.");
722 
723  supported &= CheckSupportRule(TypeAnyOf(gamma, supportedTypes), reasonIfUnsupported,
724  "Reference batch normalization: gamma is not a supported type.");
725 
726  return supported;
727 }
728 
730  const TensorInfo& output,
733 {
734  IgnoreUnused(descriptor);
735 
736  bool supported = true;
737 
738  std::string batchToSpaceNdLayerStr = "batchToSpaceNd";
739  std::string inputTensorStr = "input";
740  std::string outputTensorStr = "output";
741 
742  // Define supported types.
743  std::array<DataType,6> supportedTypes =
744  {
751  };
752 
753  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
754  "Reference BatchToSpaceNd: input type not supported.");
755 
756  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
757  "Reference BatchToSpaceNd: output type not supported.");
758 
759  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
760  "Reference BatchToSpaceNd: input and output types mismatched.");
761 
762  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 4),
763  reasonIfUnsupported,
764  CreateIncorrectDimensionsErrorMsg(4,
765  output.GetNumDimensions(),
766  batchToSpaceNdLayerStr,
767  outputTensorStr).data());
768 
769  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(input, 4),
770  reasonIfUnsupported,
771  CreateIncorrectDimensionsErrorMsg(4,
772  input.GetNumDimensions(),
773  batchToSpaceNdLayerStr,
774  inputTensorStr).data());
775 
776  return supported;
777 }
778 
780  const TensorInfo& output,
782 {
783  std::array<DataType, 9> supportedInputTypes =
784  {
793  };
794 
795  bool supported = true;
796  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
797  "Reference cast: input is not a supported type");
798 
799 
800  supported &= CheckSupportRule(TypeAnyOf(output, supportedInputTypes), reasonIfUnsupported,
801  "Reference cast: output is not a supported type");
802 
803  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
804  "Reference cast: input and output shapes have different number of total elements");
805 
806  return supported;
807 }
808 
810  const TensorInfo& output,
813 {
814  IgnoreUnused(descriptor);
815  bool supported = true;
816 
817  // Define supported output and inputs types.
818  std::array<DataType, 7> supportedTypes =
819  {
827  };
828 
829  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
830  "Reference ChannelShuffle: input is not a supported type.");
831 
832  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
833  "Reference ChannelShuffle: output is not a supported type.");
834 
835  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
836  "Reference ChannelShuffle: input and output types are mismatched.");
837 
838  return supported;
839 }
840 
841 
843  const TensorInfo& input1,
844  const TensorInfo& output,
847 {
848  IgnoreUnused(descriptor);
849  std::array<DataType, 8> supportedInputTypes =
850  {
859  };
860 
861  bool supported = true;
862  supported &= CheckSupportRule(TypeAnyOf(input0, supportedInputTypes), reasonIfUnsupported,
863  "Reference comparison: input 0 is not a supported type");
864 
865  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
866  "Reference comparison: input 0 and Input 1 types are mismatched");
867 
868  supported &= CheckSupportRule(TypeIs(output, DataType::Boolean), reasonIfUnsupported,
869  "Reference comparison: output is not of type Boolean");
870 
871  return supported;
872 }
873 
// Checks whether the reference backend can execute a Concat layer: the
// output must be one of the supported data types and every (non-null)
// input must match the output's type exactly.
// NOTE(review): this Doxygen listing dropped original lines 876-877 (the
// remaining signature parameters — presumably the OriginsDescriptor and
// reasonIfUnsupported, given the body references both) and lines 884-889
// (the enumerators of supportedTypes) — confirm against upstream source.
874 bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
875  const TensorInfo& output,
878 {
879  IgnoreUnused(descriptor);
880 
881  bool supported = true;
// Six supported element types; the initialiser body is missing from this
// extraction (original lines 884-889).
882  std::array<DataType,6> supportedTypes =
883  {
890  };
891 
892  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
893  "Reference concatenation: output type not supported");
// Every input tensor must both be a supported type and agree with the
// output's type; failures accumulate into 'supported' rather than
// returning early, so all reasons get reported.
894  for (const TensorInfo* input : inputs)
895  {
896  ARMNN_ASSERT(input != nullptr);
897  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
898  "Reference concatenation: input type not supported");
899 
900  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
901  "Reference concatenation: input and output types mismatched.");
902  }
903 
904  return supported;
905 }
906 
909 {
910  std::array<DataType,8> supportedTypes =
911  {
920  };
921 
922  return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
923  "Reference constant: output is not a supported type.");
924 }
925 
927  const TensorInfo& output,
929 {
930  bool supported = true;
931 
932  supported &= CheckSupportRule(TypeIs(input, DataType::BFloat16), reasonIfUnsupported,
933  "Reference for ConvertBf16ToFp32 layer: input type not supported");
934 
935  supported &= CheckSupportRule(TypeIs(output, DataType::Float32), reasonIfUnsupported,
936  "Reference for ConvertBf16ToFp32 layer: output type not supported");
937 
938  return supported;
939 }
940 
942  const TensorInfo& output,
944 {
945  return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
946  input.GetDataType(),
947  &TrueFunc<>,
948  &FalseInputFuncF32<>,
949  &FalseFuncU8<>,
950  &FalseFuncI32<>,
951  &FalseFuncU8<>) &&
952  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
953  output.GetDataType(),
954  &FalseOutputFuncF16<>,
955  &TrueFunc<>,
956  &FalseFuncU8<>,
957  &FalseFuncI32<>,
958  &FalseFuncU8<>));
959 }
960 
962  const TensorInfo& output,
964 {
965  bool supported = true;
966 
967  supported &= CheckSupportRule(TypeIs(input, DataType::Float32), reasonIfUnsupported,
968  "Reference for ConvertFp32ToBf16 layer: input type not supported");
969 
970  supported &= CheckSupportRule(TypeIs(output, DataType::BFloat16), reasonIfUnsupported,
971  "Reference for ConvertFp32ToBf16 layer: output type not supported");
972 
973  return supported;
974 }
975 
977  const TensorInfo& output,
979 {
980  return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
981  input.GetDataType(),
982  &FalseInputFuncF16<>,
983  &TrueFunc<>,
984  &FalseFuncU8<>,
985  &FalseFuncI32<>,
986  &FalseFuncU8<>) &&
987  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
988  output.GetDataType(),
989  &TrueFunc<>,
990  &FalseOutputFuncF32<>,
991  &FalseFuncU8<>,
992  &FalseFuncI32<>,
993  &FalseFuncU8<>));
994 }
995 
997  const TensorInfo& output,
999  const TensorInfo& weights,
1002 {
1003  bool supported = true;
1004 
1005  // Define supported types.
1006  std::array<DataType,7> supportedTypes =
1007  {
1015  };
1016 
1017  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1018  "Reference Convolution2d: input is not a supported type.");
1019 
1020  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1021  "Reference Convolution2d: output is not a supported type.");
1022 
1023  // For Convolution2d, we allow to have BFloat16 input with Float32 output for optimization.
1024  if (input.GetDataType() == DataType::BFloat16)
1025  {
1026  if (output.GetDataType() != DataType::BFloat16 && output.GetDataType() != DataType::Float32)
1027  {
1028  reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n";
1029  supported = false;
1030  }
1031  }
1032  else
1033  {
1034  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1035  "Reference Convolution2d: input and output types mismatched.");
1036  }
1037 
1038  const DataType inputType = input.GetDataType();
1039  if (IsQuantized8BitType(inputType))
1040  {
1041  std::array<DataType, 3> supportedWeightTypes =
1042  {
1046  };
1047 
1048  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1049  "Reference Convolution2d: weights type not supported for quantized input.");
1050  }
1051  else
1052  {
1053  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1054  "Reference Convolution2d: weights is not a supported type.");
1055 
1056  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1057  "Reference Convolution2d: input and weights types mismatched.");
1058  }
1059 
1060  if (biases.has_value())
1061  {
1062  std::array<DataType,4> biasesSupportedTypes =
1063  {
1068  };
1069 
1070  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1071  "Reference Convolution2d: biases is not a supported type.");
1072  }
1073  IgnoreUnused(descriptor);
1074 
1075  return supported;
1076 }
1077 
1079  const TensorInfo& output,
1081  const TensorInfo& weights,
1084 {
1085  bool supported = true;
1086 
1087  // Define supported types.
1088  std::array<DataType,7> supportedTypes =
1089  {
1097  };
1098 
1099  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1100  "Reference Convolution3d: input is not a supported type.");
1101 
1102  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1103  "Reference Convolution3d: output is not a supported type.");
1104 
1105  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1106  "Reference Convolution3d: input and output types mismatched.");
1107 
1108  const DataType inputType = input.GetDataType();
1109  if (IsQuantized8BitType(inputType))
1110  {
1111  std::array<DataType, 3> supportedWeightTypes =
1112  {
1116  };
1117 
1118  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1119  "Reference Convolution3d: weights type not supported for quantized input.");
1120  }
1121  else
1122  {
1123  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1124  "Reference Convolution3d: weights is not a supported type.");
1125 
1126  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1127  "Reference Convolution3d: input and weights types mismatched.");
1128  }
1129 
1130  if (biases.has_value())
1131  {
1132  std::array<DataType,4> biasesSupportedTypes =
1133  {
1138  };
1139 
1140  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1141  "Reference Convolution3d: biases is not a supported type.");
1142  }
1143  IgnoreUnused(descriptor);
1144 
1145  return supported;
1146 }
1147 
1149  const TensorInfo& output,
1151 {
1152  bool supported = true;
1153 
1154  std::array<DataType, 8> supportedTypes =
1155  {
1164  };
1165 
1166  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1167  "Reference for Debug layer: input type not supported");
1168 
1169  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1170  "Reference for Debug layer: output type not supported");
1171 
1172  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1173  "Reference for Debug layer: input and output types are mismatched");
1174 
1175  return supported;
1176 }
1177 
1179  const TensorInfo& output,
1182 {
1183  IgnoreUnused(descriptor);
1184  bool supported = true;
1185 
1186  std::array<DataType,6> supportedTypes =
1187  {
1194  };
1195 
1196  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1197  "Reference DepthToSpace: input type not supported");
1198 
1199  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1200  "Reference DepthToSpace: output type not supported");
1201 
1202  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1203  "Reference DepthToSpace: input and output types are mismatched");
1204 
1205  return supported;
1206 }
1207 
1209  const TensorInfo& output,
1211  const TensorInfo& weights,
1214 {
1215  IgnoreUnused(descriptor);
1216  bool supported = true;
1217 
1218  // Define supported types.
1219  std::array<DataType,7> supportedTypes =
1220  {
1228  };
1229 
1230  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1231  "Reference DepthwiseConvolution2d: input is not a supported type.");
1232 
1233  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1234  "Reference DepthwiseConvolution2d: output is not a supported type.");
1235 
1236  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1237  "Reference DepthwiseConvolution2d: input and output types mismatched.");
1238 
1239  const DataType inputType = input.GetDataType();
1240  if (IsQuantized8BitType(inputType))
1241  {
1242  std::array<DataType, 3> supportedWeightTypes =
1243  {
1247  };
1248 
1249  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1250  "Reference DepthwiseConvolution2d: weights type not supported for "
1251  "quantized input.");
1252  }
1253  else
1254  {
1255  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1256  "Reference DepthwiseConvolution2d: weights is not a supported type.");
1257 
1258  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1259  "Reference DepthwiseConvolution2d: input and weights types mismatched.");
1260  }
1261 
1262  if (biases.has_value())
1263  {
1264  std::array<DataType,4> biasesSupportedTypes =
1265  {
1270  };
1271  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1272  "Reference DepthwiseConvolution2d: biases is not a supported type.");
1273  }
1274 
1275  return supported;
1276 
1277 }
1278 
1280  const TensorInfo& output,
1282 {
1283  bool supported = true;
1284 
1285  std::array<DataType,4> supportedInputTypes = {
1290  };
1291 
1292  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
1293  "Reference for Dequantize layer: input type not supported.");
1294 
1295  supported &= CheckSupportRule(TypeNotPerAxisQuantized(input), reasonIfUnsupported,
1296  "Reference for Dequantize layer: per-axis quantized input not supported.");
1297 
1298  std::array<DataType,3> supportedOutputTypes = {
1302  };
1303 
1304  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
1305  "Reference for Dequantize layer: output type not supported.");
1306 
1307  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1308  "Reference for Dequantize layer: input/output shapes have different num total "
1309  "elements.");
1310 
1311  return supported;
1312 }
1313 
1315  const TensorInfo& scores,
1316  const TensorInfo& anchors,
1317  const TensorInfo& detectionBoxes,
1319  const TensorInfo& detectionScores,
1320  const TensorInfo& numDetections,
1323 {
1324  IgnoreUnused(anchors, detectionBoxes, detectionClasses, detectionScores, numDetections, descriptor);
1325 
1326  bool supported = true;
1327 
1328  std::array<DataType,6> supportedInputTypes =
1329  {
1336  };
1337 
1338  supported &= CheckSupportRule(TypeAnyOf(boxEncodings, supportedInputTypes), reasonIfUnsupported,
1339  "Reference DetectionPostProcess: input 0 is not a supported type.");
1340 
1341  supported &= CheckSupportRule(TypeAnyOf(scores, supportedInputTypes), reasonIfUnsupported,
1342  "Reference DetectionPostProcess: input 1 is not a supported type.");
1343 
1344  return supported;
1345 }
1346 
1348  const TensorInfo& output,
1350  const TensorInfo& weights,
1353 {
1354  return IsDepthwiseConvolutionSupported(input, output, descriptor, weights, biases, reasonIfUnsupported);
1355 }
1356 
1358  const TensorInfo& input1,
1359  const TensorInfo& output,
1361 {
1362  bool supported = true;
1363 
1364  std::array<DataType,7> supportedTypes = {
1372  };
1373 
1374  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1375  "Reference division: input 0 is not a supported type.");
1376 
1377  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1378  "Reference division: input 1 is not a supported type.");
1379 
1380  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1381  "Reference division: output is not a supported type.");
1382 
1383  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1384  "Reference division: input 0 and Input 1 types are mismatched");
1385 
1386  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1387  "Reference division: input and output types are mismatched");
1388 
1389  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1390  "Reference division: shapes are not suitable for implicit broadcast.");
1391 
1392  return supported;
1393 }
1394 
1396  const TensorInfo& output,
1399 {
1400  IgnoreUnused(descriptor);
1401 
1402  std::array<DataType, 7> supportedTypes =
1403  {
1411  };
1412 
1413  std::array<DataType, 1> logicalSupportedTypes =
1414  {
1416  };
1417 
1418  bool supported = true;
1419 
1420  if (descriptor.m_Operation == UnaryOperation::LogicalNot)
1421  {
1422  supported &= CheckSupportRule(TypeAnyOf(input, logicalSupportedTypes), reasonIfUnsupported,
1423  "Reference elementwise unary: input type not supported");
1424 
1425  supported &= CheckSupportRule(TypeAnyOf(output, logicalSupportedTypes), reasonIfUnsupported,
1426  "Reference elementwise unary: output type not supported");
1427  }
1428  else
1429  {
1430  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1431  "Reference elementwise unary: input type not supported");
1432 
1433  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1434  "Reference elementwise unary: output type not supported");
1435  }
1436 
1437  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1438  "Reference elementwise unary: input and output types not matching");
1439 
1440  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1441  "Reference elementwise unary: input and output shapes"
1442  "have different number of total elements");
1443 
1444  return supported;
1445 }
1446 
1450 {
1451  IgnoreUnused(descriptor);
1452  bool supported = true;
1453 
1454  std::array<DataType,1> supportedTypes =
1455  {
1457  };
1458 
1459  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1460  "Reference fake quantization: input type not supported.");
1461 
1462  return supported;
1463 }
1464 
1466  const TensorInfo& output,
1467  const FillDescriptor& descriptor,
1469 {
1470  IgnoreUnused(descriptor);
1471  IgnoreUnused(output);
1472 
1473  bool supported = true;
1474 
1475  std::array<DataType,3> supportedTypes =
1476  {
1480  };
1481 
1482  supported &= CheckSupportRule(TypeIs(input, DataType::Signed32), reasonIfUnsupported,
1483  "Reference Fill: input type not supported.");
1484 
1485  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1486  "Reference Fill: output type not supported.");
1487  return supported;
1488 }
1489 
1491  const TensorInfo& output,
1493 {
1494  IgnoreUnused(output);
1495  bool supported = true;
1496 
1497  std::array<DataType,3> supportedTypes =
1498  {
1502  };
1503 
1504  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1505  "Reference Floor: input type not supported.");
1506 
1507  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1508  "Reference Floor: output type not supported.");
1509 
1510  return supported;
1511 }
1512 
1514  const TensorInfo& output,
1515  const TensorInfo& weights,
1516  const TensorInfo& biases,
1519 {
1520  bool supported = true;
1521 
1522  // Define supported types.
1523  std::array<DataType,6> supportedTypes =
1524  {
1531  };
1532 
1533  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1534  "Reference Fully Connected: input type not supported.");
1535 
1536  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1537  "Reference Fully Connected: output type not supported.");
1538 
1539  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1540  "Reference Fully Connected: weights type not supported.");
1541 
1542  // For FullyConnected, we allow to have BFloat16 input with Float32 output for optimization.
1543  if (input.GetDataType() == DataType::BFloat16)
1544  {
1545  if (output.GetDataType() != DataType::BFloat16 && output.GetDataType() != DataType::Float32)
1546  {
1547  reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n";
1548  supported = false;
1549  }
1550  }
1551  else
1552  {
1553  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1554  "Reference Fully Connected: input and output types mismatched.");
1555  }
1556 
1557  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1558  "Reference Fully Connected: weights is not a supported type.");
1559 
1560  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1561  "Reference Fully Connected: input and weights types mismatched.");
1562 
1563  if (descriptor.m_BiasEnabled)
1564  {
1565  // Defined supported types for bias
1566  std::array<DataType, 5>
1567  supportedBiasTypes =
1568  {
1574  };
1575 
1576  supported &= CheckSupportRule(TypeAnyOf(biases, supportedBiasTypes), reasonIfUnsupported,
1577  "Reference Fully Connected: bias type not supported.");
1578 
1579  supported &= CheckSupportRule(BiasAndWeightsTypesMatch(biases, weights), reasonIfUnsupported,
1580  "Reference Fully Connected: bias and weight types mismatch.");
1581 
1582  supported &= CheckSupportRule(BiasAndWeightsTypesCompatible(weights, supportedBiasTypes), reasonIfUnsupported,
1583  "Reference Fully Connected: bias type inferred from weights is incompatible.");
1584 
1585  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(biases, 1U), reasonIfUnsupported,
1586  "Reference Fully Connected: bias must have 1 dimension.");
1587 
1588  }
1589 
1590  return supported;
1591 }
1592 
1594  const armnn::TensorInfo& input1,
1595  const armnn::TensorInfo& output,
1598 {
1599  bool supported = true;
1600  std::array<DataType,7> supportedTypes =
1601  {
1609  };
1610 
1611  if (descriptor.m_Axis != 0)
1612  {
1613  reasonIfUnsupported.value() += std::string("Reference Gather: axis not supported\n");
1614  supported &= false;
1615  }
1616  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1617  "Reference Gather: input type not supported");
1618 
1619  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1620  "Reference Gather: output type not supported");
1621 
1622  supported &= CheckSupportRule(TypeIs(input1, DataType::Signed32), reasonIfUnsupported,
1623  "Reference Gather: indices (input1) type not supported");
1624 
1625  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1626  "Reference Gather: input and output types not matching");
1627 
1628  return supported;
1629 }
1630 
1632  Optional<std::string&> /*reasonIfUnsupported*/) const
1633 {
1634  return true;
1635 }
1636 
1638  const TensorInfo& output,
1641 {
1642  IgnoreUnused(descriptor);
1643  // Define supported types
1644  std::array<DataType, 3> supportedTypes =
1645  {
1649  };
1650 
1651  bool supported = true;
1652 
1653  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1654  "Reference Instance Normalization: input type not supported.");
1655 
1656  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1657  "Reference Instance Normalization: output type not supported.");
1658 
1659  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1660  "Reference Instance Normalization: input and output types mismatched.");
1661 
1662  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1663  "Reference Instance Normalization: input and output shapes have different "
1664  "num total elements.");
1665 
1666  return supported;
1667 }
1668 
1670  const TensorInfo& output,
1673 {
1674  IgnoreUnused(descriptor);
1675  // Define supported types
1676  std::array<DataType, 6> supportedTypes =
1677  {
1684  };
1685 
1686  bool supported = true;
1687 
1688  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1689  "Reference L2normalization: input type not supported.");
1690 
1691  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1692  "Reference L2normalization: output type not supported.");
1693 
1694  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1695  "Reference L2normalization: input and output types mismatched.");
1696 
1697  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1698  "Reference L2normalization: input and output shapes have different "
1699  "num total elements.");
1700 
1701  return supported;
1702 }
1703 
1705  const TensorInfo& input1,
1706  const TensorInfo& output,
1709 {
1710  IgnoreUnused(descriptor);
1711 
1712  std::array<DataType, 1> supportedTypes =
1713  {
1715  };
1716 
1717  bool supported = true;
1718  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1719  "Reference LogicalBinary: input 0 type not supported");
1720  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1721  "Reference LogicalBinary: input 1 type not supported");
1722 
1723  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1724  "Reference LogicalBinary: input and output types do not match");
1725 
1726  return supported;
1727 }
1728 
1730  const TensorInfo& output,
1733 {
1734  IgnoreUnused(descriptor);
1735 
1736  std::array<DataType, 3> supportedTypes =
1737  {
1741  };
1742 
1743  bool supported = true;
1744  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1745  "Reference LogSoftmax: input type not supported");
1746 
1747  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1748  "Reference LogSoftmax: output type not supported");
1749 
1750  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1751  "Reference LogSoftmax: input and output types do not match");
1752 
1753  return supported;
1754 }
1755 
1757  const TensorInfo& outputStateIn,
1758  const TensorInfo& cellStateIn,
1759  const TensorInfo& scratchBuffer,
1760  const TensorInfo& outputStateOut,
1761  const TensorInfo& cellStateOut,
1762  const TensorInfo& output,
1763  const LstmDescriptor& descriptor,
1766 {
1767  IgnoreUnused(descriptor);
1768  IgnoreUnused(paramsInfo);
1769 
1770  bool supported = true;
1771 
1772  std::array<DataType,3> supportedTypes = {
1776  };
1777 
1778  // check inputs and outputs
1779  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1780  "Reference Lstm: input is not a supported type.");
1781  supported &= CheckSupportRule(TypesAreEqual(input, outputStateIn), reasonIfUnsupported,
1782  "Reference Lstm: input and outputStateIn types are mismatched");
1783  supported &= CheckSupportRule(TypesAreEqual(input, cellStateIn), reasonIfUnsupported,
1784  "Reference Lstm: input and cellStateIn types are mismatched");
1785  supported &= CheckSupportRule(TypesAreEqual(input, scratchBuffer), reasonIfUnsupported,
1786  "Reference Lstm: input and scratchBuffer types are mismatched");
1787  supported &= CheckSupportRule(TypesAreEqual(input, outputStateOut), reasonIfUnsupported,
1788  "Reference Lstm: input and outputStateOut types are mismatched");
1789  supported &= CheckSupportRule(TypesAreEqual(input, cellStateOut), reasonIfUnsupported,
1790  "Reference Lstm: input and cellStateOut types are mismatched");
1791 
1792  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1793  "Reference Lstm: input and output types are mismatched");
1794  // check layer parameters
1795  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToForgetWeights()), reasonIfUnsupported,
1796  "Reference Lstm: input and InputToForgetWeights types are mismatched");
1797  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToCellWeights()), reasonIfUnsupported,
1798  "Reference Lstm: input and InputToCellWeights types are mismatched");
1799  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToOutputWeights()), reasonIfUnsupported,
1800  "Reference Lstm: input and InputToOutputWeights types are mismatched");
1801  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToForgetWeights()), reasonIfUnsupported,
1802  "Reference Lstm: input and RecurrentToForgetWeights types are mismatched");
1803  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToCellWeights()), reasonIfUnsupported,
1804  "Reference Lstm: input and RecurrentToCellWeights types are mismatched");
1805  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToOutputWeights()), reasonIfUnsupported,
1806  "Reference Lstm: input and RecurrentToOutputWeights types are mismatched");
1807  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetGateBias()), reasonIfUnsupported,
1808  "Reference Lstm: input and ForgetGateBias types are mismatched");
1809  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellBias()), reasonIfUnsupported,
1810  "Reference Lstm: input and CellBias types are mismatched");
1811  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputGateBias()), reasonIfUnsupported,
1812  "Reference Lstm: input and OutputGateBias types are mismatched");
1813  if (!descriptor.m_CifgEnabled)
1814  {
1815  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToInputWeights()), reasonIfUnsupported,
1816  "Reference Lstm: input and InputToInputWeights types are mismatched");
1817  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToInputWeights()),
1818  reasonIfUnsupported,
1819  "Reference Lstm: input and RecurrentToInputWeights types are mismatched");
1820  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputGateBias()), reasonIfUnsupported,
1821  "Reference Lstm: input and InputGateBias types are mismatched");
1822  if (descriptor.m_PeepholeEnabled)
1823  {
1824  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToInputWeights()),
1825  reasonIfUnsupported,
1826  "Reference Lstm: input and CellToInputWeights types are mismatched");
1827  }
1828  }
1829  if (descriptor.m_PeepholeEnabled)
1830  {
1831  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToForgetWeights()), reasonIfUnsupported,
1832  "Reference Lstm: input and CellToForgetWeights types are mismatched");
1833  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToOutputWeights()), reasonIfUnsupported,
1834  "Reference Lstm: input and CellToOutputWeights types are mismatched");
1835  }
1836  if (descriptor.m_ProjectionEnabled)
1837  {
1838  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionWeights()), reasonIfUnsupported,
1839  "Reference Lstm: input and mProjectionWeights types are mismatched");
1840  if (paramsInfo.m_ProjectionBias != nullptr)
1841  {
1842  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
1843  "Reference Lstm: input and ProjectionBias types are mismatched");
1844  }
1845  }
1846  if (descriptor.m_LayerNormEnabled)
1847  {
1848  if (!descriptor.m_CifgEnabled)
1849  {
1850  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputLayerNormWeights()),
1851  reasonIfUnsupported,
1852  "Reference Lstm: input and InputLayerNormWeights types are mismatched");
1853  }
1854  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetLayerNormWeights()),
1855  reasonIfUnsupported,
1856  "Reference Lstm: input and ForgetLayerNormWeights types are mismatched");
1857  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellLayerNormWeights()),
1858  reasonIfUnsupported,
1859  "Reference Lstm: input and CellLayerNormWeights types are mismatched");
1860  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputLayerNormWeights()),
1861  reasonIfUnsupported,
1862  "Reference Lstm: input and OutputLayerNormWeights types are mismatched");
1863  }
1864 
1865  return supported;
1866 }
1867 
1869  const TensorInfo& input1,
1870  const TensorInfo& output,
1872 {
1873  bool supported = true;
1874 
1875  std::array<DataType,7> supportedTypes = {
1883  };
1884 
1885  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1886  "Reference maximum: input 0 is not a supported type.");
1887 
1888  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1889  "Reference maximum: input 1 is not a supported type.");
1890 
1891  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1892  "Reference maximum: output is not a supported type.");
1893 
1894  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1895  "Reference maximum: input 0 and Input 1 types are mismatched");
1896 
1897  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1898  "Reference maximum: input and output types are mismatched");
1899 
1900  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1901  "Reference maximum: shapes are not suitable for implicit broadcast.");
1902 
1903  return supported;
1904 }
1905 
1907  const TensorInfo& output,
1908  const MeanDescriptor& descriptor,
1910 {
1911  bool supported = true;
1912  std::string meanLayerStr = "Mean";
1913  std::string outputTensorStr = "output";
1914 
1915  std::array<DataType,6> supportedTypes =
1916  {
1923  };
1924 
1925  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1926  "Reference Mean: input type not supported.");
1927 
1928  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1929  "Reference Mean: input and output types are mismatched");
1930 
1931  if (descriptor.m_KeepDims)
1932  {
1933  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, input.GetNumDimensions()),
1934  reasonIfUnsupported,
1935  CreateIncorrectDimensionsErrorMsg(input.GetNumDimensions(),
1936  output.GetNumDimensions(),
1937  meanLayerStr, outputTensorStr).data());
1938  }
1939  else if (descriptor.m_Axis.empty())
1940  {
1941  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1942  reasonIfUnsupported,
1943  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1944  meanLayerStr, outputTensorStr).data());
1945  }
1946  else
1947  {
1948  auto outputDim = input.GetNumDimensions() - armnn::numeric_cast<unsigned int>(descriptor.m_Axis.size());
1949 
1950  if (outputDim > 0)
1951  {
1952  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, outputDim),
1953  reasonIfUnsupported,
1954  CreateIncorrectDimensionsErrorMsg(outputDim, output.GetNumDimensions(),
1955  meanLayerStr, outputTensorStr).data());
1956  }
1957  else
1958  {
1959  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1960  reasonIfUnsupported,
1961  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1962  meanLayerStr, outputTensorStr).data());
1963  }
1964  }
1965 
1966  return supported;
1967 }
1968 
1970  const TensorInfo &output,
1972 {
1973  bool supported = true;
1974 
1975  std::array<DataType,7> supportedTypes =
1976  {
1984  };
1985 
1986  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1987  "Reference MemCopy: input type not supported");
1988 
1989  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1990  "Reference MemCopy: output type not supported");
1991 
1992  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1993  "Reference MemCopy: input and output types are mismatched");
1994 
1995  return supported;
1996 }
1997 
1999  const TensorInfo& input1,
2000  const TensorInfo& output,
2002 {
2003  bool supported = true;
2004 
2005  std::array<DataType,7> supportedTypes = {
2013  };
2014 
2015  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2016  "Reference minimum: input 0 is not a supported type.");
2017 
2018  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2019  "Reference minimum: input 1 is not a supported type.");
2020 
2021  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2022  "Reference minimum: output is not a supported type.");
2023 
2024  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2025  "Reference minimum: input 0 and Input 1 types are mismatched");
2026 
2027  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2028  "Reference minimum: input and output types are mismatched");
2029 
2030  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2031  "Reference minimum: shapes are not suitable for implicit broadcast.");
2032 
2033  return supported;
2034 }
2035 
2037  const TensorInfo& input1,
2038  const TensorInfo& output,
2040 {
2041  bool supported = true;
2042 
2043  std::array<DataType,7> supportedTypes = {
2051  };
2052 
2053  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2054  "Reference multiplication: input 0 is not a supported type.");
2055 
2056  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2057  "Reference multiplication: input 1 is not a supported type.");
2058 
2059  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2060  "Reference multiplication: output is not a supported type.");
2061 
2062  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2063  "Reference multiplication: input 0 and Input 1 types are mismatched");
2064 
2065  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2066  "Reference multiplication: input and output types are mismatched");
2067 
2068  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2069  "Reference multiplication: shapes are not suitable for implicit broadcast.");
2070 
2071  return supported;
2072 }
2073 
2075  const TensorInfo& output,
2078 {
2079  IgnoreUnused(descriptor);
2080 
2081  // Define supported types
2082  std::array<DataType, 6> supportedTypes =
2083  {
2090  };
2091 
2092  bool supported = true;
2093 
2094  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2095  "Reference normalization: input type not supported.");
2096 
2097  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2098  "Reference normalization: output type not supported.");
2099 
2100  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
2101  "Reference normalization: input and output shapes have different "
2102  "num total elements.");
2103 
2104  return supported;
2105 }
2106 
2108  Optional<std::string&> /*reasonIfUnsupported*/) const
2109 {
2110  return true;
2111 }
2112 
2114  const TensorInfo& output,
2115  const PadDescriptor& descriptor,
2117 {
2118  IgnoreUnused(descriptor);
2119  bool supported = true;
2120 
2121  // Define supported output and inputs types.
2122  std::array<DataType,6> supportedTypes =
2123  {
2130  };
2131 
2132  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2133  "Reference pad: input is not a supported type.");
2134 
2135  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2136  "Reference pad: output is not a supported type.");
2137 
2138  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2139  "Reference pad: input and output types are mismatched.");
2140 
2141  return supported;
2142 }
2143 
2145  const TensorInfo& output,
2148 {
2149  IgnoreUnused(descriptor);
2150  bool supported = true;
2151 
2152  // Define supported output and inputs types.
2153  std::array<DataType, 6> supportedTypes =
2154  {
2161  };
2162 
2163  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2164  "Reference permute: input is not a supported type.");
2165 
2166  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2167  "Reference permute: output is not a supported type.");
2168 
2169  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2170  "Reference permute: input and output types are mismatched.");
2171 
2172  return supported;
2173 }
2174 
2176  const TensorInfo& output,
2179 {
2180  IgnoreUnused(descriptor);
2181  bool supported = true;
2182 
2183  // Define supported output and inputs types.
2184  std::array<DataType,6> supportedTypes =
2185  {
2192  };
2193 
2194  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2195  "Reference poolind2d: input is not a supported type.");
2196 
2197  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2198  "Reference poolind2d: output is not a supported type.");
2199 
2200  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2201  "Reference poolind2d: input and output types are mismatched.");
2202 
2203  return supported;
2204 }
2205 
2207  const TensorInfo& output,
2210 {
2211  IgnoreUnused(descriptor);
2212  bool supported = true;
2213 
2214  // Define supported output and inputs types.
2215  std::array<DataType,6> supportedTypes =
2216  {
2223  };
2224 
2225  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2226  "Reference poolind3d: input is not a supported type.");
2227 
2228  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2229  "Reference poolind3d: output is not a supported type.");
2230 
2231  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2232  "Reference poolind3d: input and output types are mismatched.");
2233 
2234  return supported;
2235 }
2236 
2237 
2241  const TensorInfo& outputStateOut,
2242  const TensorInfo& cellStateOut,
2243  const TensorInfo& output,
2244  const QLstmDescriptor& descriptor,
2247 {
2248  IgnoreUnused(input);
2249  IgnoreUnused(previousOutputIn);
2250  IgnoreUnused(previousCellStateIn);
2251  IgnoreUnused(outputStateOut);
2252  IgnoreUnused(cellStateOut);
2253  IgnoreUnused(output);
2254  IgnoreUnused(descriptor);
2255  IgnoreUnused(paramsInfo);
2256 
2257  IgnoreUnused(reasonIfUnsupported);
2258 
2259  return true;
2260 }
2261 
2263  const TensorInfo& output,
2265 {
2266  bool supported = true;
2267 
2268  // Define supported input types.
2269  std::array<DataType,7> supportedInputTypes = {
2277  };
2278 
2279  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
2280  "Reference quantize: input type not supported.");
2281 
2282  // Define supported output types.
2283  std::array<DataType,4> supportedOutputTypes = {
2287  DataType::QSymmS16
2288  };
2289  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
2290  "Reference quantize: output type not supported.");
2291 
2292  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
2293  "Reference quantize: input and output shapes have different num total elements.");
2294 
2295  return supported;
2296 }
2297 
2299  const TensorInfo& output,
2301 {
2302  IgnoreUnused(input);
2303  // Define supported output types.
2304  std::array<DataType,1> supportedOutputTypes =
2305  {
2307  };
2308 
 2309  return CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
 2310  "Reference rank: output type not supported.");
2311 }
2312 
2314  const TensorInfo& output,
2317 {
2318  IgnoreUnused(descriptor);
2319  bool supported = true;
2320  std::array<DataType,7> supportedTypes =
2321  {
2329  };
2330 
2331  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2332  "Reference Reduce: input type not supported");
2333 
2334  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2335  "Reference Reduce: output type not supported");
2336 
2337  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2338  "Reference Reduce: input and output types not matching");
2339 
2340  return supported;
2341 }
2342 
2344  const TensorInfo& output,
2347 {
2348  IgnoreUnused(output);
2349  IgnoreUnused(descriptor);
2350  // Define supported output types.
2351  std::array<DataType,8> supportedOutputTypes =
2352  {
2361  };
2362 
2363  return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
2364  "Reference reshape: input type not supported.");
2365 }
2366 
2368  const TensorInfo& output,
2371 {
2372  IgnoreUnused(descriptor);
2373  bool supported = true;
2374  std::array<DataType,6> supportedTypes =
2375  {
2382  };
2383 
2384  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2385  "Reference Resize: input type not supported");
2386 
2387  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2388  "Reference Resize: output type not supported");
2389 
2390  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2391  "Reference Resize: input and output types not matching");
2392 
2393  return supported;
2394 }
2395 
2397  const TensorInfo& output,
2399 {
2400  IgnoreUnused(input);
2401  bool supported = true;
2402 
2403  std::array<DataType, 1> supportedTypes =
2404  {
2406  };
2407 
2408  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2409  "Reference Shape: output type not supported");
2410 
2411  return supported;
2412 }
2413 
2415  const TensorInfo& output,
2416  const SliceDescriptor& descriptor,
2418 {
2419  IgnoreUnused(descriptor);
2420  bool supported = true;
2421 
2422  std::array<DataType, 5> supportedTypes =
2423  {
2429  };
2430 
2431  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2432  "Reference Slice: input type not supported");
2433 
2434  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2435  "Reference Slice: output type not supported");
2436 
2437  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2438  "Reference Slice: input and output types are mismatched");
2439 
2440  return supported;
2441 }
2442 
2444  const TensorInfo& output,
2447 {
2448  IgnoreUnused(descriptor);
2449  bool supported = true;
2450  std::array<DataType,7> supportedTypes =
2451  {
2459  };
2460 
 2461  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
 2462  "Reference Softmax: input type not supported");
 2463 
 2464  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
 2465  "Reference Softmax: output type not supported");
 2466 
 2467  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
 2468  "Reference Softmax: input and output types are mismatched");
2469 
2470  return supported;
2471 }
2472 
2474  const TensorInfo& output,
2477 {
2478  IgnoreUnused(descriptor);
2479  bool supported = true;
2480  std::array<DataType,6> supportedTypes =
2481  {
2488  };
2489 
2490  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2491  "Reference SpaceToBatchNd: input type not supported");
2492 
2493  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2494  "Reference SpaceToBatchNd: output type not supported");
2495 
2496  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2497  "Reference SpaceToBatchNd: input and output types are mismatched");
2498 
2499  return supported;
2500 }
2501 
2503  const TensorInfo& output,
2506 {
2507 
2508  IgnoreUnused(descriptor);
2509  bool supported = true;
2510 
2511  std::array<DataType,6> supportedTypes =
2512  {
2519  };
2520 
2521  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2522  "Reference SpaceToDepth: input type not supported");
2523 
2524  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2525  "Reference SpaceToDepth: output type not supported");
2526 
2527  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2528  "Reference SpaceToDepth: input and output types are mismatched");
2529 
2530  return supported;
2531 }
2532 
2534  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
2535  const ViewsDescriptor& descriptor,
2537 {
2538  IgnoreUnused(descriptor);
2539  bool supported = true;
2540  std::array<DataType,6> supportedTypes =
2541  {
2548  };
2549 
2550  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2551  "Reference splitter: output type not supported");
2552  for (const TensorInfo& output : outputs)
2553  {
2554  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2555  "Reference splitter: input type not supported");
2556 
2557  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2558  "Reference splitter: input and output types mismatched.");
2559  }
2560 
2561  return supported;
2562 }
2563 
2564 bool RefLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
2565  const TensorInfo& output,
2566  const StackDescriptor& descriptor,
2568 {
2569  IgnoreUnused(descriptor);
2570 
2571  bool supported = true;
2572  std::array<DataType,7> supportedTypes =
2573  {
2581  };
2582 
2583  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2584  "Reference stack: output type not supported");
2585  for (const TensorInfo* input : inputs)
2586  {
2587  ARMNN_ASSERT(input != nullptr);
2588  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
2589  "Reference stack: input type not supported");
2590 
2591  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
2592  "Reference stack: input and output types mismatched.");
2593  }
2594 
2595  return supported;
2596 }
2597 
2599  const TensorInfo& output,
2602 {
2603  IgnoreUnused(descriptor);
2604  bool supported = true;
2605 
2606  std::array<DataType,5> supportedTypes =
2607  {
2613  };
2614 
2615  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2616  "Reference StridedSlice: input type not supported");
2617 
2618  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2619  "Reference StridedSlice: output type not supported");
2620 
2621  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2622  "Reference StridedSlice: input and output types are mismatched");
2623 
2624  return supported;
2625 }
2626 
2628  const TensorInfo& input1,
2629  const TensorInfo& output,
2631 {
2632  bool supported = true;
2633 
2634  std::array<DataType,7> supportedTypes = {
2642  };
2643 
2644  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2645  "Reference subtraction: input 0 is not a supported type.");
2646 
2647  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2648  "Reference subtraction: input 1 is not a supported type.");
2649 
2650  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2651  "Reference subtraction: output is not a supported type.");
2652 
2653  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2654  "Reference subtraction: input 0 and Input 1 types are mismatched");
2655 
2656  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2657  "Reference subtraction: input and output types are mismatched");
2658 
2659  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2660  "Reference subtraction: shapes are not suitable for implicit broadcast.");
2661 
2662  return supported;
2663 }
2664 
2666  const TensorInfo& alpha,
2667  const TensorInfo& output,
2669 {
2670  bool supported = true;
2671 
2672  std::array<DataType, 6> supportedTypes
2673  {
2680  };
2681 
2682  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2683  "PReLU: input is not a supported type.");
2684 
2685  supported &= CheckSupportRule(TypeAnyOf(alpha, supportedTypes), reasonIfUnsupported,
2686  "PReLU: alpha is not a supported type.");
2687 
2688  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2689  "PReLU: output is not a supported type.");
2690 
2691  supported &= CheckSupportRule(TypesAreEqual(input, alpha, output), reasonIfUnsupported,
2692  "PReLU: input, alpha and output types are mismatched");
2693 
2694  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input, alpha, output), reasonIfUnsupported,
2695  "PReLU: shapes are not suitable for implicit broadcast");
2696 
2697  return supported;
2698 }
2699 
2701  const TensorInfo& output,
2703  const TensorInfo& weights,
2706 {
2707  IgnoreUnused(descriptor);
2708  bool supported = true;
2709 
2710  std::array<DataType,7> supportedTypes =
2711  {
2719  };
2720 
2721  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2722  "Reference TransposeConvolution2d: input is not a supported type.");
2723 
2724  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2725  "Reference TransposeConvolution2d: output is not a supported type.");
2726 
2727  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2728  "Reference TransposeConvolution2d: input and output types mismatched.");
2729 
2730 
2731  const DataType inputType = input.GetDataType();
2732  if (IsQuantized8BitType(inputType))
2733  {
2734  std::array<DataType, 3> supportedWeightTypes =
2735  {
2739  };
2740 
2741  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
2742  "Reference TransposeConvolution2d: weights type not supported for "
2743  "quantized input.");
2744  }
2745  else
2746  {
2747  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
2748  "Reference TransposeConvolution2d: weights is not a supported type.");
2749 
2750  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
2751  "Reference TransposeConvolution2d: input and weights types mismatched.");
2752  }
2753 
2754  if (biases.has_value())
2755  {
2756  std::array<DataType,4> biasesSupportedTypes =
2757  {
2762  };
2763  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
2764  "Reference TransposeConvolution2d: biases is not a supported type.");
2765  }
2766 
2767  return supported;
2768 }
2769 
2771  const TensorInfo& output,
2774 {
2775  IgnoreUnused(descriptor);
2776  bool supported = true;
2777 
2778  // Define supported output and inputs types.
2779  std::array<DataType, 6> supportedTypes =
2780  {
2787  };
2788 
2789  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2790  "Reference transpose: input is not a supported type.");
2791 
2792  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2793  "Reference transpose: output is not a supported type.");
2794 
2795  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2796  "Reference transpose: input and output types are mismatched.");
2797 
2798  return supported;
2799 }
2800 
2802  const TensorInfo& input,
2803  const TensorInfo& outputStateIn,
2804  const TensorInfo& cellStateIn,
2805  const TensorInfo& output,
2811 {
2812  IgnoreUnused(descriptor);
2813  IgnoreUnused(paramsInfo);
2814  IgnoreUnused(outputStateIn);
2815  IgnoreUnused(cellStateIn);
2816  bool supported = true;
2817 
2818  if (hiddenStateOutput.has_value() || cellStateOutput.has_value())
2819  {
2820  reasonIfUnsupported.value() += "Reference UnidirectionalSequenceLstm: hidden state output "
2821  "and cell state output are not supported at the moment.";
2822  }
2823 
2824  std::array<DataType, 1> supportedTypes =
2825  {
2827  };
2828 
2829  std::array<DataType, 2> supportedWeightTypes =
2830  {
2833  };
2834 
2835  // check inputs and outputs
2836  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2837  "Reference UnidirectionalSequenceLstm: input is not a supported type.");
2838  supported &= CheckSupportRule(TypesAreEqual(input, outputStateIn), reasonIfUnsupported,
2839  "Reference UnidirectionalSequenceLstm: input and outputStateIn types are mismatched");
2840  supported &= CheckSupportRule(TypesAreEqual(input, cellStateIn), reasonIfUnsupported,
2841  "Reference UnidirectionalSequenceLstm: input and cellStateIn types are mismatched");
2842 
2843  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2844  "Reference UnidirectionalSequenceLstm: input and output types are mismatched");
2845  // check layer parameters
2846  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToForgetWeights(), supportedWeightTypes),
2847  reasonIfUnsupported,
2848  "Reference UnidirectionalSequenceLstm: InputToForgetWeights "
2849  "is not a supported type.");
2850  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToCellWeights(), supportedWeightTypes),
2851  reasonIfUnsupported,
2852  "Reference UnidirectionalSequenceLstm: InputToCellWeights is not a supported type.");
2853  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToOutputWeights(), supportedWeightTypes),
2854  reasonIfUnsupported,
2855  "Reference UnidirectionalSequenceLstm: InputToOutputWeights "
2856  "is not a supported type.");
2857  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToForgetWeights(), supportedWeightTypes),
2858  reasonIfUnsupported,
2859  "Reference UnidirectionalSequenceLstm: RecurrentToForgetWeights "
2860  "is not a supported type.");
2861  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToCellWeights(), supportedWeightTypes),
2862  reasonIfUnsupported,
2863  "Reference UnidirectionalSequenceLstm: RecurrentToCellWeights "
2864  "is not a supported type.");
2865  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToOutputWeights(), supportedWeightTypes),
2866  reasonIfUnsupported,
2867  "Reference UnidirectionalSequenceLstm: RecurrentToOutputWeights "
2868  "is not a supported type.");
2869  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetGateBias()), reasonIfUnsupported,
2870  "Reference UnidirectionalSequenceLstm: input and ForgetGateBias types "
2871  "are mismatched");
2872  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellBias()), reasonIfUnsupported,
2873  "Reference UnidirectionalSequenceLstm: input and CellBias types are mismatched");
2874  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputGateBias()), reasonIfUnsupported,
2875  "Reference UnidirectionalSequenceLstm: input and OutputGateBias types "
2876  "are mismatched");
2877  if (!descriptor.m_CifgEnabled)
2878  {
2879  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToInputWeights(), supportedWeightTypes),
2880  reasonIfUnsupported,
2881  "Reference UnidirectionalSequenceLstm: InputToInputWeights "
2882  "is not a supported type.");
2883  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToInputWeights(), supportedWeightTypes),
2884  reasonIfUnsupported,
2885  "Reference UnidirectionalSequenceLstm: RecurrentToInputWeights "
2886  "is not a supported type.");
2887  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputGateBias()), reasonIfUnsupported,
2888  "Reference UnidirectionalSequenceLstm: input and InputGateBias types "
2889  "are mismatched");
2890  if (descriptor.m_PeepholeEnabled)
2891  {
2892  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToInputWeights(), supportedWeightTypes),
2893  reasonIfUnsupported,
2894  "Reference UnidirectionalSequenceLstm: CellToInputWeights "
2895  "is not a supported type.");
2896  }
2897  }
2898  if (descriptor.m_PeepholeEnabled)
2899  {
2900  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToForgetWeights(), supportedWeightTypes),
2901  reasonIfUnsupported,
2902  "Reference UnidirectionalSequenceLstm: CellToForgetWeights "
2903  "is not a supported type.");
2904  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToOutputWeights(), supportedWeightTypes),
2905  reasonIfUnsupported,
2906  "Reference UnidirectionalSequenceLstm: CellToOutputWeights "
2907  "is not a supported type.");
2908  }
2909  if (descriptor.m_ProjectionEnabled)
2910  {
2911  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetProjectionWeights(), supportedWeightTypes),
2912  reasonIfUnsupported,
2913  "Reference UnidirectionalSequenceLstm: ProjectionWeights "
2914  "is not a supported type.");
2915  if (paramsInfo.m_ProjectionBias != nullptr)
2916  {
2917  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
2918  "Reference UnidirectionalSequenceLstm: input and ProjectionBias types "
2919  "are mismatched");
2920  }
2921  }
2922  if (descriptor.m_LayerNormEnabled)
2923  {
2924  if (!descriptor.m_CifgEnabled)
2925  {
2926  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputLayerNormWeights(), supportedWeightTypes),
2927  reasonIfUnsupported,
2928  "Reference UnidirectionalSequenceLstm: InputLayerNormWeights "
2929  "is not a supported type.");
2930  }
2931  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetForgetLayerNormWeights(), supportedWeightTypes),
2932  reasonIfUnsupported,
2933  "Reference UnidirectionalSequenceLstm: ForgetLayerNormWeights "
2934  "is not a supported type.");
2935  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellLayerNormWeights(), supportedWeightTypes),
2936  reasonIfUnsupported,
2937  "Reference UnidirectionalSequenceLstm: CellLayerNormWeights "
2938  "is not a supported type.");
2939  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetOutputLayerNormWeights(), supportedWeightTypes),
2940  reasonIfUnsupported,
2941  "Reference UnidirectionalSequenceLstm: OutputLayerNormWeights "
2942  "is not a supported type.");
2943  }
2944 
2945  return supported;
2946 }
2947 
2948 } // namespace armnn
bool m_ProjectionEnabled
Enable/disable the projection layer.
const TensorInfo const TensorInfo const TensorInfo const Optional< TensorInfo > & hiddenStateOutput
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ViewsDescriptor for the SplitterLayer.
const TensorInfo & GetRecurrentToCellWeights() const
Definition: LstmParams.hpp:145
const TensorInfo const TensorInfo & anchors
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo & paramsInfo
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo const TensorInfo const TensorInfo const Optional< TensorInfo > const Optional< TensorInfo > & cellStateOutput
const TensorInfo & GetCellBias() const
Definition: LstmParams.hpp:173
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
A ReshapeDescriptor for the ReshapeLayer.
const TensorInfo & GetRecurrentToInputWeights() const
Definition: LstmParams.hpp:137
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & GetCellLayerNormWeights() const
Definition: LstmParams.hpp:197
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
const TensorInfo & GetRecurrentToOutputWeights() const
Definition: LstmParams.hpp:149
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & gamma
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const std::vector< std::reference_wrapper< TensorInfo > > & outputs
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsStackSupported(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A Convolution2dDescriptor for the Convolution2dLayer.
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertBf16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & GetCellToInputWeights() const
Definition: LstmParams.hpp:153
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & outputStateOut
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
const TensorInfo & scores
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & detectionClasses
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
const TensorInfo & GetCellToForgetWeights() const
Definition: LstmParams.hpp:157
const TensorInfo const ActivationDescriptor & descriptor
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFakeQuantizationSupported(const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConcatSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
const TensorInfo & GetForgetLayerNormWeights() const
Definition: LstmParams.hpp:193
const TensorInfo & outputStateIn
const TensorInfo const TensorInfo & previousCellStateIn
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & numDetections
const TensorInfo & GetCellToOutputWeights() const
Definition: LstmParams.hpp:161
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ResizeBilinearDescriptor for the ResizeBilinearLayer.
const TensorInfo & alpha
Base class for all descriptors.
Definition: Descriptors.hpp:22
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
A StackDescriptor for the StackLayer.
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:285
bool IsOutputSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & GetInputToCellWeights() const
Definition: LstmParams.hpp:129
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A PadDescriptor for the PadLayer.
bool IsConstantSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
DataType
Definition: Types.hpp:35
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo const TensorInfo & cellStateIn
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An LstmDescriptor for the LstmLayer.
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept...
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & GetInputToOutputWeights() const
Definition: LstmParams.hpp:133
A L2NormalizationDescriptor for the L2NormalizationLayer.
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
DataType GetDataType() const
Definition: Tensor.hpp:198
An OriginsDescriptor for the ConcatLayer.
A ReduceDescriptor for the REDUCE operators.
bool has_value() const noexcept
Definition: Optional.hpp:53
A FullyConnectedDescriptor for the FullyConnectedLayer.
bool IsRankSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool m_BiasEnabled
Enable/disable bias.
A FakeQuantizationDescriptor for the FakeQuantizationLayer.
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &output, const Optional< TensorInfo > &hiddenStateOutput, const Optional< TensorInfo > &cellStateOutput, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo * m_ProjectionBias
Definition: LstmParams.hpp:105
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A GatherDescriptor for the GatherLayer.
bool m_PeepholeEnabled
Enable/disable peephole.
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & beta
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A QLstmDescriptor for the QLstmLayer.
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
bool IsConvertFp32ToBf16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
min(a, max(b, input)) ReLu1 & ReLu6.
const TensorInfo & GetRecurrentToForgetWeights() const
Definition: LstmParams.hpp:141
bool IsDebugSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A SliceDescriptor for the SliceLayer.
A Convolution3dDescriptor for the Convolution3dLayer.
const TensorInfo & previousOutputIn
A Pooling3dDescriptor for the Pooling3dLayer.
bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & output
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
const TensorInfo & GetInputToInputWeights() const
Definition: LstmParams.hpp:121
const TensorInfo & GetOutputLayerNormWeights() const
Definition: LstmParams.hpp:201
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
bool IsLayerSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &, Optional< std::string &> reasonIfUnsupported) const override
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & detectionScores
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
int32_t m_Axis
The axis in params to gather indices from.
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & GetForgetGateBias() const
Definition: LstmParams.hpp:169
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
A MeanDescriptor for the MeanLayer.
const TensorInfo const TensorInfo const TensorInfo & detectionBoxes
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool m_LayerNormEnabled
Enable/disable layer normalization.
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
const TensorInfo & GetInputGateBias() const
Definition: LstmParams.hpp:165
A TransposeDescriptor for the TransposeLayer.
const TensorInfo & GetProjectionWeights() const
Definition: LstmParams.hpp:181
A StridedSliceDescriptor for the StridedSliceLayer.
const TensorInfo & GetInputToForgetWeights() const
Definition: LstmParams.hpp:125
bool IsInputSupported(const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & input1
const TensorInfo & GetInputLayerNormWeights() const
Definition: LstmParams.hpp:189
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A Pooling2dDescriptor for the Pooling2dLayer.
A NormalizationDescriptor for the NormalizationLayer.
const TensorInfo & GetOutputGateBias() const
Definition: LstmParams.hpp:177
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo const TensorInfo const TensorInfo & scratchBuffer
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
A ChannelShuffleDescriptor for the ChannelShuffle operator.
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & cellStateOut
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & GetProjectionBias() const
Definition: LstmParams.hpp:185
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195
bool IsSupportedForDataTypeGeneric(Optional< std::string &> reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
A SoftmaxDescriptor for the SoftmaxLayer.
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:59
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A FillDescriptor for the FillLayer.
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo const TensorInfo & mean
A PermuteDescriptor for the PermuteLayer.
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below...
Definition: Types.hpp:458