ArmNN
 20.08
RefLayerSupport.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "RefLayerSupport.hpp"
7 
8 #include <armnn/TypesUtils.hpp>
9 #include <armnn/Types.hpp>
10 #include <armnn/Descriptors.hpp>
12 
13 #include <LayerSupportCommon.hpp>
15 
16 #include <boost/cast.hpp>
17 
18 #include <vector>
19 #include <array>
20 
21 using namespace boost;
22 
23 namespace armnn
24 {
25 
26 namespace
27 {
28 
// Convenience wrapper for the reference backend: only the Float32 and Uint8
// slots take caller-supplied predicates; every other data-type slot of the
// generic dispatcher is filled with FalseFunc so those types are reported
// as unsupported (appending to reasonIfUnsupported when provided).
template<typename Float32Func, typename Uint8Func, typename ... Params>
bool IsSupportedForDataTypeRef(Optional<std::string&> reasonIfUnsupported,
                               DataType dataType,
                               Float32Func floatFuncPtr,
                               Uint8Func uint8FuncPtr,
                               Params&&... params)
{
    // Slot order follows IsSupportedForDataTypeGeneric:
    // float16, float32, uint8, int32, boolean — TODO confirm against
    // LayerSupportCommon.hpp (not visible in this listing).
    return IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                         dataType,
                                         &FalseFunc<Params...>,
                                         floatFuncPtr,
                                         uint8FuncPtr,
                                         &FalseFunc<Params...>,
                                         &FalseFunc<Params...>,
                                         std::forward<Params>(params)...);
}
45 
46 } // anonymous namespace
47 
48 namespace
49 {
50 
// Builds the standard "wrong tensor rank" error message used by the
// dimension-count support rules.
//
// @param expected   Rank the layer requires.
// @param actual     Rank the tensor actually has.
// @param layerStr   Human-readable layer name (e.g. "batchToSpaceNd").
// @param tensorName Which tensor failed (e.g. "input", "output").
// @return           Fully formatted diagnostic string.
//
// Fix: parameters are read-only, so take them by const reference
// (the previous non-const std::string& also rejected temporaries).
std::string CreateIncorrectDimensionsErrorMsg(unsigned int expected,
                                              unsigned int actual,
                                              const std::string& layerStr,
                                              const std::string& tensorName)
{
    std::string errorMsg = "Reference " + layerStr + ": Expected " + std::to_string(expected) + " dimensions but got" +
                           " " + std::to_string(actual) + " dimensions instead, for the '" + tensorName + "' tensor.";

    return errorMsg;
}
61 
62 } // anonymous namespace
63 
64 bool RefLayerSupport::IsAbsSupported(const TensorInfo& input, const TensorInfo& output,
65  Optional<std::string&> reasonIfUnsupported) const
66 {
67  return IsElementwiseUnarySupported(input,
68  output,
69  ElementwiseUnaryDescriptor(UnaryOperation::Abs),
70  reasonIfUnsupported);
71 }
72 
                                            const TensorInfo& output,
                                            const ActivationDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    // Validates an Activation layer: input/output types must be in the
    // supported set, match each other, share rank, and the requested
    // activation function must be one the reference backend implements.
    // All rules are evaluated (non-short-circuiting &=) so every failure
    // reason is appended.
    bool supported = true;

    // Define supported types.
    std::array<DataType,6> supportedTypes = {
        DataType::BFloat16,
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference activation: input type not supported.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference activation: output type not supported.");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference activation: input and output types mismatched.");

    supported &= CheckSupportRule(ShapesAreSameRank(input, output), reasonIfUnsupported,
                                  "Reference activation: input and output shapes are of different rank.");

    // Local rule: sets the inherited m_Res flag to whether the descriptor's
    // activation function is in the implemented set.
    struct ActivationFunctionSupported : public Rule
    {
        ActivationFunctionSupported(const ActivationDescriptor& desc)
        {
            switch(desc.m_Function)
            {
                case ActivationFunction::Abs:
                case ActivationFunction::BoundedReLu:
                case ActivationFunction::Elu:
                case ActivationFunction::HardSwish:
                case ActivationFunction::LeakyReLu:
                case ActivationFunction::Linear:
                case ActivationFunction::ReLu:
                case ActivationFunction::Sigmoid:
                case ActivationFunction::SoftReLu:
                case ActivationFunction::Sqrt:
                case ActivationFunction::Square:
                case ActivationFunction::TanH:
                {
                    m_Res = true;
                    break;
                }
                default:
                {
                    m_Res = false;
                    break;
                }
            }
        }
    };

    // Function is supported
    supported &= CheckSupportRule(ActivationFunctionSupported(descriptor), reasonIfUnsupported,
                                  "Reference activation: function not supported.");

    return supported;
}
140 
                                          const TensorInfo& input1,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    // Validates Addition: both inputs and the output must use one of the
    // supported types, all three must match, and the input shapes must be
    // implicitly broadcastable to the output shape.
    bool supported = true;

    std::array<DataType,7> supportedTypes = {
        DataType::BFloat16,
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16,
        DataType::Signed32
    };

    supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
                                  "Reference addition: input 0 is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
                                  "Reference addition: input 1 is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference addition: output is not a supported type.");

    supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
                                  "Reference addition: input 0 and Input 1 types are mismatched");

    supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
                                  "Reference addition: input and output types are mismatched");

    supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
                                  "Reference addition: shapes are not suitable for implicit broadcast.");

    return supported;
}
178 
                                           const armnn::ArgMinMaxDescriptor &descriptor,
                                           armnn::Optional<std::string &> reasonIfUnsupported) const
{
    // Validates ArgMinMax: the input may be any of the listed types; the
    // output is required to be Signed32 (the descriptor itself is not
    // inspected here).
    IgnoreUnused(descriptor);

    std::array<DataType, 7> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16,
        DataType::Signed32
    };

    bool supported = true;

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference ArgMinMax: input is not a supported type.");
    supported &= CheckSupportRule(TypeIs(output, DataType::Signed32), reasonIfUnsupported,
                                  "Reference ArgMinMax: output type not supported");

    return supported;
}
205 
                                                     const TensorInfo& output,
                                                     const TensorInfo& mean,
                                                     const TensorInfo& variance,
                                                     const TensorInfo& beta,
                                                     const TensorInfo& gamma,
                                                     const BatchNormalizationDescriptor& descriptor,
                                                     Optional<std::string&> reasonIfUnsupported) const
{
    // Validates BatchNormalization: the data tensors and every parameter
    // tensor (mean/variance/beta/gamma) must each use one of the supported
    // types, and input/output types must match. The descriptor is unused.
    IgnoreUnused(descriptor);

    std::array<DataType, 6> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    bool supported = true;

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference batch normalization: input is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference batch normalization: output is not a supported type.");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference batch normalization: input and output types are mismatched");

    supported &= CheckSupportRule(TypeAnyOf(mean, supportedTypes), reasonIfUnsupported,
                                  "Reference batch normalization: mean is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(variance, supportedTypes), reasonIfUnsupported,
                                  "Reference batch normalization: variance is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(beta, supportedTypes), reasonIfUnsupported,
                                  "Reference batch normalization: beta is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(gamma, supportedTypes), reasonIfUnsupported,
                                  "Reference batch normalization: gamma is not a supported type.");

    return supported;
}
252 
                                                 const TensorInfo& output,
                                                 const BatchToSpaceNdDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    // Validates BatchToSpaceNd: types must be supported and matching, and
    // both tensors must be rank 4. The descriptor is unused.
    IgnoreUnused(descriptor);

    bool supported = true;

    // Names fed into the shared rank-mismatch message builder.
    std::string batchToSpaceNdLayerStr = "batchToSpaceNd";
    std::string inputTensorStr = "input";
    std::string outputTensorStr = "output";

    // Define supported types.
    std::array<DataType,6> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference BatchToSpaceNd: input type not supported.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference BatchToSpaceNd: output type not supported.");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference BatchToSpaceNd: input and output types mismatched.");

    // NOTE(review): .data() is called on a temporary std::string; the pointer
    // is only valid for the duration of this full expression — confirm
    // CheckSupportRule consumes (copies) the message within the call.
    supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 4),
                                  reasonIfUnsupported,
                                  CreateIncorrectDimensionsErrorMsg(4,
                                                                    output.GetNumDimensions(),
                                                                    batchToSpaceNdLayerStr,
                                                                    outputTensorStr).data());

    supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(input, 4),
                                  reasonIfUnsupported,
                                  CreateIncorrectDimensionsErrorMsg(4,
                                                                    input.GetNumDimensions(),
                                                                    batchToSpaceNdLayerStr,
                                                                    inputTensorStr).data());

    return supported;
}
302 
303 bool RefLayerSupport::IsComparisonSupported(const TensorInfo& input0,
304  const TensorInfo& input1,
305  const TensorInfo& output,
306  const ComparisonDescriptor& descriptor,
307  Optional<std::string&> reasonIfUnsupported) const
308 {
309  IgnoreUnused(descriptor);
310  std::array<DataType, 8> supportedInputTypes =
311  {
312  DataType::Boolean,
313  DataType::BFloat16,
314  DataType::Float32,
315  DataType::Float16,
316  DataType::QAsymmS8,
317  DataType::QAsymmU8,
318  DataType::QSymmS16,
319  DataType::Signed32
320  };
321 
322  bool supported = true;
323  supported &= CheckSupportRule(TypeAnyOf(input0, supportedInputTypes), reasonIfUnsupported,
324  "Reference comparison: input 0 is not a supported type");
325 
326  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
327  "Reference comparison: input 0 and Input 1 types are mismatched");
328 
329  supported &= CheckSupportRule(TypeIs(output, DataType::Boolean), reasonIfUnsupported,
330  "Reference comparison: output is not of type Boolean");
331 
332  return supported;
333 }
334 
335 bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
336  const TensorInfo& output,
337  const ConcatDescriptor& descriptor,
338  Optional<std::string&> reasonIfUnsupported) const
339 {
340  IgnoreUnused(descriptor);
341 
342  bool supported = true;
343  std::array<DataType,6> supportedTypes =
344  {
345  DataType::BFloat16,
346  DataType::Float32,
347  DataType::Float16,
348  DataType::QAsymmS8,
349  DataType::QAsymmU8,
350  DataType::QSymmS16
351  };
352 
353  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
354  "Reference concatenation: output type not supported");
355  for (const TensorInfo* input : inputs)
356  {
357  ARMNN_ASSERT(input != nullptr);
358  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
359  "Reference concatenation: input type not supported");
360 
361  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
362  "Reference concatenation: input and output types mismatched.");
363  }
364 
365  return supported;
366 }
367 
                                            Optional<std::string&> reasonIfUnsupported) const
{
    // A Constant layer only produces an output, so support depends solely on
    // the output's data type being in this set.
    std::array<DataType,8> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS8,
        DataType::QSymmS16,
        DataType::Signed32
    };

    return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                            "Reference constant: output is not a supported type.");
}
386 
387 bool RefLayerSupport::IsConvertBf16ToFp32Supported(const TensorInfo& input,
388  const TensorInfo& output,
389  Optional<std::string&> reasonIfUnsupported) const
390 {
391  bool supported = true;
392 
393  supported &= CheckSupportRule(TypeIs(input, DataType::BFloat16), reasonIfUnsupported,
394  "Reference for ConvertBf16ToFp32 layer: input type not supported");
395 
396  supported &= CheckSupportRule(TypeIs(output, DataType::Float32), reasonIfUnsupported,
397  "Reference for ConvertBf16ToFp32 layer: output type not supported");
398 
399  return supported;
400 }
401 
                                                     const TensorInfo& output,
                                                     Optional<std::string&> reasonIfUnsupported) const
{
    // Conversion layer Float16 -> Float32, written in the legacy generic
    // style: the first call accepts only Float16 input (TrueFunc in the f16
    // slot), the second only Float32 output; every other type slot rejects
    // via a FalseFunc variant.
    return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                          input.GetDataType(),
                                          &TrueFunc<>,
                                          &FalseInputFuncF32<>,
                                          &FalseFuncU8<>,
                                          &FalseFuncI32<>,
                                          &FalseFuncU8<>) &&
            IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                          output.GetDataType(),
                                          &FalseOutputFuncF16<>,
                                          &TrueFunc<>,
                                          &FalseFuncU8<>,
                                          &FalseFuncI32<>,
                                          &FalseFuncU8<>));
}
421 
422 bool RefLayerSupport::IsConvertFp32ToBf16Supported(const TensorInfo& input,
423  const TensorInfo& output,
424  Optional<std::string&> reasonIfUnsupported) const
425 {
426  bool supported = true;
427 
428  supported &= CheckSupportRule(TypeIs(input, DataType::Float32), reasonIfUnsupported,
429  "Reference for ConvertFp32ToBf16 layer: input type not supported");
430 
431  supported &= CheckSupportRule(TypeIs(output, DataType::BFloat16), reasonIfUnsupported,
432  "Reference for ConvertFp32ToBf16 layer: output type not supported");
433 
434  return supported;
435 }
436 
                                                     const TensorInfo& output,
                                                     Optional<std::string&> reasonIfUnsupported) const
{
    // Conversion layer Float32 -> Float16, legacy generic style: the first
    // call accepts only Float32 input (TrueFunc in the f32 slot), the second
    // only Float16 output; all other type slots reject via FalseFunc variants.
    return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                          input.GetDataType(),
                                          &FalseInputFuncF16<>,
                                          &TrueFunc<>,
                                          &FalseFuncU8<>,
                                          &FalseFuncI32<>,
                                          &FalseFuncU8<>) &&
            IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                          output.GetDataType(),
                                          &TrueFunc<>,
                                          &FalseOutputFuncF32<>,
                                          &FalseFuncU8<>,
                                          &FalseFuncI32<>,
                                          &FalseFuncU8<>));
}
456 
                                                const TensorInfo& output,
                                                const Convolution2dDescriptor& descriptor,
                                                const TensorInfo& weights,
                                                const Optional<TensorInfo>& biases,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    // Validates Convolution2d: type checks on input/output/weights/bias.
    // Special cases: BFloat16 input may pair with a Float32 output, and
    // quantized 8-bit inputs allow a distinct set of weight types.
    bool supported = true;

    // Define supported types.
    std::array<DataType,7> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS8,
        DataType::QSymmS16
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference Convolution2d: input is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference Convolution2d: output is not a supported type.");

    // For Convolution2d, we allow to have BFloat16 input with Float32 output for optimization.
    if (input.GetDataType() == DataType::BFloat16)
    {
        if (output.GetDataType() != DataType::BFloat16 && output.GetDataType() != DataType::Float32)
        {
            // NOTE(review): reasonIfUnsupported.value() is accessed without a
            // has_value() guard — confirm callers always supply the Optional.
            reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n";
            supported = false;
        }
    }
    else
    {
        supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                      "Reference Convolution2d: input and output types mismatched.");
    }

    const DataType inputType = input.GetDataType();
    if (IsQuantized8BitType(inputType))
    {
        // Quantized input: weights may use any 8-bit quantized scheme,
        // including the deprecated per-axis symmetric type.
        std::array<DataType, 4> supportedWeightTypes =
        {
            DataType::QAsymmS8,
            DataType::QAsymmU8,
            DataType::QSymmS8,
            DataType::QuantizedSymm8PerAxis // deprecated
        };

        supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
                                      "Reference Convolution2d: weights type not supported for quantized input.");
    }
    else
    {
        supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
                                      "Reference Convolution2d: weights is not a supported type.");

        supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
                                      "Reference Convolution2d: input and weights types mismatched.");
    }

    if (biases.has_value())
    {
        // Bias types accepted when a bias tensor is provided.
        std::array<DataType,4> biasesSupportedTypes =
        {
            DataType::BFloat16,
            DataType::Float32,
            DataType::Float16,
            DataType::Signed32
        };

        supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
                                      "Reference Convolution2d: biases is not a supported type.");
    }
    IgnoreUnused(descriptor);

    return supported;
}
541 
                                       const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    // Validates Debug: input and output must each use one of the supported
    // types, and the two types must match.
    bool supported = true;

    std::array<DataType, 8> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS8,
        DataType::QSymmS16,
        DataType::Signed32
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference for Debug layer: input type not supported");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference for Debug layer: output type not supported");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference for Debug layer: input and output types are mismatched");

    return supported;
}
571 
572 bool RefLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
573  const TensorInfo& output,
574  const DepthToSpaceDescriptor& descriptor,
575  Optional<std::string&> reasonIfUnsupported) const
576 {
577  IgnoreUnused(descriptor);
578  bool supported = true;
579 
580  std::array<DataType,6> supportedTypes =
581  {
582  DataType::BFloat16,
583  DataType::Float32,
584  DataType::Float16,
585  DataType::QAsymmS8,
586  DataType::QAsymmU8,
587  DataType::QSymmS16
588  };
589 
590  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
591  "Reference DepthToSpace: input type not supported");
592 
593  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
594  "Reference DepthToSpace: output type not supported");
595 
596  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
597  "Reference DepthToSpace: input and output types are mismatched");
598 
599  return supported;
600 }
601 
                                                       const TensorInfo& output,
                                                       const DepthwiseConvolution2dDescriptor& descriptor,
                                                       const TensorInfo& weights,
                                                       const Optional<TensorInfo>& biases,
                                                       Optional<std::string&> reasonIfUnsupported) const
{
    // Validates DepthwiseConvolution2d: type checks on input/output/weights
    // and (optionally) bias. Quantized 8-bit inputs allow a distinct set of
    // weight types. The descriptor is not inspected.
    IgnoreUnused(descriptor);
    bool supported = true;

    // Define supported types.
    std::array<DataType,7> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS8,
        DataType::QSymmS16
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference DepthwiseConvolution2d: input is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference DepthwiseConvolution2d: output is not a supported type.");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference DepthwiseConvolution2d: input and output types mismatched.");

    const DataType inputType = input.GetDataType();
    if (IsQuantized8BitType(inputType))
    {
        // Quantized input: weights may use any 8-bit quantized scheme,
        // including the deprecated per-axis symmetric type.
        std::array<DataType, 4> supportedWeightTypes =
        {
            DataType::QAsymmS8,
            DataType::QAsymmU8,
            DataType::QSymmS8,
            DataType::QuantizedSymm8PerAxis // deprecated
        };

        supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
                                      "Reference DepthwiseConvolution2d: weights type not supported for "
                                      "quantized input.");
    }
    else
    {
        supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
                                      "Reference DepthwiseConvolution2d: weights is not a supported type.");

        supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
                                      "Reference DepthwiseConvolution2d: input and weights types mismatched.");
    }

    if (biases.has_value())
    {
        // Bias types accepted when a bias tensor is provided.
        std::array<DataType,4> biasesSupportedTypes =
        {
            DataType::BFloat16,
            DataType::Float32,
            DataType::Float16,
            DataType::Signed32
        };
        supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
                                      "Reference DepthwiseConvolution2d: biases is not a supported type.");
    }

    return supported;

}
675 
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    // Validates Dequantize: the input must be a (non-per-axis) quantized
    // type, the output a floating-point type, and both tensors must contain
    // the same total number of elements.
    bool supported = true;

    std::array<DataType,4> supportedInputTypes = {
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS8,
        DataType::QSymmS16
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
                                  "Reference for Dequantize layer: input type not supported.");

    // NOTE(review): the per-axis check below is performed twice with slightly
    // different messages; one of the two is almost certainly redundant.
    supported &= CheckSupportRule( TypeNotPerAxisQuantized(input), reasonIfUnsupported,
                                  "Reference for Dequantize layer: per-axis quantized input not support .");

    supported &= CheckSupportRule(TypeNotPerAxisQuantized(input), reasonIfUnsupported,
                                  "Reference dequantize: per-axis quantized input not support .");

    std::array<DataType,3> supportedOutputTypes = {
        DataType::BFloat16,
        DataType::Float32,
        DataType::Float16
    };

    supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
                                  "Reference for Dequantize layer: output type not supported.");

    supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
                                  "Reference for Dequantize layer: input/output shapes have different num total "
                                  "elements.");

    return supported;
}
713 
                                                       const TensorInfo& scores,
                                                       const TensorInfo& anchors,
                                                       const TensorInfo& detectionBoxes,
                                                       const TensorInfo& detectionClasses,
                                                       const TensorInfo& detectionScores,
                                                       const TensorInfo& numDetections,
                                                       const DetectionPostProcessDescriptor& descriptor,
                                                       Optional<std::string&> reasonIfUnsupported) const
{
    // Only the two primary inputs (box encodings and scores) are
    // type-checked; the remaining tensors and the descriptor are not
    // validated here.
    IgnoreUnused(anchors, detectionBoxes, detectionClasses, detectionScores, numDetections, descriptor);

    bool supported = true;

    std::array<DataType,5> supportedInputTypes =
    {
        DataType::BFloat16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    supported &= CheckSupportRule(TypeAnyOf(boxEncodings, supportedInputTypes), reasonIfUnsupported,
                                  "Reference DetectionPostProcess: input 0 is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(scores, supportedInputTypes), reasonIfUnsupported,
                                  "Reference DetectionPostProcess: input 1 is not a supported type.");

    return supported;
}
745 
// Dilated depthwise convolution shares the non-dilated support check —
// presumably the reference implementation handles dilation via the same
// descriptor fields (dilation is not validated separately here).
bool RefLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
                                                             const TensorInfo& output,
                                                             const DepthwiseConvolution2dDescriptor& descriptor,
                                                             const TensorInfo& weights,
                                                             const Optional<TensorInfo>& biases,
                                                             Optional<std::string&> reasonIfUnsupported) const
{
    return IsDepthwiseConvolutionSupported(input, output, descriptor, weights, biases, reasonIfUnsupported);
}
755 
                                          const TensorInfo& input1,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    // Validates Division: both inputs and the output must use one of the
    // supported types, all three must match, and the input shapes must be
    // implicitly broadcastable to the output shape.
    bool supported = true;

    std::array<DataType,7> supportedTypes = {
        DataType::BFloat16,
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16,
        DataType::Signed32
    };

    supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
                                  "Reference division: input 0 is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
                                  "Reference division: input 1 is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference division: output is not a supported type.");

    supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
                                  "Reference division: input 0 and Input 1 types are mismatched");

    supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
                                  "Reference division: input and output types are mismatched");

    supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
                                  "Reference division: shapes are not suitable for implicit broadcast.");

    return supported;
}
793 
794 bool RefLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
795  const TensorInfo& output,
796  const ElementwiseUnaryDescriptor& descriptor,
797  Optional<std::string&> reasonIfUnsupported) const
798 {
799  IgnoreUnused(descriptor);
800 
801  std::array<DataType, 7> supportedTypes =
802  {
803  DataType::BFloat16,
804  DataType::Float32,
805  DataType::Float16,
806  DataType::QAsymmS8,
807  DataType::QAsymmU8,
808  DataType::QSymmS16,
809  DataType::Signed32
810  };
811 
812  bool supported = true;
813 
814  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
815  "Reference elementwise unary: input type not supported");
816 
817  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
818  "Reference elementwise unary: output type not supported");
819 
820  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
821  "Reference elementwise unary: input and output types not matching");
822 
823  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
824  "Reference elementwise unary: input and output shapes"
825  "have different number of total elements");
826 
827  return supported;
828 }
829 
                                       const TensorInfo& input1,
                                       const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    // Equal is routed through the generic comparison support check with the
    // operation fixed to ComparisonOperation::Equal.
    return IsComparisonSupported(input0,
                                 input1,
                                 output,
                                 ComparisonDescriptor(ComparisonOperation::Equal),
                                 reasonIfUnsupported);
}
841 
                                                   const FakeQuantizationDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported) const
{
    // Validates FakeQuantization: only a Float32 input is accepted; the
    // descriptor (and the output) are not checked here.
    IgnoreUnused(descriptor);
    bool supported = true;

    std::array<DataType,1> supportedTypes =
    {
        DataType::Float32
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference fake quantization: input type not supported.");

    return supported;
}
859 
860 bool RefLayerSupport::IsFillSupported(const TensorInfo& input,
861  const TensorInfo& output,
862  const FillDescriptor& descriptor,
863  Optional<std::string&> reasonIfUnsupported) const
864 {
865  IgnoreUnused(descriptor);
866  IgnoreUnused(output);
867 
868  bool supported = true;
869 
870  std::array<DataType,3> supportedTypes =
871  {
872  DataType::Float32,
873  DataType::Float16,
874  DataType::Signed32
875  };
876 
877  supported &= CheckSupportRule(TypeIs(input, DataType::Signed32), reasonIfUnsupported,
878  "Reference Fill: input type not supported.");
879 
880  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
881  "Reference Fill: output type not supported.");
882  return supported;
883 }
884 
                                       const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    // Validates Floor: input and output must each be a floating-point type
    // from the set below (types are not required to match here).
    IgnoreUnused(output);
    bool supported = true;

    std::array<DataType,3> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float32,
        DataType::Float16
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference Floor: input type not supported.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference Floor: output type not supported.");

    return supported;
}
907 
                                                 const TensorInfo& output,
                                                 const TensorInfo& weights,
                                                 const TensorInfo& biases,
                                                 const FullyConnectedDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    // Validates FullyConnected: type checks on input/output/weights, a
    // BFloat16-input/Float32-output special case, and bias checks when the
    // descriptor enables bias.
    bool supported = true;

    // Define supported types.
    std::array<DataType,6> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference Fully Connected: input type not supported.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference Fully Connected: output type not supported.");

    supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
                                  "Reference Fully Connected: weights type not supported.");

    // For FullyConnected, we allow to have BFloat16 input with Float32 output for optimization.
    if (input.GetDataType() == DataType::BFloat16)
    {
        if (output.GetDataType() != DataType::BFloat16 && output.GetDataType() != DataType::Float32)
        {
            // NOTE(review): reasonIfUnsupported.value() is accessed without a
            // has_value() guard — confirm callers always supply the Optional.
            reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n";
            supported = false;
        }
    }
    else
    {
        supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                      "Reference Fully Connected: input and output types mismatched.");
    }

    // NOTE(review): the weights type is checked a second time here with a
    // different message — likely redundant with the check above.
    supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
                                  "Reference Fully Connected: weights is not a supported type.");

    supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
                                  "Reference Fully Connected: input and weights types mismatched.");

    if (descriptor.m_BiasEnabled)
    {
        // Defined supported types for bias
        std::array<DataType, 5>
            supportedBiasTypes =
        {
            DataType::BFloat16,
            DataType::Float32,
            DataType::Float16,
            DataType::Signed32,
            DataType::QAsymmS8
        };

        supported &= CheckSupportRule(TypeAnyOf(biases, supportedBiasTypes), reasonIfUnsupported,
                                      "Reference Fully Connected: bias type not supported.");

        supported &= CheckSupportRule(BiasAndWeightsTypesMatch(biases, weights), reasonIfUnsupported,
                                      "Reference Fully Connected: bias and weight types mismatch.");

        supported &= CheckSupportRule(BiasAndWeightsTypesCompatible(weights, supportedBiasTypes), reasonIfUnsupported,
                                      "Reference Fully Connected: bias type inferred from weights is incompatible.");

        supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(biases, 1U), reasonIfUnsupported,
                                      "Reference Fully Connected: bias must have 1 dimension.");

    }

    return supported;
}
987 
                                        const armnn::TensorInfo& input1,
                                        const armnn::TensorInfo& output,
                                        const GatherDescriptor& descriptor,
                                        armnn::Optional<std::string&> reasonIfUnsupported) const
{
    // Validates Gather: data input (input0) and output must use the same
    // supported type, indices (input1) must be Signed32, and only the
    // default axis (0) is accepted.
    bool supported = true;
    std::array<DataType,7> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16,
        DataType::Signed32
    };

    if (descriptor.m_Axis != 0)
    {
        // NOTE(review): reasonIfUnsupported.value() is accessed without a
        // has_value() guard — confirm callers always supply the Optional.
        reasonIfUnsupported.value() += std::string("Reference Gather: axis not supported\n");
        supported &= false;
    }
    supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
                                  "Reference Gather: input type not supported");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference Gather: output type not supported");

    supported &= CheckSupportRule(TypeIs(input1, DataType::Signed32), reasonIfUnsupported,
                                  "Reference Gather: indices (input1) type not supported");

    supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
                                  "Reference Gather: input and output types not matching");

    return supported;
}
1025 
1027  const TensorInfo& input1,
1028  const TensorInfo& output,
1029  Optional<std::string&> reasonIfUnsupported) const
1030 {
1031  return IsComparisonSupported(input0,
1032  input1,
1033  output,
1034  ComparisonDescriptor(ComparisonOperation::Greater),
1035  reasonIfUnsupported);
1036 }
1037 
bool RefLayerSupport::IsInputSupported(const TensorInfo& /*input*/,
                                       Optional<std::string&> /*reasonIfUnsupported*/) const
{
    // Input layers perform no computation, so any tensor configuration is accepted.
    return true;
}
1043 
1044 bool RefLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input,
1045  const TensorInfo& output,
1046  const InstanceNormalizationDescriptor& descriptor,
1047  Optional<std::string&> reasonIfUnsupported) const
1048 {
1049  IgnoreUnused(descriptor);
1050  // Define supported types
1051  std::array<DataType, 3> supportedTypes =
1052  {
1053  DataType::BFloat16,
1054  DataType::Float32,
1055  DataType::Float16
1056  };
1057 
1058  bool supported = true;
1059 
1060  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1061  "Reference Instance Normalization: input type not supported.");
1062 
1063  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1064  "Reference Instance Normalization: output type not supported.");
1065 
1066  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1067  "Reference Instance Normalization: input and output types mismatched.");
1068 
1069  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1070  "Reference Instance Normalization: input and output shapes have different "
1071  "num total elements.");
1072 
1073  return supported;
1074 }
1075 
1077  const TensorInfo& output,
1078  const L2NormalizationDescriptor& descriptor,
1079  Optional<std::string&> reasonIfUnsupported) const
1080 {
1081  IgnoreUnused(descriptor);
1082  // Define supported types
1083  std::array<DataType, 6> supportedTypes =
1084  {
1085  DataType::BFloat16,
1086  DataType::Float32,
1087  DataType::Float16,
1088  DataType::QAsymmS8,
1089  DataType::QAsymmU8,
1090  DataType::QSymmS16
1091  };
1092 
1093  bool supported = true;
1094 
1095  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1096  "Reference L2normalization: input type not supported.");
1097 
1098  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1099  "Reference L2normalization: output type not supported.");
1100 
1101  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1102  "Reference L2normalization: input and output types mismatched.");
1103 
1104  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1105  "Reference L2normalization: input and output shapes have different "
1106  "num total elements.");
1107 
1108  return supported;
1109 }
1110 
1111 bool RefLayerSupport::IsLogSoftmaxSupported(const TensorInfo& input,
1112  const TensorInfo& output,
1113  const LogSoftmaxDescriptor& descriptor,
1114  Optional<std::string&> reasonIfUnsupported) const
1115 {
1116  IgnoreUnused(descriptor);
1117 
1118  std::array<DataType, 3> supportedTypes =
1119  {
1120  DataType::BFloat16,
1121  DataType::Float32,
1122  DataType::Float16
1123  };
1124 
1125  bool supported = true;
1126  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1127  "Reference LogSoftmax: input type not supported");
1128 
1129  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1130  "Reference LogSoftmax: output type not supported");
1131 
1132  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1133  "Reference LogSoftmax: input and output types do not match");
1134 
1135  return supported;
1136 }
1137 
bool RefLayerSupport::IsLstmSupported(const TensorInfo& input,
                                      const TensorInfo& outputStateIn,
                                      const TensorInfo& cellStateIn,
                                      const TensorInfo& scratchBuffer,
                                      const TensorInfo& outputStateOut,
                                      const TensorInfo& cellStateOut,
                                      const TensorInfo& output,
                                      const LstmDescriptor& descriptor,
                                      const LstmInputParamsInfo& paramsInfo,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    // Silence unused-parameter warnings; both are in fact consulted below, so
    // these calls are harmless no-ops kept for warning hygiene.
    IgnoreUnused(descriptor);
    IgnoreUnused(paramsInfo);

    bool supported = true;

    // Data types the reference LSTM workload can run with.
    std::array<DataType,3> supportedTypes = {
        DataType::BFloat16,
        DataType::Float32,
        DataType::QSymmS16
    };

    // check inputs and outputs
    // Every state/output tensor must share the input's data type; each failed
    // rule appends its message to reasonIfUnsupported and clears 'supported'.
    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference Lstm: input is not a supported type.");
    supported &= CheckSupportRule(TypesAreEqual(input, outputStateIn), reasonIfUnsupported,
                                  "Reference Lstm: input and outputStateIn types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, cellStateIn), reasonIfUnsupported,
                                  "Reference Lstm: input and cellStateIn types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, scratchBuffer), reasonIfUnsupported,
                                  "Reference Lstm: input and scratchBuffer types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, outputStateOut), reasonIfUnsupported,
                                  "Reference Lstm: input and outputStateOut types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, cellStateOut), reasonIfUnsupported,
                                  "Reference Lstm: input and cellStateOut types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference Lstm: input and output types are mismatched");
    // check layer parameters
    // Mandatory weights/biases (always present regardless of descriptor flags).
    supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToForgetWeights()), reasonIfUnsupported,
                                  "Reference Lstm: input and InputToForgetWeights types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToCellWeights()), reasonIfUnsupported,
                                  "Reference Lstm: input and InputToCellWeights types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToOutputWeights()), reasonIfUnsupported,
                                  "Reference Lstm: input and InputToOutputWeights types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToForgetWeights()), reasonIfUnsupported,
                                  "Reference Lstm: input and RecurrentToForgetWeights types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToCellWeights()), reasonIfUnsupported,
                                  "Reference Lstm: input and RecurrentToCellWeights types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToOutputWeights()), reasonIfUnsupported,
                                  "Reference Lstm: input and RecurrentToOutputWeights types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetGateBias()), reasonIfUnsupported,
                                  "Reference Lstm: input and ForgetGateBias types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellBias()), reasonIfUnsupported,
                                  "Reference Lstm: input and CellBias types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputGateBias()), reasonIfUnsupported,
                                  "Reference Lstm: input and OutputGateBias types are mismatched");
    // CIFG disabled => the input gate exists and its parameters must be checked.
    if (!descriptor.m_CifgEnabled)
    {
        supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToInputWeights()), reasonIfUnsupported,
                                      "Reference Lstm: input and InputToInputWeights types are mismatched");
        supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToInputWeights()),
                                      reasonIfUnsupported,
                                      "Reference Lstm: input and RecurrentToInputWeights types are mismatched");
        supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputGateBias()), reasonIfUnsupported,
                                      "Reference Lstm: input and InputGateBias types are mismatched");
        // Peephole into the input gate only exists when CIFG is off.
        if (descriptor.m_PeepholeEnabled)
        {
            supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToInputWeights()),
                                          reasonIfUnsupported,
                                          "Reference Lstm: input and CellToInputWeights types are mismatched");
        }
    }
    // Peephole connections for the forget and output gates.
    if (descriptor.m_PeepholeEnabled)
    {
        supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToForgetWeights()), reasonIfUnsupported,
                                      "Reference Lstm: input and CellToForgetWeights types are mismatched");
        supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToOutputWeights()), reasonIfUnsupported,
                                      "Reference Lstm: input and CellToOutputWeights types are mismatched");
    }
    // Optional projection layer; the projection bias itself is optional within it.
    if (descriptor.m_ProjectionEnabled)
    {
        supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionWeights()), reasonIfUnsupported,
                                      "Reference Lstm: input and mProjectionWeights types are mismatched");
        if (paramsInfo.m_ProjectionBias != nullptr)
        {
            supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
                                          "Reference Lstm: input and ProjectionBias types are mismatched");
        }
    }
    // Optional layer normalization; the input-gate norm weights only exist without CIFG.
    if (descriptor.m_LayerNormEnabled)
    {
        if (!descriptor.m_CifgEnabled)
        {
            supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputLayerNormWeights()),
                                          reasonIfUnsupported,
                                          "Reference Lstm: input and InputLayerNormWeights types are mismatched");
        }
        supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetLayerNormWeights()),
                                      reasonIfUnsupported,
                                      "Reference Lstm: input and ForgetLayerNormWeights types are mismatched");
        supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellLayerNormWeights()),
                                      reasonIfUnsupported,
                                      "Reference Lstm: input and CellLayerNormWeights types are mismatched");
        supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputLayerNormWeights()),
                                      reasonIfUnsupported,
                                      "Reference Lstm: input and OutputLayerNormWeights types are mismatched");
    }

    return supported;
}
1248 
1250  const TensorInfo& input1,
1251  const TensorInfo& output,
1252  Optional<std::string&> reasonIfUnsupported) const
1253 {
1254  bool supported = true;
1255 
1256  std::array<DataType,7> supportedTypes = {
1257  DataType::BFloat16,
1258  DataType::Float32,
1259  DataType::Float16,
1260  DataType::QAsymmS8,
1261  DataType::QAsymmU8,
1262  DataType::QSymmS16,
1263  DataType::Signed32
1264  };
1265 
1266  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1267  "Reference maximum: input 0 is not a supported type.");
1268 
1269  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1270  "Reference maximum: input 1 is not a supported type.");
1271 
1272  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1273  "Reference maximum: output is not a supported type.");
1274 
1275  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1276  "Reference maximum: input 0 and Input 1 types are mismatched");
1277 
1278  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1279  "Reference maximum: input and output types are mismatched");
1280 
1281  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1282  "Reference maximum: shapes are not suitable for implicit broadcast.");
1283 
1284  return supported;
1285 }
1286 
bool RefLayerSupport::IsMeanSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      const MeanDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    bool supported = true;
    // Strings fed into CreateIncorrectDimensionsErrorMsg for readable diagnostics.
    std::string meanLayerStr = "Mean";
    std::string outputTensorStr = "output";

    std::array<DataType,6> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference Mean: input type not supported.");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference Mean: input and output types are mismatched");

    // The expected output rank depends on the descriptor:
    //  - KeepDims: reduced axes are retained as size-1 dims, so ranks match;
    //  - no axes given: all dims are reduced away, output collapses to rank 1;
    //  - otherwise: rank shrinks by the number of reduced axes (min rank 1).
    if (descriptor.m_KeepDims)
    {
        supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, input.GetNumDimensions()),
                                      reasonIfUnsupported,
                                      CreateIncorrectDimensionsErrorMsg(input.GetNumDimensions(),
                                                                        output.GetNumDimensions(),
                                                                        meanLayerStr, outputTensorStr).data());
    }
    else if (descriptor.m_Axis.empty())
    {
        supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
                                      reasonIfUnsupported,
                                      CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
                                                                        meanLayerStr, outputTensorStr).data());
    }
    else
    {
        auto outputDim = input.GetNumDimensions() - boost::numeric_cast<unsigned int>(descriptor.m_Axis.size());

        if (outputDim > 0)
        {
            supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, outputDim),
                                          reasonIfUnsupported,
                                          CreateIncorrectDimensionsErrorMsg(outputDim, output.GetNumDimensions(),
                                                                            meanLayerStr, outputTensorStr).data());
        }
        else
        {
            // Every dimension was reduced: output is a single-dimension tensor.
            supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
                                          reasonIfUnsupported,
                                          CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
                                                                            meanLayerStr, outputTensorStr).data());
        }
    }

    return supported;
}
1349 
bool RefLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                        const TensorInfo& output,
                                        const MergerDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    // "Merger" is the legacy name for Concat; defer entirely to the Concat check.
    return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
}
1357 
1359  const TensorInfo &output,
1360  Optional<std::string &> reasonIfUnsupported) const
1361 {
1362  bool supported = true;
1363 
1364  std::array<DataType,7> supportedTypes =
1365  {
1366  DataType::BFloat16,
1367  DataType::Float32,
1368  DataType::Float16,
1369  DataType::QAsymmS8,
1370  DataType::QAsymmU8,
1371  DataType::QSymmS16,
1372  DataType::Boolean
1373  };
1374 
1375  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1376  "Reference MemCopy: input type not supported");
1377 
1378  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1379  "Reference MemCopy: output type not supported");
1380 
1381  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1382  "Reference MemCopy: input and output types are mismatched");
1383 
1384  return supported;
1385 }
1386 
1388  const TensorInfo& input1,
1389  const TensorInfo& output,
1390  Optional<std::string&> reasonIfUnsupported) const
1391 {
1392  bool supported = true;
1393 
1394  std::array<DataType,7> supportedTypes = {
1395  DataType::BFloat16,
1396  DataType::Float32,
1397  DataType::Float16,
1398  DataType::QAsymmS8,
1399  DataType::QAsymmU8,
1400  DataType::QSymmS16,
1401  DataType::Signed32
1402  };
1403 
1404  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1405  "Reference minimum: input 0 is not a supported type.");
1406 
1407  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1408  "Reference minimum: input 1 is not a supported type.");
1409 
1410  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1411  "Reference minimum: output is not a supported type.");
1412 
1413  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1414  "Reference minimum: input 0 and Input 1 types are mismatched");
1415 
1416  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1417  "Reference minimum: input and output types are mismatched");
1418 
1419  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1420  "Reference minimum: shapes are not suitable for implicit broadcast.");
1421 
1422  return supported;
1423 }
1424 
1426  const TensorInfo& input1,
1427  const TensorInfo& output,
1428  Optional<std::string&> reasonIfUnsupported) const
1429 {
1430  bool supported = true;
1431 
1432  std::array<DataType,7> supportedTypes = {
1433  DataType::BFloat16,
1434  DataType::Float32,
1435  DataType::Float16,
1436  DataType::QAsymmS8,
1437  DataType::QAsymmU8,
1438  DataType::QSymmS16,
1439  DataType::Signed32
1440  };
1441 
1442  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1443  "Reference multiplication: input 0 is not a supported type.");
1444 
1445  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1446  "Reference multiplication: input 1 is not a supported type.");
1447 
1448  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1449  "Reference multiplication: output is not a supported type.");
1450 
1451  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1452  "Reference multiplication: input 0 and Input 1 types are mismatched");
1453 
1454  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1455  "Reference multiplication: input and output types are mismatched");
1456 
1457  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1458  "Reference multiplication: shapes are not suitable for implicit broadcast.");
1459 
1460  return supported;
1461 }
1462 
1464  const TensorInfo& output,
1465  const NormalizationDescriptor& descriptor,
1466  Optional<std::string&> reasonIfUnsupported) const
1467 {
1468  IgnoreUnused(descriptor);
1469 
1470  // Define supported types
1471  std::array<DataType, 6> supportedTypes =
1472  {
1473  DataType::BFloat16,
1474  DataType::Float16,
1475  DataType::Float32,
1476  DataType::QAsymmS8,
1477  DataType::QAsymmU8,
1478  DataType::QSymmS16
1479  };
1480 
1481  bool supported = true;
1482 
1483  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1484  "Reference normalization: input type not supported.");
1485 
1486  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1487  "Reference normalization: output type not supported.");
1488 
1489  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1490  "Reference normalization: input and output shapes have different "
1491  "num total elements.");
1492 
1493  return supported;
1494 }
1495 
bool RefLayerSupport::IsOutputSupported(const TensorInfo& /*output*/,
                                        Optional<std::string&> /*reasonIfUnsupported*/) const
{
    // Output layers perform no computation, so any tensor configuration is accepted.
    return true;
}
1501 
1503  const TensorInfo& output,
1504  const PadDescriptor& descriptor,
1505  Optional<std::string&> reasonIfUnsupported) const
1506 {
1507  IgnoreUnused(descriptor);
1508  bool supported = true;
1509 
1510  // Define supported output and inputs types.
1511  std::array<DataType,6> supportedTypes =
1512  {
1513  DataType::BFloat16,
1514  DataType::Float32,
1515  DataType::Float16,
1516  DataType::QAsymmS8,
1517  DataType::QAsymmU8,
1518  DataType::QSymmS16
1519  };
1520 
1521  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1522  "Reference pad: input is not a supported type.");
1523 
1524  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1525  "Reference pad: output is not a supported type.");
1526 
1527  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1528  "Reference pad: input and output types are mismatched.");
1529 
1530  return supported;
1531 }
1532 
1534  const TensorInfo& output,
1535  const PermuteDescriptor& descriptor,
1536  Optional<std::string&> reasonIfUnsupported) const
1537 {
1538  IgnoreUnused(descriptor);
1539  bool supported = true;
1540 
1541  // Define supported output and inputs types.
1542  std::array<DataType, 6> supportedTypes =
1543  {
1544  DataType::BFloat16,
1545  DataType::Float32,
1546  DataType::Float16,
1547  DataType::QAsymmS8,
1548  DataType::QAsymmU8,
1549  DataType::QSymmS16
1550  };
1551 
1552  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1553  "Reference permute: input is not a supported type.");
1554 
1555  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1556  "Reference permute: output is not a supported type.");
1557 
1558  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1559  "Reference permute: input and output types are mismatched.");
1560 
1561  return supported;
1562 }
1563 
1565  const TensorInfo& output,
1566  const Pooling2dDescriptor& descriptor,
1567  Optional<std::string&> reasonIfUnsupported) const
1568 {
1569  IgnoreUnused(descriptor);
1570  bool supported = true;
1571 
1572  // Define supported output and inputs types.
1573  std::array<DataType,6> supportedTypes =
1574  {
1575  DataType::BFloat16,
1576  DataType::Float32,
1577  DataType::Float16,
1578  DataType::QAsymmS8,
1579  DataType::QAsymmU8,
1580  DataType::QSymmS16
1581  };
1582 
1583  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1584  "Reference poolind2d: input is not a supported type.");
1585 
1586  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1587  "Reference poolind2d: output is not a supported type.");
1588 
1589  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1590  "Reference poolind2d: input and output types are mismatched.");
1591 
1592  return supported;
1593 }
1594 
bool RefLayerSupport::IsQLstmSupported(const TensorInfo& input,
                                       const TensorInfo& previousOutputIn,
                                       const TensorInfo& previousCellStateIn,
                                       const TensorInfo& outputStateOut,
                                       const TensorInfo& cellStateOut,
                                       const TensorInfo& output,
                                       const QLstmDescriptor& descriptor,
                                       const LstmInputParamsInfo& paramsInfo,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    // No validation is performed: the reference backend currently reports QLstm
    // as supported for every tensor/descriptor combination. All parameters are
    // explicitly marked unused to keep the signature warning-free.
    IgnoreUnused(input);
    IgnoreUnused(previousOutputIn);
    IgnoreUnused(previousCellStateIn);
    IgnoreUnused(outputStateOut);
    IgnoreUnused(cellStateOut);
    IgnoreUnused(output);
    IgnoreUnused(descriptor);
    IgnoreUnused(paramsInfo);

    IgnoreUnused(reasonIfUnsupported);

    return true;
}
1618 
1620  const TensorInfo& output,
1621  Optional<std::string&> reasonIfUnsupported) const
1622 {
1623  bool supported = true;
1624 
1625  // Define supported input types.
1626  std::array<DataType,7> supportedInputTypes = {
1627  DataType::BFloat16,
1628  DataType::Float32,
1629  DataType::Float16,
1630  DataType::QAsymmS8,
1631  DataType::QAsymmU8,
1632  DataType::QSymmS8,
1633  DataType::QSymmS16
1634  };
1635 
1636  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
1637  "Reference quantize: input type not supported.");
1638 
1639  // Define supported output types.
1640  std::array<DataType,4> supportedOutputTypes = {
1641  DataType::QAsymmS8,
1642  DataType::QAsymmU8,
1643  DataType::QSymmS8,
1644  DataType::QSymmS16
1645  };
1646  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
1647  "Reference quantize: output type not supported.");
1648 
1649  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1650  "Reference quantize: input and output shapes have different num total elements.");
1651 
1652  return supported;
1653 }
1654 
1655 bool RefLayerSupport::IsRankSupported(const TensorInfo& input,
1656  const TensorInfo& output,
1657  Optional<std::string&> reasonIfUnsupported) const
1658 {
1659  IgnoreUnused(input);
1660  // Define supported output types.
1661  std::array<DataType,1> supportedOutputTypes =
1662  {
1663  DataType::Signed32,
1664  };
1665 
1666  return CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
1667  "Reference rank: input type not supported.");
1668 }
1669 
1671  const TensorInfo& output,
1672  const ReshapeDescriptor& descriptor,
1673  Optional<std::string&> reasonIfUnsupported) const
1674 {
1675  IgnoreUnused(output);
1676  IgnoreUnused(descriptor);
1677  // Define supported output types.
1678  std::array<DataType,7> supportedOutputTypes =
1679  {
1680  DataType::BFloat16,
1681  DataType::Float32,
1682  DataType::Float16,
1683  DataType::Signed32,
1684  DataType::QAsymmS8,
1685  DataType::QAsymmU8,
1686  DataType::QSymmS16
1687  };
1688 
1689  return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
1690  "Reference reshape: input type not supported.");
1691 }
1692 
1694  const TensorInfo& output,
1695  Optional<std::string&> reasonIfUnsupported) const
1696 {
1697  bool supported = true;
1698  std::array<DataType,6> supportedTypes =
1699  {
1700  DataType::BFloat16,
1701  DataType::Float32,
1702  DataType::Float16,
1703  DataType::QAsymmS8,
1704  DataType::QAsymmU8,
1705  DataType::QSymmS16
1706  };
1707 
1708  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1709  "Reference ResizeBilinear: input type not supported");
1710 
1711  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1712  "Reference ResizeBilinear: output type not supported");
1713 
1714  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1715  "Reference ResizeBilinear: input and output types not matching");
1716 
1717  return supported;
1718 }
1719 
1721  const TensorInfo& output,
1722  const ResizeDescriptor& descriptor,
1723  Optional<std::string&> reasonIfUnsupported) const
1724 {
1725  IgnoreUnused(descriptor);
1726  bool supported = true;
1727  std::array<DataType,6> supportedTypes =
1728  {
1729  DataType::BFloat16,
1730  DataType::Float32,
1731  DataType::Float16,
1732  DataType::QAsymmS8,
1733  DataType::QAsymmU8,
1734  DataType::QSymmS16
1735  };
1736 
1737  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1738  "Reference Resize: input type not supported");
1739 
1740  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1741  "Reference Resize: output type not supported");
1742 
1743  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1744  "Reference Resize: input and output types not matching");
1745 
1746  return supported;
1747 }
1748 
1750  const TensorInfo& output,
1751  Optional<std::string&> reasonIfUnsupported) const
1752 {
1753  return IsElementwiseUnarySupported(input,
1754  output,
1755  ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt),
1756  reasonIfUnsupported);
1757 }
1758 
1759 bool RefLayerSupport::IsSliceSupported(const TensorInfo& input,
1760  const TensorInfo& output,
1761  const SliceDescriptor& descriptor,
1762  Optional<std::string&> reasonIfUnsupported) const
1763 {
1764  IgnoreUnused(descriptor);
1765  bool supported = true;
1766 
1767  std::array<DataType, 5> supportedTypes =
1768  {
1769  DataType::BFloat16,
1770  DataType::Float32,
1771  DataType::QAsymmS8,
1772  DataType::QAsymmU8,
1773  DataType::QSymmS16
1774  };
1775 
1776  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1777  "Reference Slice: input type not supported");
1778 
1779  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1780  "Reference Slice: output type not supported");
1781 
1782  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1783  "Reference Slice: input and output types are mismatched");
1784 
1785  return supported;
1786 }
1787 
1789  const TensorInfo& output,
1790  const SoftmaxDescriptor& descriptor,
1791  Optional<std::string&> reasonIfUnsupported) const
1792 {
1793  IgnoreUnused(descriptor);
1794  bool supported = true;
1795  std::array<DataType,7> supportedTypes =
1796  {
1797  DataType::BFloat16,
1798  DataType::Float32,
1799  DataType::Float16,
1800  DataType::QSymmS8,
1801  DataType::QAsymmS8,
1802  DataType::QAsymmU8,
1803  DataType::QSymmS16
1804  };
1805 
1806  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1807  "Reference Softmax: output type not supported");
1808 
1809  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1810  "Reference Softmax: input type not supported");
1811 
1812  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1813  "Reference Softmax: input type not supported");
1814 
1815  return supported;
1816 }
1817 
1819  const TensorInfo& output,
1820  const SpaceToBatchNdDescriptor& descriptor,
1821  Optional<std::string&> reasonIfUnsupported) const
1822 {
1823  IgnoreUnused(descriptor);
1824  bool supported = true;
1825  std::array<DataType,6> supportedTypes =
1826  {
1827  DataType::BFloat16,
1828  DataType::Float32,
1829  DataType::Float16,
1830  DataType::QAsymmS8,
1831  DataType::QAsymmU8,
1832  DataType::QSymmS16
1833  };
1834 
1835  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1836  "Reference SpaceToBatchNd: input type not supported");
1837 
1838  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1839  "Reference SpaceToBatchNd: output type not supported");
1840 
1841  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1842  "Reference SpaceToBatchNd: input and output types are mismatched");
1843 
1844  return supported;
1845 }
1846 
1848  const TensorInfo& output,
1849  const SpaceToDepthDescriptor& descriptor,
1850  Optional<std::string&> reasonIfUnsupported) const
1851 {
1852 
1853  IgnoreUnused(descriptor);
1854  bool supported = true;
1855 
1856  std::array<DataType,6> supportedTypes =
1857  {
1858  DataType::BFloat16,
1859  DataType::Float32,
1860  DataType::Float16,
1861  DataType::QAsymmS8,
1862  DataType::QAsymmU8,
1863  DataType::QSymmS16
1864  };
1865 
1866  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1867  "Reference SpaceToDepth: input type not supported");
1868 
1869  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1870  "Reference SpaceToDepth: output type not supported");
1871 
1872  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1873  "Reference SpaceToDepth: input and output types are mismatched");
1874 
1875  return supported;
1876 }
1877 
1879  const ViewsDescriptor& descriptor,
1880  Optional<std::string&> reasonIfUnsupported) const
1881 {
1882  IgnoreUnused(descriptor);
1883  bool supported = true;
1884  std::array<DataType,6> supportedTypes =
1885  {
1886  DataType::BFloat16,
1887  DataType::Float32,
1888  DataType::Float16,
1889  DataType::QAsymmS8,
1890  DataType::QAsymmU8,
1891  DataType::QSymmS16
1892  };
1893 
1894  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1895  "Reference splitter: input type not supported");
1896 
1897  return supported;
1898 }
1899 
1901  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
1902  const ViewsDescriptor& descriptor,
1903  Optional<std::string&> reasonIfUnsupported) const
1904 {
1905  IgnoreUnused(descriptor);
1906  bool supported = true;
1907  std::array<DataType,6> supportedTypes =
1908  {
1909  DataType::BFloat16,
1910  DataType::Float32,
1911  DataType::Float16,
1912  DataType::QAsymmS8,
1913  DataType::QAsymmU8,
1914  DataType::QSymmS16
1915  };
1916 
1917  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1918  "Reference splitter: output type not supported");
1919  for (const TensorInfo output : outputs)
1920  {
1921  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1922  "Reference splitter: input type not supported");
1923 
1924  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1925  "Reference splitter: input and output types mismatched.");
1926  }
1927 
1928  return supported;
1929 }
1930 
1931 bool RefLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
1932  const TensorInfo& output,
1933  const StackDescriptor& descriptor,
1934  Optional<std::string&> reasonIfUnsupported) const
1935 {
1936  IgnoreUnused(descriptor);
1937 
1938  bool supported = true;
1939  std::array<DataType,6> supportedTypes =
1940  {
1941  DataType::BFloat16,
1942  DataType::Float32,
1943  DataType::Float16,
1944  DataType::QAsymmS8,
1945  DataType::QAsymmU8,
1946  DataType::QSymmS16
1947  };
1948 
1949  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1950  "Reference stack: output type not supported");
1951  for (const TensorInfo* input : inputs)
1952  {
1953  ARMNN_ASSERT(input != nullptr);
1954  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
1955  "Reference stack: input type not supported");
1956 
1957  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
1958  "Reference stack: input and output types mismatched.");
1959  }
1960 
1961  return supported;
1962 }
1963 
1965  const TensorInfo& output,
1966  const StridedSliceDescriptor& descriptor,
1967  Optional<std::string&> reasonIfUnsupported) const
1968 {
1969  IgnoreUnused(descriptor);
1970  bool supported = true;
1971 
1972  std::array<DataType,5> supportedTypes =
1973  {
1974  DataType::BFloat16,
1975  DataType::Float32,
1976  DataType::QAsymmS8,
1977  DataType::QAsymmU8,
1978  DataType::QSymmS16
1979  };
1980 
1981  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1982  "Reference StridedSlice: input type not supported");
1983 
1984  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1985  "Reference StridedSlice: output type not supported");
1986 
1987  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1988  "Reference StridedSlice: input and output types are mismatched");
1989 
1990  return supported;
1991 }
1992 
1994  const TensorInfo& input1,
1995  const TensorInfo& output,
1996  Optional<std::string&> reasonIfUnsupported) const
1997 {
1998  bool supported = true;
1999 
2000  std::array<DataType,7> supportedTypes = {
2001  DataType::BFloat16,
2002  DataType::Float32,
2003  DataType::Float16,
2004  DataType::QAsymmS8,
2005  DataType::QAsymmU8,
2006  DataType::QSymmS16,
2007  DataType::Signed32
2008  };
2009 
2010  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2011  "Reference subtraction: input 0 is not a supported type.");
2012 
2013  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2014  "Reference subtraction: input 1 is not a supported type.");
2015 
2016  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2017  "Reference subtraction: output is not a supported type.");
2018 
2019  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2020  "Reference subtraction: input 0 and Input 1 types are mismatched");
2021 
2022  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2023  "Reference subtraction: input and output types are mismatched");
2024 
2025  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2026  "Reference subtraction: shapes are not suitable for implicit broadcast.");
2027 
2028  return supported;
2029 }
2030 
2032  const TensorInfo& alpha,
2033  const TensorInfo& output,
2034  Optional<std::string&> reasonIfUnsupported) const
2035 {
2036  bool supported = true;
2037 
2038  std::array<DataType, 6> supportedTypes
2039  {
2040  DataType::BFloat16,
2041  DataType::Float32,
2042  DataType::Float16,
2043  DataType::QAsymmS8,
2044  DataType::QAsymmU8,
2045  DataType::QSymmS16
2046  };
2047 
2048  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2049  "PReLU: input is not a supported type.");
2050 
2051  supported &= CheckSupportRule(TypeAnyOf(alpha, supportedTypes), reasonIfUnsupported,
2052  "PReLU: alpha is not a supported type.");
2053 
2054  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2055  "PReLU: output is not a supported type.");
2056 
2057  supported &= CheckSupportRule(TypesAreEqual(input, alpha, output), reasonIfUnsupported,
2058  "PReLU: input, alpha and output types are mismatched");
2059 
2060  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input, alpha, output), reasonIfUnsupported,
2061  "PReLU: shapes are not suitable for implicit broadcast");
2062 
2063  return supported;
2064 }
2065 
2067  const TensorInfo& output,
2068  const TransposeConvolution2dDescriptor& descriptor,
2069  const TensorInfo& weights,
2070  const Optional<TensorInfo>& biases,
2071  Optional<std::string&> reasonIfUnsupported) const
2072 {
2073  IgnoreUnused(descriptor);
2074  bool supported = true;
2075 
2076  std::array<DataType,7> supportedTypes =
2077  {
2078  DataType::BFloat16,
2079  DataType::Float32,
2080  DataType::Float16,
2081  DataType::QAsymmS8,
2082  DataType::QAsymmU8,
2083  DataType::QSymmS8,
2084  DataType::QSymmS16
2085  };
2086 
2087  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2088  "Reference TransposeConvolution2d: input is not a supported type.");
2089 
2090  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2091  "Reference TransposeConvolution2d: output is not a supported type.");
2092 
2093  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2094  "Reference TransposeConvolution2d: input and output types mismatched.");
2095 
2096 
2097  const DataType inputType = input.GetDataType();
2098  if (IsQuantized8BitType(inputType))
2099  {
2101  std::array<DataType, 4> supportedWeightTypes =
2102  {
2103  DataType::QAsymmS8,
2104  DataType::QAsymmU8,
2105  DataType::QSymmS8,
2106  DataType::QuantizedSymm8PerAxis //Deprecated
2107  };
2109 
2110  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
2111  "Reference TransposeConvolution2d: weights type not supported for "
2112  "quantized input.");
2113  }
2114  else
2115  {
2116  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
2117  "Reference TransposeConvolution2d: weights is not a supported type.");
2118 
2119  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
2120  "Reference TransposeConvolution2d: input and weights types mismatched.");
2121  }
2122 
2123  if (biases.has_value())
2124  {
2125  std::array<DataType,4> biasesSupportedTypes =
2126  {
2127  DataType::BFloat16,
2128  DataType::Float32,
2129  DataType::Float16,
2130  DataType::Signed32
2131  };
2132  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
2133  "Reference TransposeConvolution2d: biases is not a supported type.");
2134  }
2135 
2136  return supported;
2137 }
2138 
2139 bool RefLayerSupport::IsTransposeSupported(const TensorInfo& input,
2140  const TensorInfo& output,
2141  const TransposeDescriptor& descriptor,
2142  Optional<std::string&> reasonIfUnsupported) const
2143 {
2144  IgnoreUnused(descriptor);
2145  bool supported = true;
2146 
2147  // Define supported output and inputs types.
2148  std::array<DataType, 6> supportedTypes =
2149  {
2150  DataType::BFloat16,
2151  DataType::Float32,
2152  DataType::Float16,
2153  DataType::QAsymmS8,
2154  DataType::QAsymmU8,
2155  DataType::QSymmS16
2156  };
2157 
2158  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2159  "Reference transpose: input is not a supported type.");
2160 
2161  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2162  "Reference transpose: output is not a supported type.");
2163 
2164  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2165  "Reference transpose: input and output types are mismatched.");
2166 
2167  return supported;
2168 }
2169 
2170 } // namespace armnn
bool m_ProjectionEnabled
Enable/disable the projection layer.
bool IsSoftmaxSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsDequantizeSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsDivisionSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsConvertFp32ToFp16Supported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsConcatSupported(const BackendId &backend, const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A ViewsDescriptor for the SplitterLayer.
const TensorInfo & GetRecurrentToCellWeights() const
Definition: LstmParams.hpp:145
bool IsPooling2dSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
bool IsL2NormalizationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
const TensorInfo & GetCellBias() const
Definition: LstmParams.hpp:173
A ReshapeDescriptor for the ReshapeLayer.
const TensorInfo & GetRecurrentToInputWeights() const
Definition: LstmParams.hpp:137
const TensorInfo & GetCellLayerNormWeights() const
Definition: LstmParams.hpp:197
bool IsArgMinMaxSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, char *reasonIfUnsupported, size_t reasonIfUnsupportedMaxLength)
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
bool IsBatchToSpaceNdSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:70
const TensorInfo & GetRecurrentToOutputWeights() const
Definition: LstmParams.hpp:149
bool IsMeanSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsMultiplicationSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
ISubgraphViewConverter supported
A Convolution2dDescriptor for the Convolution2dLayer.
bool IsDebugSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsConvertFp16ToFp32Supported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsPreluSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
const TensorInfo & GetCellToInputWeights() const
Definition: LstmParams.hpp:153
bool IsFullyConnectedSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsTransposeConvolution2dSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsEqualSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsDepthwiseConvolutionSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
std::vector< float > boxEncodings({ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f })
bool IsGreaterSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsPadSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
Copyright (c) 2020 ARM Limited.
bool IsNormalizationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
void IgnoreUnused(Ts &&...)
const TensorInfo & GetCellToForgetWeights() const
Definition: LstmParams.hpp:157
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
const TensorInfo & GetForgetLayerNormWeights() const
Definition: LstmParams.hpp:193
bool IsAdditionSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
const TensorInfo & GetCellToOutputWeights() const
Definition: LstmParams.hpp:161
A ResizeDescriptor for the ResizeLayer.
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
A StackDescriptor for the StackLayer.
bool IsResizeSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:241
bool IsFloorSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsInputSupported(const BackendId &backend, const TensorInfo &input, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsMemCopySupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
const TensorInfo & GetInputToCellWeights() const
Definition: LstmParams.hpp:129
A PadDescriptor for the PadLayer.
DataType
Definition: Types.hpp:32
bool IsConstantSupported(const BackendId &backend, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsQLstmSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, char *reasonIfUnsupported, size_t reasonIfUnsupportedMaxLength)
An LstmDescriptor for the LstmLayer.
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept...
bool IsRsqrtSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
const TensorInfo & GetInputToOutputWeights() const
Definition: LstmParams.hpp:133
A L2NormalizationDescriptor for the L2NormalizationLayer.
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:51
bool IsOutputSupported(const BackendId &backend, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
DataType GetDataType() const
Definition: Tensor.hpp:194
An OriginsDescriptor for the ConcatLayer.
bool IsLstmSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool has_value() const noexcept
Definition: Optional.hpp:53
A FullyConnectedDescriptor for the FullyConnectedLayer.
bool IsFakeQuantizationSupported(const BackendId &backend, const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool m_BiasEnabled
Enable/disable bias.
bool IsStridedSliceSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsStackSupported(const BackendId &backend, const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const StackDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsSubtractionSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A FakeQuantizationDescriptor for the FakeQuantizationLayer.
bool IsResizeBilinearSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
const TensorInfo * m_ProjectionBias
Definition: LstmParams.hpp:105
A GatherDescriptor for the GatherLayer.
bool m_PeepholeEnabled
Enable/disable peephole.
bool IsPermuteSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsQuantizeSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported, size_t reasonIfUnsupportedMaxLength)
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
A QLstmDescriptor for the QLstmLayer.
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:33
bool IsSpaceToBatchNdSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsBatchNormalizationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsActivationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:20
bool IsSplitterSupported(const BackendId &backend, const TensorInfo &input, const ViewsDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
const TensorInfo & GetRecurrentToForgetWeights() const
Definition: LstmParams.hpp:141
A SliceDescriptor for the SliceLayer.
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
const TensorInfo & GetInputToInputWeights() const
Definition: LstmParams.hpp:121
const TensorInfo & GetOutputLayerNormWeights() const
Definition: LstmParams.hpp:201
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
int32_t m_Axis
The axis in params to gather indices from.
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:90
const TensorInfo & GetForgetGateBias() const
Definition: LstmParams.hpp:169
std::vector< float > scores({ 0.0f, 0.9f, 0.8f, 0.0f, 0.75f, 0.72f, 0.0f, 0.6f, 0.5f, 0.0f, 0.93f, 0.95f, 0.0f, 0.5f, 0.4f, 0.0f, 0.3f, 0.2f })
bool IsConvolution2dSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A MeanDescriptor for the MeanLayer.
bool IsMergerSupported(const BackendId &backend, const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsMaximumSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnSupported=nullptr, size_t reasonIfUnSupportedMaxLength=0)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool m_LayerNormEnabled
Enable/disable layer normalization.
const TensorInfo & GetInputGateBias() const
Definition: LstmParams.hpp:165
A TransposeDescriptor for the TransposeLayer.
const TensorInfo & GetProjectionWeights() const
Definition: LstmParams.hpp:181
A StridedSliceDescriptor for the StridedSliceLayer.
const TensorInfo & GetInputToForgetWeights() const
Definition: LstmParams.hpp:125
const TensorInfo & GetInputLayerNormWeights() const
Definition: LstmParams.hpp:189
bool IsGatherSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported, size_t reasonIfUnsupportedMaxLength)
A Pooling2dDescriptor for the Pooling2dLayer.
bool IsSpaceToDepthSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A NormalizationDescriptor for the NormalizationLayer.
const TensorInfo & GetOutputGateBias() const
Definition: LstmParams.hpp:177
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
const TensorInfo & GetProjectionBias() const
Definition: LstmParams.hpp:185
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:191
bool IsSupportedForDataTypeGeneric(Optional< std::string &> reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
A SoftmaxDescriptor for the SoftmaxLayer.
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)
bool IsMinimumSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:43
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A FillDescriptor for the FillLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
bool IsDetectionPostProcessSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const DetectionPostProcessDescriptor &descriptor, char *reasonIfUnsupported, size_t reasonIfUnsupportedMaxLength)
A PermuteDescriptor for the PermuteLayer.
bool IsReshapeSupported(const BackendId &backend, const TensorInfo &input, const ReshapeDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
std::vector< float > anchors({ 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 100.5f, 1.0f, 1.0f })