ArmNN
 20.05
RefLayerSupport.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "RefLayerSupport.hpp"
7 
8 #include <armnn/TypesUtils.hpp>
9 #include <armnn/Types.hpp>
10 #include <armnn/Descriptors.hpp>
12 
13 #include <LayerSupportCommon.hpp>
15 
16 #include <boost/cast.hpp>
17 
18 #include <vector>
19 #include <array>
20 
21 using namespace boost;
22 
23 namespace armnn
24 {
25 
26 namespace
27 {
28 
29 template<typename Float32Func, typename Uint8Func, typename ... Params>
30 bool IsSupportedForDataTypeRef(Optional<std::string&> reasonIfUnsupported,
31  DataType dataType,
32  Float32Func floatFuncPtr,
33  Uint8Func uint8FuncPtr,
34  Params&&... params)
35 {
36  return IsSupportedForDataTypeGeneric(reasonIfUnsupported,
37  dataType,
38  &FalseFunc<Params...>,
39  floatFuncPtr,
40  uint8FuncPtr,
41  &FalseFunc<Params...>,
42  &FalseFunc<Params...>,
43  std::forward<Params>(params)...);
44 }
45 
46 } // anonymous namespace
47 
48 namespace
49 {
50 
// Builds the standard diagnostic used when a tensor has an unexpected rank.
// @param expected   Number of dimensions the layer requires.
// @param actual     Number of dimensions the tensor actually has.
// @param layerStr   Human-readable layer name to embed in the message.
// @param tensorName Name of the offending tensor (e.g. "input", "output").
// @return The formatted error string.
//
// Parameters are taken by const reference: the originals were non-const
// std::string&, which falsely implied mutation and rejected temporaries.
std::string CreateIncorrectDimensionsErrorMsg(unsigned int expected,
                                              unsigned int actual,
                                              const std::string& layerStr,
                                              const std::string& tensorName)
{
    return "Reference " + layerStr + ": Expected " + std::to_string(expected) +
           " dimensions but got " + std::to_string(actual) +
           " dimensions instead, for the '" + tensorName + "' tensor.";
}
61 
62 } // anonymous namespace
63 
64 bool RefLayerSupport::IsAbsSupported(const TensorInfo& input, const TensorInfo& output,
65  Optional<std::string&> reasonIfUnsupported) const
66 {
67  return IsElementwiseUnarySupported(input,
68  output,
69  ElementwiseUnaryDescriptor(UnaryOperation::Abs),
70  reasonIfUnsupported);
71 }
72 
74  const TensorInfo& output,
75  const ActivationDescriptor& descriptor,
76  Optional<std::string&> reasonIfUnsupported) const
77 {
78  bool supported = true;
79 
80  // Define supported types.
81  std::array<DataType,6> supportedTypes = {
82  DataType::BFloat16,
83  DataType::Float32,
84  DataType::Float16,
85  DataType::QAsymmS8,
86  DataType::QAsymmU8,
87  DataType::QSymmS16
88  };
89 
90  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
91  "Reference activation: input type not supported.");
92 
93  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
94  "Reference activation: output type not supported.");
95 
96  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
97  "Reference activation: input and output types mismatched.");
98 
99  supported &= CheckSupportRule(ShapesAreSameRank(input, output), reasonIfUnsupported,
100  "Reference activation: input and output shapes are of different rank.");
101 
102 
103  struct ActivationFunctionSupported : public Rule
104  {
105  ActivationFunctionSupported(const ActivationDescriptor& desc)
106  {
107  switch(desc.m_Function)
108  {
109  case ActivationFunction::Abs:
110  case ActivationFunction::BoundedReLu:
111  case ActivationFunction::Elu:
112  case ActivationFunction::HardSwish:
113  case ActivationFunction::LeakyReLu:
114  case ActivationFunction::Linear:
115  case ActivationFunction::ReLu:
116  case ActivationFunction::Sigmoid:
117  case ActivationFunction::SoftReLu:
118  case ActivationFunction::Sqrt:
119  case ActivationFunction::Square:
120  case ActivationFunction::TanH:
121  {
122  m_Res = true;
123  break;
124  }
125  default:
126  {
127  m_Res = false;
128  break;
129  }
130  }
131  }
132  };
133 
134  // Function is supported
135  supported &= CheckSupportRule(ActivationFunctionSupported(descriptor), reasonIfUnsupported,
136  "Reference activation: function not supported.");
137 
138  return supported;
139 }
140 
142  const TensorInfo& input1,
143  const TensorInfo& output,
144  Optional<std::string&> reasonIfUnsupported) const
145 {
146  bool supported = true;
147 
148  std::array<DataType,6> supportedTypes = {
149  DataType::BFloat16,
150  DataType::Float32,
151  DataType::Float16,
152  DataType::QAsymmS8,
153  DataType::QAsymmU8,
154  DataType::QSymmS16
155  };
156 
157  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
158  "Reference addition: input 0 is not a supported type.");
159 
160  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
161  "Reference addition: input 1 is not a supported type.");
162 
163  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
164  "Reference addition: output is not a supported type.");
165 
166  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
167  "Reference addition: input 0 and Input 1 types are mismatched");
168 
169  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
170  "Reference addition: input and output types are mismatched");
171 
172  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
173  "Reference addition: shapes are not suitable for implicit broadcast.");
174 
175  return supported;
176 }
177 
179  const armnn::ArgMinMaxDescriptor &descriptor,
180  armnn::Optional<std::string &> reasonIfUnsupported) const
181 {
182  IgnoreUnused(descriptor);
183 
184  std::array<DataType, 6> supportedTypes =
185  {
186  DataType::BFloat16,
187  DataType::Float32,
188  DataType::QAsymmS8,
189  DataType::QAsymmU8,
190  DataType::QSymmS16,
191  DataType::Signed32
192  };
193 
194  bool supported = true;
195 
196  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
197  "Reference ArgMinMax: input is not a supported type.");
198  supported &= CheckSupportRule(TypeIs(output, DataType::Signed32), reasonIfUnsupported,
199  "Reference ArgMinMax: output type not supported");
200 
201  return supported;
202 }
203 
205  const TensorInfo& output,
206  const TensorInfo& mean,
207  const TensorInfo& variance,
208  const TensorInfo& beta,
209  const TensorInfo& gamma,
210  const BatchNormalizationDescriptor& descriptor,
211  Optional<std::string&> reasonIfUnsupported) const
212 {
213  IgnoreUnused(descriptor);
214 
215  std::array<DataType, 6> supportedTypes =
216  {
217  DataType::BFloat16,
218  DataType::Float32,
219  DataType::Float16,
220  DataType::QAsymmS8,
221  DataType::QAsymmU8,
222  DataType::QSymmS16
223  };
224 
225  bool supported = true;
226 
227  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
228  "Reference batch normalization: input is not a supported type.");
229 
230  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
231  "Reference batch normalization: output is not a supported type.");
232 
233  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
234  "Reference batch normalization: input and output types are mismatched");
235 
236  supported &= CheckSupportRule(TypeAnyOf(mean, supportedTypes), reasonIfUnsupported,
237  "Reference batch normalization: mean is not a supported type.");
238 
239  supported &= CheckSupportRule(TypeAnyOf(variance, supportedTypes), reasonIfUnsupported,
240  "Reference batch normalization: variance is not a supported type.");
241 
242  supported &= CheckSupportRule(TypeAnyOf(beta, supportedTypes), reasonIfUnsupported,
243  "Reference batch normalization: beta is not a supported type.");
244 
245  supported &= CheckSupportRule(TypeAnyOf(gamma, supportedTypes), reasonIfUnsupported,
246  "Reference batch normalization: gamma is not a supported type.");
247 
248  return supported;
249 }
250 
252  const TensorInfo& output,
253  const BatchToSpaceNdDescriptor& descriptor,
254  Optional<std::string&> reasonIfUnsupported) const
255 {
256  IgnoreUnused(descriptor);
257 
258  bool supported = true;
259 
260  std::string batchToSpaceNdLayerStr = "batchToSpaceNd";
261  std::string inputTensorStr = "input";
262  std::string outputTensorStr = "output";
263 
264  // Define supported types.
265  std::array<DataType,6> supportedTypes =
266  {
267  DataType::BFloat16,
268  DataType::Float32,
269  DataType::Float16,
270  DataType::QAsymmS8,
271  DataType::QAsymmU8,
272  DataType::QSymmS16
273  };
274 
275  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
276  "Reference BatchToSpaceNd: input type not supported.");
277 
278  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
279  "Reference BatchToSpaceNd: output type not supported.");
280 
281  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
282  "Reference BatchToSpaceNd: input and output types mismatched.");
283 
284  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 4),
285  reasonIfUnsupported,
286  CreateIncorrectDimensionsErrorMsg(4,
287  output.GetNumDimensions(),
288  batchToSpaceNdLayerStr,
289  outputTensorStr).data());
290 
291  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(input, 4),
292  reasonIfUnsupported,
293  CreateIncorrectDimensionsErrorMsg(4,
294  input.GetNumDimensions(),
295  batchToSpaceNdLayerStr,
296  inputTensorStr).data());
297 
298  return supported;
299 }
300 
301 bool RefLayerSupport::IsComparisonSupported(const TensorInfo& input0,
302  const TensorInfo& input1,
303  const TensorInfo& output,
304  const ComparisonDescriptor& descriptor,
305  Optional<std::string&> reasonIfUnsupported) const
306 {
307  IgnoreUnused(descriptor);
308  std::array<DataType, 8> supportedInputTypes =
309  {
310  DataType::Boolean,
311  DataType::BFloat16,
312  DataType::Float32,
313  DataType::Float16,
314  DataType::QAsymmS8,
315  DataType::QAsymmU8,
316  DataType::QSymmS16,
317  DataType::Signed32
318  };
319 
320  bool supported = true;
321  supported &= CheckSupportRule(TypeAnyOf(input0, supportedInputTypes), reasonIfUnsupported,
322  "Reference comparison: input 0 is not a supported type");
323 
324  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
325  "Reference comparison: input 0 and Input 1 types are mismatched");
326 
327  supported &= CheckSupportRule(TypeIs(output, DataType::Boolean), reasonIfUnsupported,
328  "Reference comparison: output is not of type Boolean");
329 
330  return supported;
331 }
332 
333 bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
334  const TensorInfo& output,
335  const ConcatDescriptor& descriptor,
336  Optional<std::string&> reasonIfUnsupported) const
337 {
338  IgnoreUnused(descriptor);
339 
340  bool supported = true;
341  std::array<DataType,6> supportedTypes =
342  {
343  DataType::BFloat16,
344  DataType::Float32,
345  DataType::Float16,
346  DataType::QAsymmS8,
347  DataType::QAsymmU8,
348  DataType::QSymmS16
349  };
350 
351  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
352  "Reference concatenation: output type not supported");
353  for (const TensorInfo* input : inputs)
354  {
355  ARMNN_ASSERT(input != nullptr);
356  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
357  "Reference concatenation: input type not supported");
358 
359  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
360  "Reference concatenation: input and output types mismatched.");
361  }
362 
363  return supported;
364 }
365 
367  Optional<std::string&> reasonIfUnsupported) const
368 {
369  std::array<DataType,7> supportedTypes =
370  {
371  DataType::BFloat16,
372  DataType::Float32,
373  DataType::QAsymmS8,
374  DataType::QAsymmU8,
375  DataType::QSymmS8,
376  DataType::QSymmS16,
377  DataType::Signed32
378  };
379 
380  return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
381  "Reference constant: output is not a supported type.");
382 }
383 
384 bool RefLayerSupport::IsConvertBf16ToFp32Supported(const TensorInfo& input,
385  const TensorInfo& output,
386  Optional<std::string&> reasonIfUnsupported) const
387 {
388  bool supported = true;
389 
390  supported &= CheckSupportRule(TypeIs(input, DataType::BFloat16), reasonIfUnsupported,
391  "Reference for ConvertBf16ToFp32 layer: input type not supported");
392 
393  supported &= CheckSupportRule(TypeIs(output, DataType::Float32), reasonIfUnsupported,
394  "Reference for ConvertBf16ToFp32 layer: output type not supported");
395 
396  return supported;
397 }
398 
400  const TensorInfo& output,
401  Optional<std::string&> reasonIfUnsupported) const
402 {
403  return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
404  input.GetDataType(),
405  &TrueFunc<>,
406  &FalseInputFuncF32<>,
407  &FalseFuncU8<>,
408  &FalseFuncI32<>,
409  &FalseFuncU8<>) &&
410  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
411  output.GetDataType(),
412  &FalseOutputFuncF16<>,
413  &TrueFunc<>,
414  &FalseFuncU8<>,
415  &FalseFuncI32<>,
416  &FalseFuncU8<>));
417 }
418 
419 bool RefLayerSupport::IsConvertFp32ToBf16Supported(const TensorInfo& input,
420  const TensorInfo& output,
421  Optional<std::string&> reasonIfUnsupported) const
422 {
423  bool supported = true;
424 
425  supported &= CheckSupportRule(TypeIs(input, DataType::Float32), reasonIfUnsupported,
426  "Reference for ConvertFp32ToBf16 layer: input type not supported");
427 
428  supported &= CheckSupportRule(TypeIs(output, DataType::BFloat16), reasonIfUnsupported,
429  "Reference for ConvertFp32ToBf16 layer: output type not supported");
430 
431  return supported;
432 }
433 
435  const TensorInfo& output,
436  Optional<std::string&> reasonIfUnsupported) const
437 {
438  return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
439  input.GetDataType(),
440  &FalseInputFuncF16<>,
441  &TrueFunc<>,
442  &FalseFuncU8<>,
443  &FalseFuncI32<>,
444  &FalseFuncU8<>) &&
445  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
446  output.GetDataType(),
447  &TrueFunc<>,
448  &FalseOutputFuncF32<>,
449  &FalseFuncU8<>,
450  &FalseFuncI32<>,
451  &FalseFuncU8<>));
452 }
453 
455  const TensorInfo& output,
456  const Convolution2dDescriptor& descriptor,
457  const TensorInfo& weights,
458  const Optional<TensorInfo>& biases,
459  Optional<std::string&> reasonIfUnsupported) const
460 {
461  bool supported = true;
462 
463  // Define supported types.
464  std::array<DataType,7> supportedTypes =
465  {
466  DataType::BFloat16,
467  DataType::Float32,
468  DataType::Float16,
469  DataType::QAsymmS8,
470  DataType::QAsymmU8,
471  DataType::QSymmS8,
472  DataType::QSymmS16
473  };
474 
475  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
476  "Reference Convolution2d: input is not a supported type.");
477 
478  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
479  "Reference Convolution2d: output is not a supported type.");
480 
481  // For Convolution2d, we allow to have BFloat16 input with Float32 output for optimization.
482  if (input.GetDataType() == DataType::BFloat16)
483  {
484  if (output.GetDataType() != DataType::BFloat16 && output.GetDataType() != DataType::Float32)
485  {
486  reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n";
487  supported = false;
488  }
489  }
490  else
491  {
492  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
493  "Reference Convolution2d: input and output types mismatched.");
494  }
495 
496  const DataType inputType = input.GetDataType();
497  if (IsQuantized8BitType(inputType))
498  {
500  std::array<DataType, 4> supportedWeightTypes =
501  {
502  DataType::QAsymmS8,
503  DataType::QAsymmU8,
504  DataType::QSymmS8,
505  DataType::QuantizedSymm8PerAxis // deprecated
506  };
508 
509  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
510  "Reference Convolution2d: weights type not supported for quantized input.");
511  }
512  else
513  {
514  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
515  "Reference Convolution2d: weights is not a supported type.");
516 
517  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
518  "Reference Convolution2d: input and weights types mismatched.");
519  }
520 
521  if (biases.has_value())
522  {
523  std::array<DataType,4> biasesSupportedTypes =
524  {
525  DataType::BFloat16,
526  DataType::Float32,
527  DataType::Float16,
528  DataType::Signed32
529  };
530 
531  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
532  "Reference Convolution2d: biases is not a supported type.");
533  }
534  IgnoreUnused(descriptor);
535 
536  return supported;
537 }
538 
540  const TensorInfo& output,
541  Optional<std::string&> reasonIfUnsupported) const
542 {
543  bool supported = true;
544 
545  std::array<DataType, 8> supportedTypes =
546  {
547  DataType::BFloat16,
548  DataType::Float16,
549  DataType::Float32,
550  DataType::QAsymmS8,
551  DataType::QAsymmU8,
552  DataType::QSymmS8,
553  DataType::QSymmS16,
554  DataType::Signed32
555  };
556 
557  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
558  "Reference for Debug layer: input type not supported");
559 
560  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
561  "Reference for Debug layer: output type not supported");
562 
563  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
564  "Reference for Debug layer: input and output types are mismatched");
565 
566  return supported;
567 }
568 
569 bool RefLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
570  const TensorInfo& output,
571  const DepthToSpaceDescriptor& descriptor,
572  Optional<std::string&> reasonIfUnsupported) const
573 {
574  IgnoreUnused(descriptor);
575  bool supported = true;
576 
577  std::array<DataType,6> supportedTypes =
578  {
579  DataType::BFloat16,
580  DataType::Float32,
581  DataType::Float16,
582  DataType::QAsymmS8,
583  DataType::QAsymmU8,
584  DataType::QSymmS16
585  };
586 
587  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
588  "Reference DepthToSpace: input type not supported");
589 
590  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
591  "Reference DepthToSpace: output type not supported");
592 
593  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
594  "Reference DepthToSpace: input and output types are mismatched");
595 
596  return supported;
597 }
598 
600  const TensorInfo& output,
601  const DepthwiseConvolution2dDescriptor& descriptor,
602  const TensorInfo& weights,
603  const Optional<TensorInfo>& biases,
604  Optional<std::string&> reasonIfUnsupported) const
605 {
606  IgnoreUnused(descriptor);
607  bool supported = true;
608 
609  // Define supported types.
610  std::array<DataType,7> supportedTypes =
611  {
612  DataType::BFloat16,
613  DataType::Float32,
614  DataType::Float16,
615  DataType::QAsymmS8,
616  DataType::QAsymmU8,
617  DataType::QSymmS8,
618  DataType::QSymmS16
619  };
620 
621  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
622  "Reference DepthwiseConvolution2d: input is not a supported type.");
623 
624  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
625  "Reference DepthwiseConvolution2d: output is not a supported type.");
626 
627  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
628  "Reference DepthwiseConvolution2d: input and output types mismatched.");
629 
630  const DataType inputType = input.GetDataType();
631  if (IsQuantized8BitType(inputType))
632  {
634  std::array<DataType, 4> supportedWeightTypes =
635  {
636  DataType::QAsymmS8,
637  DataType::QAsymmU8,
638  DataType::QSymmS8,
639  DataType::QuantizedSymm8PerAxis // deprecated
640  };
642 
643  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
644  "Reference DepthwiseConvolution2d: weights type not supported for "
645  "quantized input.");
646  }
647  else
648  {
649  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
650  "Reference DepthwiseConvolution2d: weights is not a supported type.");
651 
652  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
653  "Reference DepthwiseConvolution2d: input and weights types mismatched.");
654  }
655 
656  if (biases.has_value())
657  {
658  std::array<DataType,4> biasesSupportedTypes =
659  {
660  DataType::BFloat16,
661  DataType::Float32,
662  DataType::Float16,
663  DataType::Signed32
664  };
665  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
666  "Reference DepthwiseConvolution2d: biases is not a supported type.");
667  }
668 
669  return supported;
670 
671 }
672 
674  const TensorInfo& output,
675  Optional<std::string&> reasonIfUnsupported) const
676 {
677  bool supported = true;
678 
679  std::array<DataType,4> supportedInputTypes = {
680  DataType::QAsymmS8,
681  DataType::QAsymmU8,
682  DataType::QSymmS8,
683  DataType::QSymmS16
684  };
685 
686  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
687  "Reference for Dequantize layer: input type not supported.");
688 
689  supported &= CheckSupportRule( TypeNotPerAxisQuantized(input), reasonIfUnsupported,
690  "Reference for Dequantize layer: per-axis quantized input not support .");
691 
692  supported &= CheckSupportRule(TypeNotPerAxisQuantized(input), reasonIfUnsupported,
693  "Reference dequantize: per-axis quantized input not support .");
694 
695  std::array<DataType,3> supportedOutputTypes = {
696  DataType::BFloat16,
697  DataType::Float32,
698  DataType::Float16
699  };
700 
701  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
702  "Reference for Dequantize layer: output type not supported.");
703 
704  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
705  "Reference for Dequantize layer: input/output shapes have different num total "
706  "elements.");
707 
708  return supported;
709 }
710 
712  const TensorInfo& scores,
713  const TensorInfo& anchors,
714  const TensorInfo& detectionBoxes,
715  const TensorInfo& detectionClasses,
716  const TensorInfo& detectionScores,
717  const TensorInfo& numDetections,
718  const DetectionPostProcessDescriptor& descriptor,
719  Optional<std::string&> reasonIfUnsupported) const
720 {
721  IgnoreUnused(anchors, detectionBoxes, detectionClasses, detectionScores, numDetections, descriptor);
722 
723  bool supported = true;
724 
725  std::array<DataType,5> supportedInputTypes =
726  {
727  DataType::BFloat16,
728  DataType::Float32,
729  DataType::QAsymmS8,
730  DataType::QAsymmU8,
731  DataType::QSymmS16
732  };
733 
734  supported &= CheckSupportRule(TypeAnyOf(boxEncodings, supportedInputTypes), reasonIfUnsupported,
735  "Reference DetectionPostProcess: input 0 is not a supported type.");
736 
737  supported &= CheckSupportRule(TypeAnyOf(scores, supportedInputTypes), reasonIfUnsupported,
738  "Reference DetectionPostProcess: input 1 is not a supported type.");
739 
740  return supported;
741 }
742 
bool RefLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
                                                             const TensorInfo& output,
                                                             const DepthwiseConvolution2dDescriptor& descriptor,
                                                             const TensorInfo& weights,
                                                             const Optional<TensorInfo>& biases,
                                                             Optional<std::string&> reasonIfUnsupported) const
{
    // Dilation is delegated entirely to the depthwise-convolution support
    // check: the reference backend applies the same criteria either way.
    return IsDepthwiseConvolutionSupported(input, output, descriptor, weights, biases, reasonIfUnsupported);
}
752 
754  const TensorInfo& input1,
755  const TensorInfo& output,
756  Optional<std::string&> reasonIfUnsupported) const
757 {
758  bool supported = true;
759 
760  std::array<DataType,6> supportedTypes = {
761  DataType::BFloat16,
762  DataType::Float32,
763  DataType::Float16,
764  DataType::QAsymmS8,
765  DataType::QAsymmU8,
766  DataType::QSymmS16
767  };
768 
769  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
770  "Reference division: input 0 is not a supported type.");
771 
772  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
773  "Reference division: input 1 is not a supported type.");
774 
775  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
776  "Reference division: output is not a supported type.");
777 
778  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
779  "Reference division: input 0 and Input 1 types are mismatched");
780 
781  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
782  "Reference division: input and output types are mismatched");
783 
784  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
785  "Reference division: shapes are not suitable for implicit broadcast.");
786 
787  return supported;
788 }
789 
790 bool RefLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
791  const TensorInfo& output,
792  const ElementwiseUnaryDescriptor& descriptor,
793  Optional<std::string&> reasonIfUnsupported) const
794 {
795  IgnoreUnused(descriptor);
796 
797  std::array<DataType, 7> supportedTypes =
798  {
799  DataType::BFloat16,
800  DataType::Float32,
801  DataType::Float16,
802  DataType::QAsymmS8,
803  DataType::QAsymmU8,
804  DataType::QSymmS16,
805  DataType::Signed32
806  };
807 
808  bool supported = true;
809 
810  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
811  "Reference elementwise unary: input type not supported");
812 
813  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
814  "Reference elementwise unary: output type not supported");
815 
816  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
817  "Reference elementwise unary: input and output types not matching");
818 
819  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
820  "Reference elementwise unary: input and output shapes"
821  "have different number of total elements");
822 
823  return supported;
824 }
825 
827  const TensorInfo& input1,
828  const TensorInfo& output,
829  Optional<std::string&> reasonIfUnsupported) const
830 {
831  return IsComparisonSupported(input0,
832  input1,
833  output,
834  ComparisonDescriptor(ComparisonOperation::Equal),
835  reasonIfUnsupported);
836 }
837 
839  const FakeQuantizationDescriptor& descriptor,
840  Optional<std::string&> reasonIfUnsupported) const
841 {
842  IgnoreUnused(descriptor);
843  bool supported = true;
844 
845  std::array<DataType,1> supportedTypes =
846  {
847  DataType::Float32
848  };
849 
850  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
851  "Reference fake quantization: input type not supported.");
852 
853  return supported;
854 }
855 
857  const TensorInfo& output,
858  Optional<std::string&> reasonIfUnsupported) const
859 {
860  IgnoreUnused(output);
861  bool supported = true;
862 
863  std::array<DataType,4> supportedTypes =
864  {
865  DataType::BFloat16,
866  DataType::Float32,
867  DataType::Float16,
868  DataType::QSymmS16
869  };
870 
871  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
872  "Reference Floor: input type not supported.");
873 
874  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
875  "Reference Floor: output type not supported.");
876 
877  return supported;
878 }
879 
881  const TensorInfo& output,
882  const TensorInfo& weights,
883  const TensorInfo& biases,
884  const FullyConnectedDescriptor& descriptor,
885  Optional<std::string&> reasonIfUnsupported) const
886 {
887  bool supported = true;
888 
889  // Define supported types.
890  std::array<DataType,6> supportedTypes =
891  {
892  DataType::BFloat16,
893  DataType::Float32,
894  DataType::Float16,
895  DataType::QAsymmS8,
896  DataType::QAsymmU8,
897  DataType::QSymmS16
898  };
899 
900  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
901  "Reference Fully Connected: input type not supported.");
902 
903  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
904  "Reference Fully Connected: output type not supported.");
905 
906  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
907  "Reference Fully Connected: weights type not supported.");
908 
909  // For FullyConnected, we allow to have BFloat16 input with Float32 output for optimization.
910  if (input.GetDataType() == DataType::BFloat16)
911  {
912  if (output.GetDataType() != DataType::BFloat16 && output.GetDataType() != DataType::Float32)
913  {
914  reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n";
915  supported = false;
916  }
917  }
918  else
919  {
920  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
921  "Reference Fully Connected: input and output types mismatched.");
922  }
923 
925  std::array<DataType, 4> supportedWeightTypes =
926  {
927  DataType::QAsymmS8,
928  DataType::QAsymmU8,
929  DataType::QSymmS8,
930  DataType::QuantizedSymm8PerAxis // deprecated
931  };
933 
934  if (IsQuantized8BitType(input.GetDataType()))
935  {
936 
937  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
938  "Reference Fully Connected: weights type not supported for quantized input.");
939  }
940  else
941  {
942  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
943  "Reference Fully Connected: weights is not a supported type.");
944 
945  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
946  "Reference Fully Connected: input and weights types mismatched.");
947  }
948 
949  if (descriptor.m_BiasEnabled)
950  {
951  // Defined supported types for bias
952  std::array<DataType, 5>
953  supportedBiasTypes =
954  {
955  DataType::BFloat16,
956  DataType::Float32,
957  DataType::Float16,
958  DataType::Signed32,
959  DataType::QAsymmS8
960  };
961 
962  supported &= CheckSupportRule(TypeAnyOf(biases, supportedBiasTypes), reasonIfUnsupported,
963  "Reference Fully Connected: bias type not supported.");
964 
965  supported &= CheckSupportRule(BiasAndWeightsTypesMatch(biases, weights), reasonIfUnsupported,
966  "Reference Fully Connected: bias and weight types mismatch.");
967 
968  supported &= CheckSupportRule(BiasAndWeightsTypesCompatible(weights, supportedBiasTypes), reasonIfUnsupported,
969  "Reference Fully Connected: bias type inferred from weights is incompatible.");
970 
971  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(biases, 1U), reasonIfUnsupported,
972  "Reference Fully Connected: bias must have 1 dimension.");
973 
974  }
975 
976  return supported;
977 }
978 
980  const armnn::TensorInfo& input1,
981  const armnn::TensorInfo& output,
982  armnn::Optional<std::string&> reasonIfUnsupported) const
983 {
984  bool supported = true;
985  std::array<DataType,6> supportedTypes =
986  {
987  DataType::BFloat16,
988  DataType::Float32,
989  DataType::Float16,
990  DataType::QAsymmS8,
991  DataType::QAsymmU8,
992  DataType::QSymmS16
993  };
994 
995  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
996  "Reference Gather: input type not supported");
997 
998  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
999  "Reference Gather: output type not supported");
1000 
1001  supported &= CheckSupportRule(TypeIs(input1, DataType::Signed32), reasonIfUnsupported,
1002  "Reference Gather: indices (input1) type not supported");
1003 
1004  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1005  "Reference Gather: input and output types not matching");
1006 
1007  return supported;
1008 }
1009 
1011  const TensorInfo& input1,
1012  const TensorInfo& output,
1013  Optional<std::string&> reasonIfUnsupported) const
1014 {
1015  return IsComparisonSupported(input0,
1016  input1,
1017  output,
1018  ComparisonDescriptor(ComparisonOperation::Greater),
1019  reasonIfUnsupported);
1020 }
1021 
1023  Optional<std::string&> /*reasonIfUnsupported*/) const
1024 {
1025  return true;
1026 }
1027 
1028 bool RefLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input,
1029  const TensorInfo& output,
1030  const InstanceNormalizationDescriptor& descriptor,
1031  Optional<std::string&> reasonIfUnsupported) const
1032 {
1033  IgnoreUnused(descriptor);
1034  // Define supported types
1035  std::array<DataType, 3> supportedTypes =
1036  {
1037  DataType::BFloat16,
1038  DataType::Float32,
1039  DataType::Float16
1040  };
1041 
1042  bool supported = true;
1043 
1044  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1045  "Reference Instance Normalization: input type not supported.");
1046 
1047  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1048  "Reference Instance Normalization: output type not supported.");
1049 
1050  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1051  "Reference Instance Normalization: input and output types mismatched.");
1052 
1053  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1054  "Reference Instance Normalization: input and output shapes have different "
1055  "num total elements.");
1056 
1057  return supported;
1058 }
1059 
1061  const TensorInfo& output,
1062  const L2NormalizationDescriptor& descriptor,
1063  Optional<std::string&> reasonIfUnsupported) const
1064 {
1065  IgnoreUnused(descriptor);
1066  // Define supported types
1067  std::array<DataType, 6> supportedTypes =
1068  {
1069  DataType::BFloat16,
1070  DataType::Float32,
1071  DataType::Float16,
1072  DataType::QAsymmS8,
1073  DataType::QAsymmU8,
1074  DataType::QSymmS16
1075  };
1076 
1077  bool supported = true;
1078 
1079  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1080  "Reference L2normalization: input type not supported.");
1081 
1082  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1083  "Reference L2normalization: output type not supported.");
1084 
1085  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1086  "Reference L2normalization: input and output types mismatched.");
1087 
1088  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1089  "Reference L2normalization: input and output shapes have different "
1090  "num total elements.");
1091 
1092  return supported;
1093 }
1094 
1095 bool RefLayerSupport::IsLogSoftmaxSupported(const TensorInfo& input,
1096  const TensorInfo& output,
1097  const LogSoftmaxDescriptor& descriptor,
1098  Optional<std::string&> reasonIfUnsupported) const
1099 {
1100  IgnoreUnused(descriptor);
1101 
1102  std::array<DataType, 3> supportedTypes =
1103  {
1104  DataType::BFloat16,
1105  DataType::Float32,
1106  DataType::Float16
1107  };
1108 
1109  bool supported = true;
1110  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1111  "Reference LogSoftmax: input type not supported");
1112 
1113  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1114  "Reference LogSoftmax: output type not supported");
1115 
1116  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1117  "Reference LogSoftmax: input and output types do not match");
1118 
1119  return supported;
1120 }
1121 
1123  const TensorInfo& outputStateIn,
1124  const TensorInfo& cellStateIn,
1125  const TensorInfo& scratchBuffer,
1126  const TensorInfo& outputStateOut,
1127  const TensorInfo& cellStateOut,
1128  const TensorInfo& output,
1129  const LstmDescriptor& descriptor,
1130  const LstmInputParamsInfo& paramsInfo,
1131  Optional<std::string&> reasonIfUnsupported) const
1132 {
1133  IgnoreUnused(descriptor);
1134  IgnoreUnused(paramsInfo);
1135 
1136  bool supported = true;
1137 
1138  std::array<DataType,3> supportedTypes = {
1139  DataType::BFloat16,
1140  DataType::Float32,
1141  DataType::QSymmS16
1142  };
1143 
1144  // check inputs and outputs
1145  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1146  "Reference Lstm: input is not a supported type.");
1147  supported &= CheckSupportRule(TypesAreEqual(input, outputStateIn), reasonIfUnsupported,
1148  "Reference Lstm: input and outputStateIn types are mismatched");
1149  supported &= CheckSupportRule(TypesAreEqual(input, cellStateIn), reasonIfUnsupported,
1150  "Reference Lstm: input and cellStateIn types are mismatched");
1151  supported &= CheckSupportRule(TypesAreEqual(input, scratchBuffer), reasonIfUnsupported,
1152  "Reference Lstm: input and scratchBuffer types are mismatched");
1153  supported &= CheckSupportRule(TypesAreEqual(input, outputStateOut), reasonIfUnsupported,
1154  "Reference Lstm: input and outputStateOut types are mismatched");
1155  supported &= CheckSupportRule(TypesAreEqual(input, cellStateOut), reasonIfUnsupported,
1156  "Reference Lstm: input and cellStateOut types are mismatched");
1157  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1158  "Reference Lstm: input and output types are mismatched");
1159  // check layer parameters
1160  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToForgetWeights()), reasonIfUnsupported,
1161  "Reference Lstm: input and InputToForgetWeights types are mismatched");
1162  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToCellWeights()), reasonIfUnsupported,
1163  "Reference Lstm: input and InputToCellWeights types are mismatched");
1164  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToOutputWeights()), reasonIfUnsupported,
1165  "Reference Lstm: input and InputToOutputWeights types are mismatched");
1166  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToForgetWeights()), reasonIfUnsupported,
1167  "Reference Lstm: input and RecurrentToForgetWeights types are mismatched");
1168  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToCellWeights()), reasonIfUnsupported,
1169  "Reference Lstm: input and RecurrentToCellWeights types are mismatched");
1170  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToOutputWeights()), reasonIfUnsupported,
1171  "Reference Lstm: input and RecurrentToOutputWeights types are mismatched");
1172  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetGateBias()), reasonIfUnsupported,
1173  "Reference Lstm: input and ForgetGateBias types are mismatched");
1174  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellBias()), reasonIfUnsupported,
1175  "Reference Lstm: input and CellBias types are mismatched");
1176  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputGateBias()), reasonIfUnsupported,
1177  "Reference Lstm: input and OutputGateBias types are mismatched");
1178  if (!descriptor.m_CifgEnabled)
1179  {
1180  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToInputWeights()), reasonIfUnsupported,
1181  "Reference Lstm: input and InputToInputWeights types are mismatched");
1182  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToInputWeights()),
1183  reasonIfUnsupported,
1184  "Reference Lstm: input and RecurrentToInputWeights types are mismatched");
1185  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputGateBias()), reasonIfUnsupported,
1186  "Reference Lstm: input and InputGateBias types are mismatched");
1187  if (descriptor.m_PeepholeEnabled)
1188  {
1189  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToInputWeights()),
1190  reasonIfUnsupported,
1191  "Reference Lstm: input and CellToInputWeights types are mismatched");
1192  }
1193  }
1194  if (descriptor.m_PeepholeEnabled)
1195  {
1196  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToForgetWeights()), reasonIfUnsupported,
1197  "Reference Lstm: input and CellToForgetWeights types are mismatched");
1198  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToOutputWeights()), reasonIfUnsupported,
1199  "Reference Lstm: input and CellToOutputWeights types are mismatched");
1200  }
1201  if (descriptor.m_ProjectionEnabled)
1202  {
1203  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionWeights()), reasonIfUnsupported,
1204  "Reference Lstm: input and mProjectionWeights types are mismatched");
1205  if (paramsInfo.m_ProjectionBias != nullptr)
1206  {
1207  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
1208  "Reference Lstm: input and ProjectionBias types are mismatched");
1209  }
1210  }
1211  if (descriptor.m_LayerNormEnabled)
1212  {
1213  if (!descriptor.m_CifgEnabled)
1214  {
1215  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputLayerNormWeights()),
1216  reasonIfUnsupported,
1217  "Reference Lstm: input and InputLayerNormWeights types are mismatched");
1218  }
1219  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetLayerNormWeights()),
1220  reasonIfUnsupported,
1221  "Reference Lstm: input and ForgetLayerNormWeights types are mismatched");
1222  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellLayerNormWeights()),
1223  reasonIfUnsupported,
1224  "Reference Lstm: input and CellLayerNormWeights types are mismatched");
1225  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputLayerNormWeights()),
1226  reasonIfUnsupported,
1227  "Reference Lstm: input and OutputLayerNormWeights types are mismatched");
1228  }
1229 
1230  return supported;
1231 }
1232 
1234  const TensorInfo& input1,
1235  const TensorInfo& output,
1236  Optional<std::string&> reasonIfUnsupported) const
1237 {
1238  bool supported = true;
1239 
1240  std::array<DataType,6> supportedTypes = {
1241  DataType::BFloat16,
1242  DataType::Float32,
1243  DataType::Float16,
1244  DataType::QAsymmS8,
1245  DataType::QAsymmU8,
1246  DataType::QSymmS16
1247  };
1248 
1249  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1250  "Reference maximum: input 0 is not a supported type.");
1251 
1252  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1253  "Reference maximum: input 1 is not a supported type.");
1254 
1255  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1256  "Reference maximum: output is not a supported type.");
1257 
1258  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1259  "Reference maximum: input 0 and Input 1 types are mismatched");
1260 
1261  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1262  "Reference maximum: input and output types are mismatched");
1263 
1264  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1265  "Reference maximum: shapes are not suitable for implicit broadcast.");
1266 
1267  return supported;
1268 }
1269 
1271  const TensorInfo& output,
1272  const MeanDescriptor& descriptor,
1273  Optional<std::string&> reasonIfUnsupported) const
1274 {
1275  bool supported = true;
1276  std::string meanLayerStr = "Mean";
1277  std::string outputTensorStr = "output";
1278 
1279  std::array<DataType,6> supportedTypes =
1280  {
1281  DataType::BFloat16,
1282  DataType::Float32,
1283  DataType::Float16,
1284  DataType::QAsymmS8,
1285  DataType::QAsymmU8,
1286  DataType::QSymmS16
1287  };
1288 
1289  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1290  "Reference Mean: input type not supported.");
1291 
1292  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1293  "Reference Mean: input and output types are mismatched");
1294 
1295  if (descriptor.m_KeepDims)
1296  {
1297  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, input.GetNumDimensions()),
1298  reasonIfUnsupported,
1299  CreateIncorrectDimensionsErrorMsg(input.GetNumDimensions(),
1300  output.GetNumDimensions(),
1301  meanLayerStr, outputTensorStr).data());
1302  }
1303  else if (descriptor.m_Axis.empty())
1304  {
1305  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1306  reasonIfUnsupported,
1307  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1308  meanLayerStr, outputTensorStr).data());
1309  }
1310  else
1311  {
1312  auto outputDim = input.GetNumDimensions() - boost::numeric_cast<unsigned int>(descriptor.m_Axis.size());
1313 
1314  if (outputDim > 0)
1315  {
1316  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, outputDim),
1317  reasonIfUnsupported,
1318  CreateIncorrectDimensionsErrorMsg(outputDim, output.GetNumDimensions(),
1319  meanLayerStr, outputTensorStr).data());
1320  }
1321  else
1322  {
1323  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1324  reasonIfUnsupported,
1325  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1326  meanLayerStr, outputTensorStr).data());
1327  }
1328  }
1329 
1330  return supported;
1331 }
1332 
1333 bool RefLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
1334  const TensorInfo& output,
1335  const MergerDescriptor& descriptor,
1336  Optional<std::string&> reasonIfUnsupported) const
1337 {
1338  return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
1339 }
1340 
1342  const TensorInfo &output,
1343  Optional<std::string &> reasonIfUnsupported) const
1344 {
1345  bool supported = true;
1346 
1347  std::array<DataType,7> supportedTypes =
1348  {
1349  DataType::BFloat16,
1350  DataType::Float32,
1351  DataType::Float16,
1352  DataType::QAsymmS8,
1353  DataType::QAsymmU8,
1354  DataType::QSymmS16,
1355  DataType::Boolean
1356  };
1357 
1358  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1359  "Reference MemCopy: input type not supported");
1360 
1361  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1362  "Reference MemCopy: output type not supported");
1363 
1364  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1365  "Reference MemCopy: input and output types are mismatched");
1366 
1367  return supported;
1368 }
1369 
1371  const TensorInfo& input1,
1372  const TensorInfo& output,
1373  Optional<std::string&> reasonIfUnsupported) const
1374 {
1375  bool supported = true;
1376 
1377  std::array<DataType,6> supportedTypes = {
1378  DataType::BFloat16,
1379  DataType::Float32,
1380  DataType::Float16,
1381  DataType::QAsymmS8,
1382  DataType::QAsymmU8,
1383  DataType::QSymmS16
1384  };
1385 
1386  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1387  "Reference minimum: input 0 is not a supported type.");
1388 
1389  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1390  "Reference minimum: input 1 is not a supported type.");
1391 
1392  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1393  "Reference minimum: output is not a supported type.");
1394 
1395  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1396  "Reference minimum: input 0 and Input 1 types are mismatched");
1397 
1398  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1399  "Reference minimum: input and output types are mismatched");
1400 
1401  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1402  "Reference minimum: shapes are not suitable for implicit broadcast.");
1403 
1404  return supported;
1405 }
1406 
1408  const TensorInfo& input1,
1409  const TensorInfo& output,
1410  Optional<std::string&> reasonIfUnsupported) const
1411 {
1412  bool supported = true;
1413 
1414  std::array<DataType,6> supportedTypes = {
1415  DataType::BFloat16,
1416  DataType::Float32,
1417  DataType::Float16,
1418  DataType::QAsymmS8,
1419  DataType::QAsymmU8,
1420  DataType::QSymmS16
1421  };
1422 
1423  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1424  "Reference multiplication: input 0 is not a supported type.");
1425 
1426  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1427  "Reference multiplication: input 1 is not a supported type.");
1428 
1429  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1430  "Reference multiplication: output is not a supported type.");
1431 
1432  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1433  "Reference multiplication: input 0 and Input 1 types are mismatched");
1434 
1435  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1436  "Reference multiplication: input and output types are mismatched");
1437 
1438  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1439  "Reference multiplication: shapes are not suitable for implicit broadcast.");
1440 
1441  return supported;
1442 }
1443 
1445  const TensorInfo& output,
1446  const NormalizationDescriptor& descriptor,
1447  Optional<std::string&> reasonIfUnsupported) const
1448 {
1449  IgnoreUnused(descriptor);
1450 
1451  // Define supported types
1452  std::array<DataType, 6> supportedTypes =
1453  {
1454  DataType::BFloat16,
1455  DataType::Float16,
1456  DataType::Float32,
1457  DataType::QAsymmS8,
1458  DataType::QAsymmU8,
1459  DataType::QSymmS16
1460  };
1461 
1462  bool supported = true;
1463 
1464  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1465  "Reference normalization: input type not supported.");
1466 
1467  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1468  "Reference normalization: output type not supported.");
1469 
1470  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1471  "Reference normalization: input and output shapes have different "
1472  "num total elements.");
1473 
1474  return supported;
1475 }
1476 
1478  Optional<std::string&> /*reasonIfUnsupported*/) const
1479 {
1480  return true;
1481 }
1482 
1484  const TensorInfo& output,
1485  const PadDescriptor& descriptor,
1486  Optional<std::string&> reasonIfUnsupported) const
1487 {
1488  IgnoreUnused(descriptor);
1489  bool supported = true;
1490 
1491  // Define supported output and inputs types.
1492  std::array<DataType,6> supportedTypes =
1493  {
1494  DataType::BFloat16,
1495  DataType::Float32,
1496  DataType::Float16,
1497  DataType::QAsymmS8,
1498  DataType::QAsymmU8,
1499  DataType::QSymmS16
1500  };
1501 
1502  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1503  "Reference pad: input is not a supported type.");
1504 
1505  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1506  "Reference pad: output is not a supported type.");
1507 
1508  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1509  "Reference pad: input and output types are mismatched.");
1510 
1511  return supported;
1512 }
1513 
1515  const TensorInfo& output,
1516  const PermuteDescriptor& descriptor,
1517  Optional<std::string&> reasonIfUnsupported) const
1518 {
1519  IgnoreUnused(descriptor);
1520  bool supported = true;
1521 
1522  // Define supported output and inputs types.
1523  std::array<DataType, 6> supportedTypes =
1524  {
1525  DataType::BFloat16,
1526  DataType::Float32,
1527  DataType::Float16,
1528  DataType::QAsymmS8,
1529  DataType::QAsymmU8,
1530  DataType::QSymmS16
1531  };
1532 
1533  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1534  "Reference permute: input is not a supported type.");
1535 
1536  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1537  "Reference permute: output is not a supported type.");
1538 
1539  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1540  "Reference permute: input and output types are mismatched.");
1541 
1542  return supported;
1543 }
1544 
1546  const TensorInfo& output,
1547  const Pooling2dDescriptor& descriptor,
1548  Optional<std::string&> reasonIfUnsupported) const
1549 {
1550  IgnoreUnused(descriptor);
1551  bool supported = true;
1552 
1553  // Define supported output and inputs types.
1554  std::array<DataType,6> supportedTypes =
1555  {
1556  DataType::BFloat16,
1557  DataType::Float32,
1558  DataType::Float16,
1559  DataType::QAsymmS8,
1560  DataType::QAsymmU8,
1561  DataType::QSymmS16
1562  };
1563 
1564  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1565  "Reference poolind2d: input is not a supported type.");
1566 
1567  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1568  "Reference poolind2d: output is not a supported type.");
1569 
1570  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1571  "Reference poolind2d: input and output types are mismatched.");
1572 
1573  return supported;
1574 }
1575 
1577  const TensorInfo& previousOutputIn,
1578  const TensorInfo& previousCellStateIn,
1579  const TensorInfo& outputStateOut,
1580  const TensorInfo& cellStateOut,
1581  const TensorInfo& output,
1582  const QLstmDescriptor& descriptor,
1583  const LstmInputParamsInfo& paramsInfo,
1584  Optional<std::string&> reasonIfUnsupported) const
1585 {
1586  IgnoreUnused(input);
1587  IgnoreUnused(previousOutputIn);
1588  IgnoreUnused(previousCellStateIn);
1589  IgnoreUnused(outputStateOut);
1590  IgnoreUnused(cellStateOut);
1591  IgnoreUnused(output);
1592  IgnoreUnused(descriptor);
1593  IgnoreUnused(paramsInfo);
1594 
1595  IgnoreUnused(reasonIfUnsupported);
1596 
1597  return true;
1598 }
1599 
1601  const TensorInfo& output,
1602  Optional<std::string&> reasonIfUnsupported) const
1603 {
1604  bool supported = true;
1605 
1606  // Define supported input types.
1607  std::array<DataType,7> supportedInputTypes = {
1608  DataType::BFloat16,
1609  DataType::Float32,
1610  DataType::Float16,
1611  DataType::QAsymmS8,
1612  DataType::QAsymmU8,
1613  DataType::QSymmS8,
1614  DataType::QSymmS16
1615  };
1616 
1617  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
1618  "Reference quantize: input type not supported.");
1619 
1620  // Define supported output types.
1621  std::array<DataType,4> supportedOutputTypes = {
1622  DataType::QAsymmS8,
1623  DataType::QAsymmU8,
1624  DataType::QSymmS8,
1625  DataType::QSymmS16
1626  };
1627  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
1628  "Reference quantize: output type not supported.");
1629 
1630  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1631  "Reference quantize: input and output shapes have different num total elements.");
1632 
1633  return supported;
1634 }
1635 
1637  const TensorInfo& output,
1638  const ReshapeDescriptor& descriptor,
1639  Optional<std::string&> reasonIfUnsupported) const
1640 {
1641  IgnoreUnused(output);
1642  IgnoreUnused(descriptor);
1643  // Define supported output types.
1644  std::array<DataType,7> supportedOutputTypes =
1645  {
1646  DataType::BFloat16,
1647  DataType::Float32,
1648  DataType::Float16,
1649  DataType::Signed32,
1650  DataType::QAsymmS8,
1651  DataType::QAsymmU8,
1652  DataType::QSymmS16
1653  };
1654 
1655  return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
1656  "Reference reshape: input type not supported.");
1657 }
1658 
1660  const TensorInfo& output,
1661  Optional<std::string&> reasonIfUnsupported) const
1662 {
1663  bool supported = true;
1664  std::array<DataType,6> supportedTypes =
1665  {
1666  DataType::BFloat16,
1667  DataType::Float32,
1668  DataType::Float16,
1669  DataType::QAsymmS8,
1670  DataType::QAsymmU8,
1671  DataType::QSymmS16
1672  };
1673 
1674  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1675  "Reference ResizeBilinear: input type not supported");
1676 
1677  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1678  "Reference ResizeBilinear: output type not supported");
1679 
1680  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1681  "Reference ResizeBilinear: input and output types not matching");
1682 
1683  return supported;
1684 }
1685 
1687  const TensorInfo& output,
1688  const ResizeDescriptor& descriptor,
1689  Optional<std::string&> reasonIfUnsupported) const
1690 {
1691  IgnoreUnused(descriptor);
1692  bool supported = true;
1693  std::array<DataType,6> supportedTypes =
1694  {
1695  DataType::BFloat16,
1696  DataType::Float32,
1697  DataType::Float16,
1698  DataType::QAsymmS8,
1699  DataType::QAsymmU8,
1700  DataType::QSymmS16
1701  };
1702 
1703  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1704  "Reference Resize: input type not supported");
1705 
1706  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1707  "Reference Resize: output type not supported");
1708 
1709  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1710  "Reference Resize: input and output types not matching");
1711 
1712  return supported;
1713 }
1714 
1716  const TensorInfo& output,
1717  Optional<std::string&> reasonIfUnsupported) const
1718 {
1719  return IsElementwiseUnarySupported(input,
1720  output,
1721  ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt),
1722  reasonIfUnsupported);
1723 }
1724 
1725 bool RefLayerSupport::IsSliceSupported(const TensorInfo& input,
1726  const TensorInfo& output,
1727  const SliceDescriptor& descriptor,
1728  Optional<std::string&> reasonIfUnsupported) const
1729 {
1730  IgnoreUnused(descriptor);
1731  bool supported = true;
1732 
1733  std::array<DataType, 5> supportedTypes =
1734  {
1735  DataType::BFloat16,
1736  DataType::Float32,
1737  DataType::QAsymmS8,
1738  DataType::QAsymmU8,
1739  DataType::QSymmS16
1740  };
1741 
1742  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1743  "Reference Slice: input type not supported");
1744 
1745  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1746  "Reference Slice: output type not supported");
1747 
1748  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1749  "Reference Slice: input and output types are mismatched");
1750 
1751  return supported;
1752 }
1753 
1755  const TensorInfo& output,
1756  const SoftmaxDescriptor& descriptor,
1757  Optional<std::string&> reasonIfUnsupported) const
1758 {
1759  IgnoreUnused(descriptor);
1760  bool supported = true;
1761  std::array<DataType,7> supportedTypes =
1762  {
1763  DataType::BFloat16,
1764  DataType::Float32,
1765  DataType::Float16,
1766  DataType::QSymmS8,
1767  DataType::QAsymmS8,
1768  DataType::QAsymmU8,
1769  DataType::QSymmS16
1770  };
1771 
1772  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1773  "Reference Softmax: output type not supported");
1774 
1775  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1776  "Reference Softmax: input type not supported");
1777 
1778  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1779  "Reference Softmax: input type not supported");
1780 
1781  return supported;
1782 }
1783 
1785  const TensorInfo& output,
1786  const SpaceToBatchNdDescriptor& descriptor,
1787  Optional<std::string&> reasonIfUnsupported) const
1788 {
1789  IgnoreUnused(descriptor);
1790  bool supported = true;
1791  std::array<DataType,6> supportedTypes =
1792  {
1793  DataType::BFloat16,
1794  DataType::Float32,
1795  DataType::Float16,
1796  DataType::QAsymmS8,
1797  DataType::QAsymmU8,
1798  DataType::QSymmS16
1799  };
1800 
1801  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1802  "Reference SpaceToBatchNd: input type not supported");
1803 
1804  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1805  "Reference SpaceToBatchNd: output type not supported");
1806 
1807  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1808  "Reference SpaceToBatchNd: input and output types are mismatched");
1809 
1810  return supported;
1811 }
1812 
1814  const TensorInfo& output,
1815  const SpaceToDepthDescriptor& descriptor,
1816  Optional<std::string&> reasonIfUnsupported) const
1817 {
1818 
1819  IgnoreUnused(descriptor);
1820  bool supported = true;
1821 
1822  std::array<DataType,6> supportedTypes =
1823  {
1824  DataType::BFloat16,
1825  DataType::Float32,
1826  DataType::Float16,
1827  DataType::QAsymmS8,
1828  DataType::QAsymmU8,
1829  DataType::QSymmS16
1830  };
1831 
1832  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1833  "Reference SpaceToDepth: input type not supported");
1834 
1835  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1836  "Reference SpaceToDepth: output type not supported");
1837 
1838  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1839  "Reference SpaceToDepth: input and output types are mismatched");
1840 
1841  return supported;
1842 }
1843 
1845  const ViewsDescriptor& descriptor,
1846  Optional<std::string&> reasonIfUnsupported) const
1847 {
1848  IgnoreUnused(descriptor);
1849  bool supported = true;
1850  std::array<DataType,6> supportedTypes =
1851  {
1852  DataType::BFloat16,
1853  DataType::Float32,
1854  DataType::Float16,
1855  DataType::QAsymmS8,
1856  DataType::QAsymmU8,
1857  DataType::QSymmS16
1858  };
1859 
1860  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1861  "Reference splitter: input type not supported");
1862 
1863  return supported;
1864 }
1865 
1867  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
1868  const ViewsDescriptor& descriptor,
1869  Optional<std::string&> reasonIfUnsupported) const
1870 {
1871  IgnoreUnused(descriptor);
1872  bool supported = true;
1873  std::array<DataType,6> supportedTypes =
1874  {
1875  DataType::BFloat16,
1876  DataType::Float32,
1877  DataType::Float16,
1878  DataType::QAsymmS8,
1879  DataType::QAsymmU8,
1880  DataType::QSymmS16
1881  };
1882 
1883  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1884  "Reference splitter: output type not supported");
1885  for (const TensorInfo output : outputs)
1886  {
1887  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1888  "Reference splitter: input type not supported");
1889 
1890  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1891  "Reference splitter: input and output types mismatched.");
1892  }
1893 
1894  return supported;
1895 }
1896 
1897 bool RefLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
1898  const TensorInfo& output,
1899  const StackDescriptor& descriptor,
1900  Optional<std::string&> reasonIfUnsupported) const
1901 {
1902  IgnoreUnused(descriptor);
1903 
1904  bool supported = true;
1905  std::array<DataType,6> supportedTypes =
1906  {
1907  DataType::BFloat16,
1908  DataType::Float32,
1909  DataType::Float16,
1910  DataType::QAsymmS8,
1911  DataType::QAsymmU8,
1912  DataType::QSymmS16
1913  };
1914 
1915  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1916  "Reference stack: output type not supported");
1917  for (const TensorInfo* input : inputs)
1918  {
1919  ARMNN_ASSERT(input != nullptr);
1920  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
1921  "Reference stack: input type not supported");
1922 
1923  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
1924  "Reference stack: input and output types mismatched.");
1925  }
1926 
1927  return supported;
1928 }
1929 
1931  const TensorInfo& output,
1932  const StridedSliceDescriptor& descriptor,
1933  Optional<std::string&> reasonIfUnsupported) const
1934 {
1935  IgnoreUnused(descriptor);
1936  bool supported = true;
1937 
1938  std::array<DataType,5> supportedTypes =
1939  {
1940  DataType::BFloat16,
1941  DataType::Float32,
1942  DataType::QAsymmS8,
1943  DataType::QAsymmU8,
1944  DataType::QSymmS16
1945  };
1946 
1947  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1948  "Reference StridedSlice: input type not supported");
1949 
1950  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1951  "Reference StridedSlice: output type not supported");
1952 
1953  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1954  "Reference StridedSlice: input and output types are mismatched");
1955 
1956  return supported;
1957 }
1958 
1960  const TensorInfo& input1,
1961  const TensorInfo& output,
1962  Optional<std::string&> reasonIfUnsupported) const
1963 {
1964  bool supported = true;
1965 
1966  std::array<DataType,6> supportedTypes = {
1967  DataType::BFloat16,
1968  DataType::Float32,
1969  DataType::Float16,
1970  DataType::QAsymmS8,
1971  DataType::QAsymmU8,
1972  DataType::QSymmS16
1973  };
1974 
1975  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1976  "Reference subtraction: input 0 is not a supported type.");
1977 
1978  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1979  "Reference subtraction: input 1 is not a supported type.");
1980 
1981  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1982  "Reference subtraction: output is not a supported type.");
1983 
1984  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1985  "Reference subtraction: input 0 and Input 1 types are mismatched");
1986 
1987  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1988  "Reference subtraction: input and output types are mismatched");
1989 
1990  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1991  "Reference subtraction: shapes are not suitable for implicit broadcast.");
1992 
1993  return supported;
1994 }
1995 
1997  const TensorInfo& alpha,
1998  const TensorInfo& output,
1999  Optional<std::string&> reasonIfUnsupported) const
2000 {
2001  bool supported = true;
2002 
2003  std::array<DataType, 6> supportedTypes
2004  {
2005  DataType::BFloat16,
2006  DataType::Float32,
2007  DataType::Float16,
2008  DataType::QAsymmS8,
2009  DataType::QAsymmU8,
2010  DataType::QSymmS16
2011  };
2012 
2013  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2014  "PReLU: input is not a supported type.");
2015 
2016  supported &= CheckSupportRule(TypeAnyOf(alpha, supportedTypes), reasonIfUnsupported,
2017  "PReLU: alpha is not a supported type.");
2018 
2019  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2020  "PReLU: output is not a supported type.");
2021 
2022  supported &= CheckSupportRule(TypesAreEqual(input, alpha, output), reasonIfUnsupported,
2023  "PReLU: input, alpha and output types are mismatched");
2024 
2025  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input, alpha, output), reasonIfUnsupported,
2026  "PReLU: shapes are not suitable for implicit broadcast");
2027 
2028  return supported;
2029 }
2030 
                                                       const TensorInfo& output,
                                                       const TransposeConvolution2dDescriptor& descriptor,
                                                       const TensorInfo& weights,
                                                       const Optional<TensorInfo>& biases,
                                                       Optional<std::string&> reasonIfUnsupported) const
{
    IgnoreUnused(descriptor);
    bool supported = true;

    // Data types the reference transpose-convolution implementation handles.
    std::array<DataType,7> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS8,
        DataType::QSymmS16
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference TransposeConvolution2d: input is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference TransposeConvolution2d: output is not a supported type.");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference TransposeConvolution2d: input and output types mismatched.");


    const DataType inputType = input.GetDataType();
    if (IsQuantized8BitType(inputType))
    {
        // Quantized input: weights must be one of the 8-bit quantized types.
        // Unlike the non-quantized path below, the weights type is not
        // required to match the input type exactly (e.g. per-axis weights).
        std::array<DataType, 4> supportedWeightTypes =
        {
            DataType::QAsymmS8,
            DataType::QAsymmU8,
            DataType::QSymmS8,
            DataType::QuantizedSymm8PerAxis //Deprecated
        };

        supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
                                      "Reference TransposeConvolution2d: weights type not supported for "
                                      "quantized input.");
    }
    else
    {
        // Float paths: weights must be a supported type and match the input.
        supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
                                      "Reference TransposeConvolution2d: weights is not a supported type.");

        supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
                                      "Reference TransposeConvolution2d: input and weights types mismatched.");
    }

    // Bias is optional; when present it must be one of the accumulator types
    // (note Signed32 for the quantized paths, float types otherwise).
    if (biases.has_value())
    {
        std::array<DataType,4> biasesSupportedTypes =
        {
            DataType::BFloat16,
            DataType::Float32,
            DataType::Float16,
            DataType::Signed32
        };
        supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
                                      "Reference TransposeConvolution2d: biases is not a supported type.");
    }

    return supported;
}
2103 
2104 bool RefLayerSupport::IsTransposeSupported(const TensorInfo& input,
2105  const TensorInfo& output,
2106  const TransposeDescriptor& descriptor,
2107  Optional<std::string&> reasonIfUnsupported) const
2108 {
2109  IgnoreUnused(descriptor);
2110  bool supported = true;
2111 
2112  // Define supported output and inputs types.
2113  std::array<DataType, 6> supportedTypes =
2114  {
2115  DataType::BFloat16,
2116  DataType::Float32,
2117  DataType::Float16,
2118  DataType::QAsymmS8,
2119  DataType::QAsymmU8,
2120  DataType::QSymmS16
2121  };
2122 
2123  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2124  "Reference transpose: input is not a supported type.");
2125 
2126  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2127  "Reference transpose: output is not a supported type.");
2128 
2129  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2130  "Reference transpose: input and output types are mismatched.");
2131 
2132  return supported;
2133 }
2134 
2135 } // namespace armnn
bool m_ProjectionEnabled
Enable/disable the projection layer.
bool IsSoftmaxSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsDequantizeSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsDivisionSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsConvertFp32ToFp16Supported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsConcatSupported(const BackendId &backend, const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A ViewsDescriptor for the SplitterLayer.
const TensorInfo & GetRecurrentToCellWeights() const
Definition: LstmParams.hpp:145
bool IsPooling2dSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
bool IsL2NormalizationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
const TensorInfo & GetCellBias() const
Definition: LstmParams.hpp:173
A ReshapeDescriptor for the ReshapeLayer.
const TensorInfo & GetRecurrentToInputWeights() const
Definition: LstmParams.hpp:137
const TensorInfo & GetCellLayerNormWeights() const
Definition: LstmParams.hpp:197
bool IsArgMinMaxSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, char *reasonIfUnsupported, size_t reasonIfUnsupportedMaxLength)
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
bool IsBatchToSpaceNdSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:70
const TensorInfo & GetRecurrentToOutputWeights() const
Definition: LstmParams.hpp:149
bool IsMeanSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsMultiplicationSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
ISubgraphViewConverter supported
A Convolution2dDescriptor for the Convolution2dLayer.
bool IsDebugSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsConvertFp16ToFp32Supported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsPreluSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
const TensorInfo & GetCellToInputWeights() const
Definition: LstmParams.hpp:153
bool IsFullyConnectedSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsTransposeConvolution2dSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsEqualSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsDepthwiseConvolutionSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
std::vector< float > boxEncodings({ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f })
bool IsGreaterSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsPadSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
Copyright (c) 2020 ARM Limited.
bool IsNormalizationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
void IgnoreUnused(Ts &&...)
const TensorInfo & GetCellToForgetWeights() const
Definition: LstmParams.hpp:157
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
const TensorInfo & GetForgetLayerNormWeights() const
Definition: LstmParams.hpp:193
bool IsAdditionSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
const TensorInfo & GetCellToOutputWeights() const
Definition: LstmParams.hpp:161
A ResizeDescriptor for the ResizeLayer.
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
A StackDescriptor for the StackLayer.
bool IsResizeSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:241
bool IsFloorSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsInputSupported(const BackendId &backend, const TensorInfo &input, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsMemCopySupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
const TensorInfo & GetInputToCellWeights() const
Definition: LstmParams.hpp:129
A PadDescriptor for the PadLayer.
DataType
Definition: Types.hpp:32
bool IsConstantSupported(const BackendId &backend, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsQLstmSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, char *reasonIfUnsupported, size_t reasonIfUnsupportedMaxLength)
An LstmDescriptor for the LstmLayer.
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept...
bool IsRsqrtSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
const TensorInfo & GetInputToOutputWeights() const
Definition: LstmParams.hpp:133
A L2NormalizationDescriptor for the L2NormalizationLayer.
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:51
bool IsOutputSupported(const BackendId &backend, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
DataType GetDataType() const
Definition: Tensor.hpp:95
An OriginsDescriptor for the ConcatLayer.
bool IsLstmSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool has_value() const noexcept
Definition: Optional.hpp:53
A FullyConnectedDescriptor for the FullyConnectedLayer.
bool IsFakeQuantizationSupported(const BackendId &backend, const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool m_BiasEnabled
Enable/disable bias.
bool IsStridedSliceSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsStackSupported(const BackendId &backend, const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const StackDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsSubtractionSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A FakeQuantizationDescriptor for the FakeQuantizationLayer.
bool IsResizeBilinearSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
const TensorInfo * m_ProjectionBias
Definition: LstmParams.hpp:105
bool m_PeepholeEnabled
Enable/disable peephole.
bool IsPermuteSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsQuantizeSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported, size_t reasonIfUnsupportedMaxLength)
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
A QLstmDescriptor for the QLstmLayer.
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:33
bool IsSpaceToBatchNdSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsBatchNormalizationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsActivationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:20
bool IsSplitterSupported(const BackendId &backend, const TensorInfo &input, const ViewsDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
const TensorInfo & GetRecurrentToForgetWeights() const
Definition: LstmParams.hpp:141
A SliceDescriptor for the SliceLayer.
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
const TensorInfo & GetInputToInputWeights() const
Definition: LstmParams.hpp:121
const TensorInfo & GetOutputLayerNormWeights() const
Definition: LstmParams.hpp:201
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:90
const TensorInfo & GetForgetGateBias() const
Definition: LstmParams.hpp:169
std::vector< float > scores({ 0.0f, 0.9f, 0.8f, 0.0f, 0.75f, 0.72f, 0.0f, 0.6f, 0.5f, 0.0f, 0.93f, 0.95f, 0.0f, 0.5f, 0.4f, 0.0f, 0.3f, 0.2f })
bool IsConvolution2dSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A MeanDescriptor for the MeanLayer.
bool IsMergerSupported(const BackendId &backend, const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsMaximumSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnSupported=nullptr, size_t reasonIfUnSupportedMaxLength=0)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool m_LayerNormEnabled
Enable/disable layer normalization.
const TensorInfo & GetInputGateBias() const
Definition: LstmParams.hpp:165
A TransposeDescriptor for the TransposeLayer.
const TensorInfo & GetProjectionWeights() const
Definition: LstmParams.hpp:181
A StridedSliceDescriptor for the StridedSliceLayer.
const TensorInfo & GetInputToForgetWeights() const
Definition: LstmParams.hpp:125
const TensorInfo & GetInputLayerNormWeights() const
Definition: LstmParams.hpp:189
bool IsGatherSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported, size_t reasonIfUnsupportedMaxLength)
A Pooling2dDescriptor for the Pooling2dLayer.
bool IsSpaceToDepthSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A NormalizationDescriptor for the NormalizationLayer.
const TensorInfo & GetOutputGateBias() const
Definition: LstmParams.hpp:177
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
const TensorInfo & GetProjectionBias() const
Definition: LstmParams.hpp:185
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:92
bool IsSupportedForDataTypeGeneric(Optional< std::string &> reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
A SoftmaxDescriptor for the SoftmaxLayer.
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)
bool IsMinimumSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:43
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
bool IsDetectionPostProcessSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const DetectionPostProcessDescriptor &descriptor, char *reasonIfUnsupported, size_t reasonIfUnsupportedMaxLength)
A PermuteDescriptor for the PermuteLayer.
bool IsReshapeSupported(const BackendId &backend, const TensorInfo &input, const ReshapeDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
std::vector< float > anchors({ 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 100.5f, 1.0f, 1.0f })