RefLayerSupport.cpp (ArmNN 20.02)
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "RefLayerSupport.hpp"
7 
8 #include <armnn/TypesUtils.hpp>
9 #include <armnn/Types.hpp>
10 #include <armnn/Descriptors.hpp>
11 #include <armnn/utility/IgnoreUnused.hpp>
12 
13 #include <LayerSupportCommon.hpp>
14 #include <backendsCommon/LayerSupportRules.hpp>
15 
16 #include <boost/cast.hpp>
17 
18 #include <vector>
19 #include <array>
20 
21 using namespace boost;
22 
23 namespace armnn
24 {
25 
26 namespace
27 {
28 
29 template<typename Float32Func, typename Uint8Func, typename ... Params>
30 bool IsSupportedForDataTypeRef(Optional<std::string&> reasonIfUnsupported,
31  DataType dataType,
32  Float32Func floatFuncPtr,
33  Uint8Func uint8FuncPtr,
34  Params&&... params)
35 {
36  return IsSupportedForDataTypeGeneric(reasonIfUnsupported,
37  dataType,
38  &FalseFunc<Params...>,
39  floatFuncPtr,
40  uint8FuncPtr,
41  &FalseFunc<Params...>,
42  &FalseFunc<Params...>,
43  std::forward<Params>(params)...);
44 }
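// Editorial note: IsSupportedForDataTypeRef forwards to IsSupportedForDataTypeGeneric, passing
// FalseFunc for the Float16, Signed32 and Boolean slots, so only the Float32 and QAsymmU8
// predicates supplied by the caller can ever report a data type as supported.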
45 
46 } // anonymous namespace
47 
48 namespace
49 {
50 
51 std::string CreateIncorrectDimensionsErrorMsg(unsigned int expected,
52  unsigned int actual,
53  std::string& layerStr,
54  std::string& tensorName)
55 {
56  std::string errorMsg = "Reference " + layerStr + ": Expected " + std::to_string(expected) + " dimensions but got" +
57  " " + std::to_string(actual) + " dimensions instead, for the '" + tensorName + "' tensor.";
58 
59  return errorMsg;
60 }
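// Editorial note: as an example, calling this with expected = 4, actual = 2, layerStr = "batchToSpaceNd"
// and tensorName = "output" produces:
//   "Reference batchToSpaceNd: Expected 4 dimensions but got 2 dimensions instead, for the 'output' tensor."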
61 
62 } // anonymous namespace
63 
64 bool RefLayerSupport::IsAbsSupported(const TensorInfo& input, const TensorInfo& output,
65  Optional<std::string&> reasonIfUnsupported) const
66 {
67  return IsElementwiseUnarySupported(input,
68  output,
69  ElementwiseUnaryDescriptor(UnaryOperation::Abs),
70  reasonIfUnsupported);
71 }
72 
73 bool RefLayerSupport::IsActivationSupported(const TensorInfo& input,
74  const TensorInfo& output,
75  const ActivationDescriptor& descriptor,
76  Optional<std::string&> reasonIfUnsupported) const
77 {
78  bool supported = true;
79 
80  // Define supported types.
81  std::array<DataType,6> supportedTypes = {
82  DataType::BFloat16,
83  DataType::Float32,
84  DataType::Float16,
85  DataType::QAsymmS8,
86  DataType::QAsymmU8,
87  DataType::QSymmS16
88  };
89 
90  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
91  "Reference activation: input type not supported.");
92 
93  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
94  "Reference activation: output type not supported.");
95 
96  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
97  "Reference activation: input and output types mismatched.");
98 
99  supported &= CheckSupportRule(ShapesAreSameRank(input, output), reasonIfUnsupported,
100  "Reference activation: input and output shapes are of different rank.");
101 
102 
103  struct ActivationFunctionSupported : public Rule
104  {
105  ActivationFunctionSupported(const ActivationDescriptor& desc)
106  {
107  switch(desc.m_Function)
108  {
109  case ActivationFunction::Abs:
110  case ActivationFunction::BoundedReLu:
111  case ActivationFunction::Elu:
112  case ActivationFunction::HardSwish:
113  case ActivationFunction::LeakyReLu:
114  case ActivationFunction::Linear:
115  case ActivationFunction::ReLu:
116  case ActivationFunction::Sigmoid:
117  case ActivationFunction::SoftReLu:
118  case ActivationFunction::Sqrt:
119  case ActivationFunction::Square:
120  case ActivationFunction::TanH:
121  {
122  m_Res = true;
123  break;
124  }
125  default:
126  {
127  m_Res = false;
128  break;
129  }
130  }
131  }
132  };
133 
134  // Function is supported
135  supported &= CheckSupportRule(ActivationFunctionSupported(descriptor), reasonIfUnsupported,
136  "Reference activation: function not supported.");
137 
138  return supported;
139 }
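// Editorial note: an illustrative query of the reference backend (sketch, not part of this file),
// assuming 'inputInfo' and 'outputInfo' are TensorInfo objects describing the workload:
//   RefLayerSupport layerSupport;
//   ActivationDescriptor activationDesc;
//   activationDesc.m_Function = ActivationFunction::ReLu;
//   std::string reason;
//   bool isSupported = layerSupport.IsActivationSupported(inputInfo, outputInfo, activationDesc,
//                                                         Optional<std::string&>(reason));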
140 
141 bool RefLayerSupport::IsAdditionSupported(const TensorInfo& input0,
142  const TensorInfo& input1,
143  const TensorInfo& output,
144  Optional<std::string&> reasonIfUnsupported) const
145 {
146  bool supported = true;
147 
148  std::array<DataType,6> supportedTypes = {
149  DataType::BFloat16,
150  DataType::Float32,
151  DataType::Float16,
152  DataType::QAsymmS8,
153  DataType::QAsymmU8,
154  DataType::QSymmS16
155  };
156 
157  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
158  "Reference addition: input 0 is not a supported type.");
159 
160  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
161  "Reference addition: input 1 is not a supported type.");
162 
163  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
164  "Reference addition: output is not a supported type.");
165 
166  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
167  "Reference addition: input 0 and Input 1 types are mismatched");
168 
169  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
170  "Reference addition: input and output types are mismatched");
171 
172  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
173  "Reference addition: shapes are not suitable for implicit broadcast.");
174 
175  return supported;
176 }
177 
178 bool RefLayerSupport::IsArgMinMaxSupported(const armnn::TensorInfo &input, const armnn::TensorInfo &output,
179  const armnn::ArgMinMaxDescriptor &descriptor,
180  armnn::Optional<std::string &> reasonIfUnsupported) const
181 {
182  IgnoreUnused(descriptor);
183 
184  std::array<DataType, 5> supportedTypes =
185  {
186  DataType::BFloat16,
187  DataType::Float32,
188  DataType::QAsymmU8,
189  DataType::QSymmS16,
190  DataType::Signed32
191  };
192 
193  bool supported = true;
194 
195  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
196  "Reference ArgMinMax: input is not a supported type.");
197  supported &= CheckSupportRule(TypeIs(output, DataType::Signed32), reasonIfUnsupported,
198  "Reference ArgMinMax: output type not supported");
199 
200  return supported;
201 }
202 
203 bool RefLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
204  const TensorInfo& output,
205  const TensorInfo& mean,
206  const TensorInfo& variance,
207  const TensorInfo& beta,
208  const TensorInfo& gamma,
209  const BatchNormalizationDescriptor& descriptor,
210  Optional<std::string&> reasonIfUnsupported) const
211 {
212  IgnoreUnused(descriptor);
213 
214  std::array<DataType, 5> supportedTypes =
215  {
216  DataType::BFloat16,
217  DataType::Float32,
218  DataType::Float16,
219  DataType::QAsymmU8,
220  DataType::QSymmS16
221  };
222 
223  bool supported = true;
224 
225  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
226  "Reference batch normalization: input is not a supported type.");
227 
228  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
229  "Reference batch normalization: output is not a supported type.");
230 
231  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
232  "Reference batch normalization: input and output types are mismatched");
233 
234  supported &= CheckSupportRule(TypeAnyOf(mean, supportedTypes), reasonIfUnsupported,
235  "Reference batch normalization: mean is not a supported type.");
236 
237  supported &= CheckSupportRule(TypeAnyOf(variance, supportedTypes), reasonIfUnsupported,
238  "Reference batch normalization: variance is not a supported type.");
239 
240  supported &= CheckSupportRule(TypeAnyOf(beta, supportedTypes), reasonIfUnsupported,
241  "Reference batch normalization: beta is not a supported type.");
242 
243  supported &= CheckSupportRule(TypeAnyOf(gamma, supportedTypes), reasonIfUnsupported,
244  "Reference batch normalization: gamma is not a supported type.");
245 
246  return supported;
247 }
248 
249 bool RefLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
250  const TensorInfo& output,
251  const BatchToSpaceNdDescriptor& descriptor,
252  Optional<std::string&> reasonIfUnsupported) const
253 {
254  IgnoreUnused(descriptor);
255 
256  bool supported = true;
257 
258  std::string batchToSpaceNdLayerStr = "batchToSpaceNd";
259  std::string inputTensorStr = "input";
260  std::string outputTensorStr = "output";
261 
262  // Define supported types.
263  std::array<DataType,5> supportedTypes =
264  {
265  DataType::BFloat16,
266  DataType::Float32,
267  DataType::Float16,
268  DataType::QAsymmU8,
269  DataType::QSymmS16
270  };
271 
272  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
273  "Reference BatchToSpaceNd: input type not supported.");
274 
275  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
276  "Reference BatchToSpaceNd: output type not supported.");
277 
278  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
279  "Reference BatchToSpaceNd: input and output types mismatched.");
280 
281  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 4),
282  reasonIfUnsupported,
283  CreateIncorrectDimensionsErrorMsg(4,
284  output.GetNumDimensions(),
285  batchToSpaceNdLayerStr,
286  outputTensorStr).data());
287 
288  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(input, 4),
289  reasonIfUnsupported,
290  CreateIncorrectDimensionsErrorMsg(4,
291  input.GetNumDimensions(),
292  batchToSpaceNdLayerStr,
293  inputTensorStr).data());
294 
295  return supported;
296 }
297 
298 bool RefLayerSupport::IsComparisonSupported(const TensorInfo& input0,
299  const TensorInfo& input1,
300  const TensorInfo& output,
301  const ComparisonDescriptor& descriptor,
302  Optional<std::string&> reasonIfUnsupported) const
303 {
304  IgnoreUnused(descriptor);
305 
306  std::array<DataType, 5> supportedInputTypes =
307  {
308  DataType::BFloat16,
309  DataType::Float32,
310  DataType::Float16,
311  DataType::QAsymmU8,
312  DataType::QSymmS16
313  };
314 
315  bool supported = true;
316  supported &= CheckSupportRule(TypeAnyOf(input0, supportedInputTypes), reasonIfUnsupported,
317  "Reference comparison: input 0 is not a supported type");
318 
319  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
320  "Reference comparison: input 0 and Input 1 types are mismatched");
321 
322  supported &= CheckSupportRule(TypeIs(output, DataType::Boolean), reasonIfUnsupported,
323  "Reference comparison: output is not of type Boolean");
324 
325  return supported;
326 }
327 
328 bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
329  const TensorInfo& output,
330  const ConcatDescriptor& descriptor,
331  Optional<std::string&> reasonIfUnsupported) const
332 {
333  IgnoreUnused(descriptor);
334 
335  bool supported = true;
336  std::array<DataType,6> supportedTypes =
337  {
338  DataType::BFloat16,
339  DataType::Float32,
340  DataType::Float16,
341  DataType::QAsymmU8,
342  DataType::QAsymmS8,
343  DataType::QSymmS16
344  };
345 
346  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
347  "Reference concatenation: output type not supported");
348  for (const TensorInfo* input : inputs)
349  {
350  BOOST_ASSERT(input != nullptr);
351  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
352  "Reference concatenation: input type not supported");
353 
354  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
355  "Reference concatenation: input and output types mismatched.");
356  }
357 
358  return supported;
359 }
360 
361 bool RefLayerSupport::IsConstantSupported(const TensorInfo& output,
362  Optional<std::string&> reasonIfUnsupported) const
363 {
364  std::array<DataType,7> supportedTypes =
365  {
366  DataType::BFloat16,
367  DataType::Float32,
368  DataType::Signed32,
369  DataType::QAsymmU8,
370  DataType::QAsymmS8,
371  DataType::QSymmS8,
372  DataType::QSymmS16
373  };
374 
375  return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
376  "Reference constant: output is not a supported type.");
377 }
378 
379 bool RefLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
380  const TensorInfo& output,
381  Optional<std::string&> reasonIfUnsupported) const
382 {
383  return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
384  input.GetDataType(),
385  &TrueFunc<>,
386  &FalseInputFuncF32<>,
387  &FalseFuncU8<>,
388  &FalseFuncI32<>,
389  &FalseFuncU8<>) &&
390  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
391  output.GetDataType(),
392  &FalseOutputFuncF16<>,
393  &TrueFunc<>,
394  &FalseFuncU8<>,
395  &FalseFuncI32<>,
396  &FalseFuncU8<>));
397 }
398 
399 bool RefLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
400  const TensorInfo& output,
401  Optional<std::string&> reasonIfUnsupported) const
402 {
403  return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
404  input.GetDataType(),
405  &FalseInputFuncF16<>,
406  &TrueFunc<>,
407  &FalseFuncU8<>,
408  &FalseFuncI32<>,
409  &FalseFuncU8<>) &&
410  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
411  output.GetDataType(),
412  &TrueFunc<>,
413  &FalseOutputFuncF32<>,
414  &FalseFuncU8<>,
415  &FalseFuncI32<>,
416  &FalseFuncU8<>));
417 }
418 
419 bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
420  const TensorInfo& output,
421  const Convolution2dDescriptor& descriptor,
422  const TensorInfo& weights,
423  const Optional<TensorInfo>& biases,
424  Optional<std::string&> reasonIfUnsupported) const
425 {
426  bool supported = true;
427 
428  // Define supported types.
429  std::array<DataType,7> supportedTypes =
430  {
431  DataType::BFloat16,
432  DataType::Float32,
433  DataType::Float16,
434  DataType::QAsymmU8,
435  DataType::QAsymmS8,
436  DataType::QSymmS8,
437  DataType::QSymmS16
438  };
439 
440  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
441  "Reference Convolution2d: input is not a supported type.");
442 
443  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
444  "Reference Convolution2d: output is not a supported type.");
445 
446  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
447  "Reference Convolution2d: input and output types mismatched.");
448 
449  const DataType inputType = input.GetDataType();
450  if (IsQuantized8BitType(inputType))
451  {
452  ARMNN_NO_DEPRECATE_WARN_BEGIN
453  std::array<DataType, 4> supportedWeightTypes =
454  {
455  DataType::QAsymmU8,
456  DataType::QSymmS8,
457  DataType::QAsymmS8,
458  DataType::QuantizedSymm8PerAxis // deprecated
459  };
460  ARMNN_NO_DEPRECATE_WARN_END
461 
462  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
463  "Reference Convolution2d: weights type not supported for quantized input.");
464  }
465  else
466  {
467  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
468  "Reference Convolution2d: weights is not a supported type.");
469 
470  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
471  "Reference Convolution2d: input and weights types mismatched.");
472  }
473 
474  if (biases.has_value())
475  {
476  std::array<DataType,4> biasesSupportedTypes =
477  {
478  DataType::BFloat16,
479  DataType::Float32,
480  DataType::Float16,
481  DataType::Signed32
482  };
483 
484  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
485  "Reference Convolution2d: biases is not a supported type.");
486  }
487  IgnoreUnused(descriptor);
488 
489  return supported;
490 }
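// Editorial note: when the input uses an 8-bit quantized data type, the weights may use a different
// 8-bit scheme (including the deprecated per-axis type), which is why the TypesAreEqual(input, weights)
// rule is only applied in the non-quantized branch above.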
491 
492 bool RefLayerSupport::IsDebugSupported(const TensorInfo& input,
493  const TensorInfo& output,
494  Optional<std::string&> reasonIfUnsupported) const
495 {
496  bool supported = true;
497 
498  std::array<DataType, 8> supportedTypes =
499  {
500  DataType::BFloat16,
501  DataType::Float16,
502  DataType::Float32,
503  DataType::QAsymmU8,
504  DataType::QAsymmS8,
505  DataType::QSymmS8,
506  DataType::QSymmS16,
507  DataType::Signed32
508  };
509 
510  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
511  "Reference for Debug layer: input type not supported");
512 
513  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
514  "Reference for Debug layer: output type not supported");
515 
516  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
517  "Reference for Debug layer: input and output types are mismatched");
518 
519  return supported;
520 }
521 
522 bool RefLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
523  const TensorInfo& output,
524  const DepthToSpaceDescriptor& descriptor,
525  Optional<std::string&> reasonIfUnsupported) const
526 {
527  IgnoreUnused(descriptor);
528  bool supported = true;
529 
530  std::array<DataType,5> supportedTypes =
531  {
532  DataType::BFloat16,
533  DataType::Float32,
534  DataType::Float16,
535  DataType::QAsymmU8,
536  DataType::QSymmS16
537  };
538 
539  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
540  "Reference DepthToSpace: input type not supported");
541 
542  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
543  "Reference DepthToSpace: output type not supported");
544 
545  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
546  "Reference DepthToSpace: input and output types are mismatched");
547 
548  return supported;
549 }
550 
551 bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
552  const TensorInfo& output,
553  const DepthwiseConvolution2dDescriptor& descriptor,
554  const TensorInfo& weights,
555  const Optional<TensorInfo>& biases,
556  Optional<std::string&> reasonIfUnsupported) const
557 {
558  bool supported = true;
559 
560  // Define supported types.
561  std::array<DataType,7> supportedTypes =
562  {
563  DataType::BFloat16,
564  DataType::Float32,
565  DataType::Float16,
566  DataType::QSymmS8,
567  DataType::QAsymmS8,
568  DataType::QAsymmU8,
569  DataType::QSymmS16
570  };
571 
572  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
573  "Reference DepthwiseConvolution2d: input is not a supported type.");
574 
575  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
576  "Reference DepthwiseConvolution2d: output is not a supported type.");
577 
578  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
579  "Reference DepthwiseConvolution2d: input and output types mismatched.");
580 
581  ARMNN_NO_DEPRECATE_WARN_BEGIN
582  std::array<DataType, 3> supportedWeightTypes =
583  {
584  DataType::QAsymmU8,
585  DataType::QSymmS8,
586  DataType::QuantizedSymm8PerAxis // deprecated
587  };
588  ARMNN_NO_DEPRECATE_WARN_END
589 
590  const DataType inputType = input.GetDataType();
591  if (IsQuantized8BitType(inputType))
592  {
593 
594  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
595  "Reference convolution2d: weights type not supported for quantized input.");
596  }
597  else
598  {
599  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
600  "Reference DepthwiseConvolution2d: weights is not a supported type.");
601 
602  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
603  "Reference DepthwiseConvolution2d: input and weights types mismatched.");
604  }
605 
606  if (biases.has_value())
607  {
608  std::array<DataType,4> biasesSupportedTypes =
609  {
610  DataType::BFloat16,
611  DataType::Float32,
612  DataType::Float16,
613  DataType::Signed32
614  };
615  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
616  "Reference DepthwiseConvolution2d: biases is not a supported type.");
617  }
618  IgnoreUnused(descriptor);
619 
620  return supported;
621 
622 }
623 
624 bool RefLayerSupport::IsDequantizeSupported(const TensorInfo& input,
625  const TensorInfo& output,
626  Optional<std::string&> reasonIfUnsupported) const
627 {
628  bool supported = true;
629 
630  std::array<DataType,4> supportedInputTypes = {
631  DataType::QAsymmS8,
632  DataType::QAsymmU8,
633  DataType::QSymmS8,
634  DataType::QSymmS16
635  };
636 
637  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
638  "Reference for Dequantize layer: input type not supported.");
639 
640  supported &= CheckSupportRule(TypeNotPerAxisQuantized(input), reasonIfUnsupported,
641  "Reference for Dequantize layer: per-axis quantized input not supported.");
645 
646  std::array<DataType,3> supportedOutputTypes = {
647  DataType::BFloat16,
648  DataType::Float32,
649  DataType::Float16
650  };
651 
652  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
653  "Reference for Dequantize layer: output type not supported.");
654 
655  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
656  "Reference for Dequantize layer: input/output shapes have different num total "
657  "elements.");
658 
659  return supported;
660 }
661 
662 bool RefLayerSupport::IsDetectionPostProcessSupported(const TensorInfo& boxEncodings,
663  const TensorInfo& scores,
664  const TensorInfo& anchors,
665  const TensorInfo& detectionBoxes,
666  const TensorInfo& detectionClasses,
667  const TensorInfo& detectionScores,
668  const TensorInfo& numDetections,
669  const DetectionPostProcessDescriptor& descriptor,
670  Optional<std::string&> reasonIfUnsupported) const
671 {
672  IgnoreUnused(anchors, detectionBoxes, detectionClasses, detectionScores, numDetections, descriptor);
673 
674  bool supported = true;
675 
676  std::array<DataType,4> supportedInputTypes =
677  {
678  DataType::BFloat16,
679  DataType::Float32,
680  DataType::QAsymmU8,
681  DataType::QSymmS16
682  };
683 
684  supported &= CheckSupportRule(TypeAnyOf(boxEncodings, supportedInputTypes), reasonIfUnsupported,
685  "Reference DetectionPostProcess: input 0 is not a supported type.");
686 
687  supported &= CheckSupportRule(TypeAnyOf(scores, supportedInputTypes), reasonIfUnsupported,
688  "Reference DetectionPostProcess: input 1 is not a supported type.");
689 
690  return supported;
691 }
692 
693 bool RefLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
694  const TensorInfo& output,
695  const DepthwiseConvolution2dDescriptor& descriptor,
696  const TensorInfo& weights,
697  const Optional<TensorInfo>& biases,
698  Optional<std::string&> reasonIfUnsupported) const
699 {
700  return IsDepthwiseConvolutionSupported(input, output, descriptor, weights, biases, reasonIfUnsupported);
701 }
702 
703 bool RefLayerSupport::IsDivisionSupported(const TensorInfo& input0,
704  const TensorInfo& input1,
705  const TensorInfo& output,
706  Optional<std::string&> reasonIfUnsupported) const
707 {
708  bool supported = true;
709 
710  std::array<DataType,5> supportedTypes = {
711  DataType::BFloat16,
712  DataType::Float32,
713  DataType::Float16,
714  DataType::QAsymmU8,
715  DataType::QSymmS16
716  };
717 
718  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
719  "Reference division: input 0 is not a supported type.");
720 
721  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
722  "Reference division: input 1 is not a supported type.");
723 
724  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
725  "Reference division: output is not a supported type.");
726 
727  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
728  "Reference division: input 0 and Input 1 types are mismatched");
729 
730  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
731  "Reference division: input and output types are mismatched");
732 
733  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
734  "Reference division: shapes are not suitable for implicit broadcast.");
735 
736  return supported;
737 }
738 
739 bool RefLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
740  const TensorInfo& output,
741  const ElementwiseUnaryDescriptor& descriptor,
742  Optional<std::string&> reasonIfUnsupported) const
743 {
744  IgnoreUnused(descriptor);
745 
746  std::array<DataType, 5> supportedTypes =
747  {
748  DataType::BFloat16,
749  DataType::Float32,
750  DataType::Float16,
751  DataType::QAsymmU8,
752  DataType::QSymmS16
753  };
754 
755  bool supported = true;
756 
757  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
758  "Reference elementwise unary: input type not supported");
759 
760  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
761  "Reference elementwise unary: output type not supported");
762 
763  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
764  "Reference elementwise unary: input and output types not matching");
765 
766  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
767  "Reference elementwise unary: input and output shapes"
768  "have different number of total elements");
769 
770  return supported;
771 }
772 
773 bool RefLayerSupport::IsEqualSupported(const TensorInfo& input0,
774  const TensorInfo& input1,
775  const TensorInfo& output,
776  Optional<std::string&> reasonIfUnsupported) const
777 {
778  return IsComparisonSupported(input0,
779  input1,
780  output,
781  ComparisonDescriptor(ComparisonOperation::Equal),
782  reasonIfUnsupported);
783 }
784 
785 bool RefLayerSupport::IsFakeQuantizationSupported(const TensorInfo& input,
786  const FakeQuantizationDescriptor& descriptor,
787  Optional<std::string&> reasonIfUnsupported) const
788 {
789  IgnoreUnused(descriptor);
790  bool supported = true;
791 
792  std::array<DataType,1> supportedTypes =
793  {
794  DataType::Float32
795  };
796 
797  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
798  "Reference fake quantization: input type not supported.");
799 
800  return supported;
801 }
802 
803 bool RefLayerSupport::IsFloorSupported(const TensorInfo& input,
804  const TensorInfo& output,
805  Optional<std::string&> reasonIfUnsupported) const
806 {
807  IgnoreUnused(output);
808  bool supported = true;
809 
810  std::array<DataType,4> supportedTypes =
811  {
812  DataType::BFloat16,
813  DataType::Float32,
814  DataType::Float16,
815  DataType::QSymmS16
816  };
817 
818  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
819  "Reference Floor: input type not supported.");
820 
821  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
822  "Reference Floor: output type not supported.");
823 
824  return supported;
825 }
826 
827 bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
828  const TensorInfo& output,
829  const TensorInfo& weights,
830  const TensorInfo& biases,
831  const FullyConnectedDescriptor& descriptor,
832  Optional<std::string&> reasonIfUnsupported) const
833 {
834  bool supported = true;
835 
836  // Define supported types.
837  std::array<DataType,6> supportedTypes =
838  {
839  DataType::BFloat16,
840  DataType::Float32,
841  DataType::Float16,
842  DataType::QAsymmU8,
843  DataType::QAsymmS8,
844  DataType::QSymmS16
845  };
846 
847  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
848  "Reference Fully Connected: input type not supported.");
849 
850  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
851  "Reference Fully Connected: output type not supported.");
852 
853  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
854  "Reference Fully Connected: input and output types mismatched.");
855 
856  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
857  "Reference Fully Connected: weights type not supported.");
858 
859  ARMNN_NO_DEPRECATE_WARN_BEGIN
860  std::array<DataType, 3> supportedWeightTypes =
861  {
862  DataType::QAsymmU8,
863  DataType::QSymmS8,
864  DataType::QuantizedSymm8PerAxis // deprecated
865  };
866  ARMNN_NO_DEPRECATE_WARN_END
867 
868  if (IsQuantized8BitType(input.GetDataType()))
869  {
870 
871  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
872  "Reference Fully Connected: weights type not supported for quantized input.");
873  }
874  else
875  {
876  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
877  "Reference Fully Connected: weights is not a supported type.");
878 
879  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
880  "Reference Fully Connected: input and weights types mismatched.");
881  }
882 
883  if (descriptor.m_BiasEnabled)
884  {
885  // Defined supported types for bias
886  std::array<DataType, 4>
887  supportedBiasTypes =
888  {
889  DataType::BFloat16,
890  DataType::Float32,
891  DataType::Float16,
892  DataType::Signed32
893  };
894 
895  supported &= CheckSupportRule(TypeAnyOf(biases, supportedBiasTypes), reasonIfUnsupported,
896  "Reference Fully Connected: bias type not supported.");
897 
898  supported &= CheckSupportRule(BiasAndWeightsTypesMatch(biases, weights), reasonIfUnsupported,
899  "Reference Fully Connected: bias and weight types mismatch.");
900 
901  supported &= CheckSupportRule(BiasAndWeightsTypesCompatible(weights, supportedBiasTypes), reasonIfUnsupported,
902  "Reference Fully Connected: bias type inferred from weights is incompatible.");
903 
904  }
905 
906  return supported;
907 }
908 
909 bool RefLayerSupport::IsGatherSupported(const armnn::TensorInfo& input0,
910  const armnn::TensorInfo& input1,
911  const armnn::TensorInfo& output,
912  armnn::Optional<std::string&> reasonIfUnsupported) const
913 {
914  bool supported = true;
915  std::array<DataType,5> supportedTypes =
916  {
917  DataType::BFloat16,
918  DataType::Float32,
919  DataType::Float16,
920  DataType::QAsymmU8,
921  DataType::QSymmS16
922  };
923 
924  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
925  "Reference Gather: input type not supported");
926 
927  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
928  "Reference Gather: output type not supported");
929 
930  supported &= CheckSupportRule(TypeIs(input1, DataType::Signed32), reasonIfUnsupported,
931  "Reference Gather: indices (input1) type not supported");
932 
933  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
934  "Reference Gather: input and output types not matching");
935 
936  return supported;
937 }
938 
939 bool RefLayerSupport::IsGreaterSupported(const TensorInfo& input0,
940  const TensorInfo& input1,
941  const TensorInfo& output,
942  Optional<std::string&> reasonIfUnsupported) const
943 {
944  return IsComparisonSupported(input0,
945  input1,
946  output,
947  ComparisonDescriptor(ComparisonOperation::Greater),
948  reasonIfUnsupported);
949 }
950 
951 bool RefLayerSupport::IsInputSupported(const TensorInfo& /*input*/,
952  Optional<std::string&> /*reasonIfUnsupported*/) const
953 {
954  return true;
955 }
956 
957 bool RefLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input,
958  const TensorInfo& output,
959  const InstanceNormalizationDescriptor& descriptor,
960  Optional<std::string&> reasonIfUnsupported) const
961 {
962  IgnoreUnused(descriptor);
963  // Define supported types
964  std::array<DataType, 3> supportedTypes =
965  {
966  DataType::BFloat16,
967  DataType::Float32,
968  DataType::Float16
969  };
970 
971  bool supported = true;
972 
973  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
974  "Reference Instance Normalization: input type not supported.");
975 
976  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
977  "Reference Instance Normalization: output type not supported.");
978 
979  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
980  "Reference Instance Normalization: input and output types mismatched.");
981 
982  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
983  "Reference Instance Normalization: input and output shapes have different "
984  "num total elements.");
985 
986  return supported;
987 }
988 
989 bool RefLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
990  const TensorInfo& output,
991  const L2NormalizationDescriptor& descriptor,
992  Optional<std::string&> reasonIfUnsupported) const
993 {
994  IgnoreUnused(descriptor);
995  // Define supported types
996  std::array<DataType, 5> supportedTypes =
997  {
998  DataType::BFloat16,
999  DataType::Float32,
1000  DataType::Float16,
1001  DataType::QAsymmU8,
1002  DataType::QSymmS16
1003  };
1004 
1005  bool supported = true;
1006 
1007  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1008  "Reference L2normalization: input type not supported.");
1009 
1010  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1011  "Reference L2normalization: output type not supported.");
1012 
1013  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1014  "Reference L2normalization: input and output types mismatched.");
1015 
1016  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1017  "Reference L2normalization: input and output shapes have different "
1018  "num total elements.");
1019 
1020  return supported;
1021 }
1022 
1023 bool RefLayerSupport::IsLogSoftmaxSupported(const TensorInfo& input,
1024  const TensorInfo& output,
1025  const LogSoftmaxDescriptor& descriptor,
1026  Optional<std::string&> reasonIfUnsupported) const
1027 {
1028  IgnoreUnused(descriptor);
1029 
1030  std::array<DataType, 3> supportedTypes =
1031  {
1032  DataType::BFloat16,
1033  DataType::Float32,
1034  DataType::Float16
1035  };
1036 
1037  bool supported = true;
1038  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1039  "Reference LogSoftmax: input type not supported");
1040 
1041  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1042  "Reference LogSoftmax: output type not supported");
1043 
1044  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1045  "Reference LogSoftmax: input and output types do not match");
1046 
1047  return supported;
1048 }
1049 
1050 bool RefLayerSupport::IsLstmSupported(const TensorInfo& input,
1051  const TensorInfo& outputStateIn,
1052  const TensorInfo& cellStateIn,
1053  const TensorInfo& scratchBuffer,
1054  const TensorInfo& outputStateOut,
1055  const TensorInfo& cellStateOut,
1056  const TensorInfo& output,
1057  const LstmDescriptor& descriptor,
1058  const LstmInputParamsInfo& paramsInfo,
1059  Optional<std::string&> reasonIfUnsupported) const
1060 {
1061  IgnoreUnused(descriptor);
1062  IgnoreUnused(paramsInfo);
1063 
1064  bool supported = true;
1065 
1066  std::array<DataType,3> supportedTypes = {
1067  DataType::BFloat16,
1068  DataType::Float32,
1069  DataType::QSymmS16
1070  };
1071 
1072  // check inputs and outputs
1073  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1074  "Reference Lstm: input is not a supported type.");
1075  supported &= CheckSupportRule(TypesAreEqual(input, outputStateIn), reasonIfUnsupported,
1076  "Reference Lstm: input and outputStateIn types are mismatched");
1077  supported &= CheckSupportRule(TypesAreEqual(input, cellStateIn), reasonIfUnsupported,
1078  "Reference Lstm: input and cellStateIn types are mismatched");
1079  supported &= CheckSupportRule(TypesAreEqual(input, scratchBuffer), reasonIfUnsupported,
1080  "Reference Lstm: input and scratchBuffer types are mismatched");
1081  supported &= CheckSupportRule(TypesAreEqual(input, outputStateOut), reasonIfUnsupported,
1082  "Reference Lstm: input and outputStateOut types are mismatched");
1083  supported &= CheckSupportRule(TypesAreEqual(input, cellStateOut), reasonIfUnsupported,
1084  "Reference Lstm: input and cellStateOut types are mismatched");
1085  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1086  "Reference Lstm: input and output types are mismatched");
1087  // check layer parameters
1088  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToForgetWeights()), reasonIfUnsupported,
1089  "Reference Lstm: input and InputToForgetWeights types are mismatched");
1090  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToCellWeights()), reasonIfUnsupported,
1091  "Reference Lstm: input and InputToCellWeights types are mismatched");
1092  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToOutputWeights()), reasonIfUnsupported,
1093  "Reference Lstm: input and InputToOutputWeights types are mismatched");
1094  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToForgetWeights()), reasonIfUnsupported,
1095  "Reference Lstm: input and RecurrentToForgetWeights types are mismatched");
1096  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToCellWeights()), reasonIfUnsupported,
1097  "Reference Lstm: input and RecurrentToCellWeights types are mismatched");
1098  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToOutputWeights()), reasonIfUnsupported,
1099  "Reference Lstm: input and RecurrentToOutputWeights types are mismatched");
1100  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetGateBias()), reasonIfUnsupported,
1101  "Reference Lstm: input and ForgetGateBias types are mismatched");
1102  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellBias()), reasonIfUnsupported,
1103  "Reference Lstm: input and CellBias types are mismatched");
1104  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputGateBias()), reasonIfUnsupported,
1105  "Reference Lstm: input and OutputGateBias types are mismatched");
1106  if (!descriptor.m_CifgEnabled)
1107  {
1108  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToInputWeights()), reasonIfUnsupported,
1109  "Reference Lstm: input and InputToInputWeights types are mismatched");
1110  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToInputWeights()),
1111  reasonIfUnsupported,
1112  "Reference Lstm: input and RecurrentToInputWeights types are mismatched");
1113  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputGateBias()), reasonIfUnsupported,
1114  "Reference Lstm: input and InputGateBias types are mismatched");
1115  if (descriptor.m_PeepholeEnabled)
1116  {
1117  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToInputWeights()),
1118  reasonIfUnsupported,
1119  "Reference Lstm: input and CellToInputWeights types are mismatched");
1120  }
1121  }
1122  if (descriptor.m_PeepholeEnabled)
1123  {
1124  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToForgetWeights()), reasonIfUnsupported,
1125  "Reference Lstm: input and CellToForgetWeights types are mismatched");
1126  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToOutputWeights()), reasonIfUnsupported,
1127  "Reference Lstm: input and CellToOutputWeights types are mismatched");
1128  }
1129  if (descriptor.m_ProjectionEnabled)
1130  {
1131  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionWeights()), reasonIfUnsupported,
1132  "Reference Lstm: input and mProjectionWeights types are mismatched");
1133  if (paramsInfo.m_ProjectionBias != nullptr)
1134  {
1135  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
1136  "Reference Lstm: input and ProjectionBias types are mismatched");
1137  }
1138  }
1139  if (descriptor.m_LayerNormEnabled)
1140  {
1141  if (!descriptor.m_CifgEnabled)
1142  {
1143  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputLayerNormWeights()),
1144  reasonIfUnsupported,
1145  "Reference Lstm: input and InputLayerNormWeights types are mismatched");
1146  }
1147  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetLayerNormWeights()),
1148  reasonIfUnsupported,
1149  "Reference Lstm: input and ForgetLayerNormWeights types are mismatched");
1150  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellLayerNormWeights()),
1151  reasonIfUnsupported,
1152  "Reference Lstm: input and CellLayerNormWeights types are mismatched");
1153  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputLayerNormWeights()),
1154  reasonIfUnsupported,
1155  "Reference Lstm: input and OutputLayerNormWeights types are mismatched");
1156  }
1157 
1158  return supported;
1159 }
1160 
1161 bool RefLayerSupport::IsMaximumSupported(const TensorInfo& input0,
1162  const TensorInfo& input1,
1163  const TensorInfo& output,
1164  Optional<std::string&> reasonIfUnsupported) const
1165 {
1166  bool supported = true;
1167 
1168  std::array<DataType,6> supportedTypes = {
1169  DataType::BFloat16,
1170  DataType::Float32,
1171  DataType::Float16,
1172  DataType::QAsymmS8,
1173  DataType::QAsymmU8,
1174  DataType::QSymmS16
1175  };
1176 
1177  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1178  "Reference maximum: input 0 is not a supported type.");
1179 
1180  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1181  "Reference maximum: input 1 is not a supported type.");
1182 
1183  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1184  "Reference maximum: output is not a supported type.");
1185 
1186  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1187  "Reference maximum: input 0 and Input 1 types are mismatched");
1188 
1189  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1190  "Reference maximum: input and output types are mismatched");
1191 
1192  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1193  "Reference maximum: shapes are not suitable for implicit broadcast.");
1194 
1195  return supported;
1196 }
1197 
1198 bool RefLayerSupport::IsMeanSupported(const TensorInfo& input,
1199  const TensorInfo& output,
1200  const MeanDescriptor& descriptor,
1201  Optional<std::string&> reasonIfUnsupported) const
1202 {
1203  bool supported = true;
1204  std::string meanLayerStr = "Mean";
1205  std::string outputTensorStr = "output";
1206 
1207  std::array<DataType,5> supportedTypes =
1208  {
1209  DataType::BFloat16,
1210  DataType::Float32,
1211  DataType::Float16,
1212  DataType::QAsymmU8,
1213  DataType::QSymmS16
1214  };
1215 
1216  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1217  "Reference Mean: input type not supported.");
1218 
1219  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1220  "Reference Mean: input and output types are mismatched");
1221 
1222  if (descriptor.m_KeepDims)
1223  {
1224  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, input.GetNumDimensions()),
1225  reasonIfUnsupported,
1226  CreateIncorrectDimensionsErrorMsg(input.GetNumDimensions(),
1227  output.GetNumDimensions(),
1228  meanLayerStr, outputTensorStr).data());
1229  }
1230  else if (descriptor.m_Axis.empty())
1231  {
1232  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1233  reasonIfUnsupported,
1234  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1235  meanLayerStr, outputTensorStr).data());
1236  }
1237  else
1238  {
1239  auto outputDim = input.GetNumDimensions() - boost::numeric_cast<unsigned int>(descriptor.m_Axis.size());
1240 
1241  if (outputDim > 0)
1242  {
1243  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, outputDim),
1244  reasonIfUnsupported,
1245  CreateIncorrectDimensionsErrorMsg(outputDim, output.GetNumDimensions(),
1246  meanLayerStr, outputTensorStr).data());
1247  }
1248  else
1249  {
1250  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1251  reasonIfUnsupported,
1252  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1253  meanLayerStr, outputTensorStr).data());
1254  }
1255  }
1256 
1257  return supported;
1258 }
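// Editorial note: the expected output rank checked above is the input rank when m_KeepDims is set,
// 1 when every dimension is reduced (m_Axis empty or covering all dimensions), and otherwise the
// input rank minus the number of reduced axes.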
1259 
1260 bool RefLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
1261  const TensorInfo& output,
1262  const MergerDescriptor& descriptor,
1263  Optional<std::string&> reasonIfUnsupported) const
1264 {
1265  return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
1266 }
1267 
1268 bool RefLayerSupport::IsMemCopySupported(const TensorInfo &input,
1269  const TensorInfo &output,
1270  Optional<std::string &> reasonIfUnsupported) const
1271 {
1272  bool supported = true;
1273 
1274  std::array<DataType,6> supportedTypes =
1275  {
1276  DataType::BFloat16,
1277  DataType::Float32,
1278  DataType::Float16,
1279  DataType::QAsymmU8,
1280  DataType::QSymmS16,
1281  DataType::Boolean
1282  };
1283 
1284  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1285  "Reference MemCopy: input type not supported");
1286 
1287  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1288  "Reference MemCopy: output type not supported");
1289 
1290  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1291  "Reference MemCopy: input and output types are mismatched");
1292 
1293  return supported;
1294 }
1295 
1296 bool RefLayerSupport::IsMinimumSupported(const TensorInfo& input0,
1297  const TensorInfo& input1,
1298  const TensorInfo& output,
1299  Optional<std::string&> reasonIfUnsupported) const
1300 {
1301  bool supported = true;
1302 
1303  std::array<DataType,5> supportedTypes = {
1304  DataType::BFloat16,
1305  DataType::Float32,
1306  DataType::Float16,
1307  DataType::QAsymmU8,
1308  DataType::QSymmS16
1309  };
1310 
1311  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1312  "Reference minimum: input 0 is not a supported type.");
1313 
1314  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1315  "Reference minimum: input 1 is not a supported type.");
1316 
1317  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1318  "Reference minimum: output is not a supported type.");
1319 
1320  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1321  "Reference minimum: input 0 and Input 1 types are mismatched");
1322 
1323  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1324  "Reference minimum: input and output types are mismatched");
1325 
1326  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1327  "Reference minimum: shapes are not suitable for implicit broadcast.");
1328 
1329  return supported;
1330 }
1331 
1332 bool RefLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
1333  const TensorInfo& input1,
1334  const TensorInfo& output,
1335  Optional<std::string&> reasonIfUnsupported) const
1336 {
1337  bool supported = true;
1338 
1339  std::array<DataType,6> supportedTypes = {
1340  DataType::BFloat16,
1341  DataType::Float32,
1342  DataType::Float16,
1343  DataType::QAsymmU8,
1344  DataType::QAsymmS8,
1345  DataType::QSymmS16
1346  };
1347 
1348  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1349  "Reference multiplication: input 0 is not a supported type.");
1350 
1351  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1352  "Reference multiplication: input 1 is not a supported type.");
1353 
1354  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1355  "Reference multiplication: output is not a supported type.");
1356 
1357  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1358  "Reference multiplication: input 0 and Input 1 types are mismatched");
1359 
1360  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1361  "Reference multiplication: input and output types are mismatched");
1362 
1363  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1364  "Reference multiplication: shapes are not suitable for implicit broadcast.");
1365 
1366  return supported;
1367 }
1368 
1369 bool RefLayerSupport::IsNormalizationSupported(const TensorInfo& input,
1370  const TensorInfo& output,
1371  const NormalizationDescriptor& descriptor,
1372  Optional<std::string&> reasonIfUnsupported) const
1373 {
1374  IgnoreUnused(descriptor);
1375 
1376  // Define supported types
1377  std::array<DataType, 5> supportedTypes =
1378  {
1379  DataType::BFloat16,
1380  DataType::Float16,
1381  DataType::Float32,
1382  DataType::QAsymmU8,
1383  DataType::QSymmS16
1384  };
1385 
1386  bool supported = true;
1387 
1388  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1389  "Reference normalization: input type not supported.");
1390 
1391  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1392  "Reference normalization: output type not supported.");
1393 
1394  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1395  "Reference normalization: input and output shapes have different "
1396  "num total elements.");
1397 
1398  return supported;
1399 }
1400 
1401 bool RefLayerSupport::IsOutputSupported(const TensorInfo& /*output*/,
1402  Optional<std::string&> /*reasonIfUnsupported*/) const
1403 {
1404  return true;
1405 }
1406 
1407 bool RefLayerSupport::IsPadSupported(const TensorInfo& input,
1408  const TensorInfo& output,
1409  const PadDescriptor& descriptor,
1410  Optional<std::string&> reasonIfUnsupported) const
1411 {
1412  IgnoreUnused(descriptor);
1413  bool supported = true;
1414 
1415  // Define supported output and inputs types.
1416  std::array<DataType,5> supportedTypes =
1417  {
1418  DataType::BFloat16,
1419  DataType::Float32,
1420  DataType::Float16,
1421  DataType::QAsymmU8,
1422  DataType::QSymmS16
1423  };
1424 
1425  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1426  "Reference pad: input is not a supported type.");
1427 
1428  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1429  "Reference pad: output is not a supported type.");
1430 
1431  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1432  "Reference pad: input and output types are mismatched.");
1433 
1434  return supported;
1435 }
1436 
1437 bool RefLayerSupport::IsPermuteSupported(const TensorInfo& input,
1438  const TensorInfo& output,
1439  const PermuteDescriptor& descriptor,
1440  Optional<std::string&> reasonIfUnsupported) const
1441 {
1442  IgnoreUnused(descriptor);
1443  bool supported = true;
1444 
1445  // Define supported output and inputs types.
1446  std::array<DataType, 5> supportedTypes =
1447  {
1448  DataType::BFloat16,
1449  DataType::Float32,
1450  DataType::Float16,
1451  DataType::QAsymmU8,
1452  DataType::QSymmS16
1453  };
1454 
1455  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1456  "Reference permute: input is not a supported type.");
1457 
1458  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1459  "Reference permute: output is not a supported type.");
1460 
1461  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1462  "Reference permute: input and output types are mismatched.");
1463 
1464  return supported;
1465 }
1466 
1467 bool RefLayerSupport::IsPooling2dSupported(const TensorInfo& input,
1468  const TensorInfo& output,
1469  const Pooling2dDescriptor& descriptor,
1470  Optional<std::string&> reasonIfUnsupported) const
1471 {
1472  IgnoreUnused(descriptor);
1473  bool supported = true;
1474 
1475  // Define supported output and inputs types.
1476  std::array<DataType,6> supportedTypes =
1477  {
1478  DataType::BFloat16,
1479  DataType::Float32,
1480  DataType::Float16,
1481  DataType::QAsymmS8,
1482  DataType::QAsymmU8,
1483  DataType::QSymmS16
1484  };
1485 
1486  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1487  "Reference pooling2d: input is not a supported type.");
1488 
1489  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1490  "Reference pooling2d: output is not a supported type.");
1491 
1492  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1493  "Reference pooling2d: input and output types are mismatched.");
1494 
1495  return supported;
1496 }
1497 
1498 bool RefLayerSupport::IsQuantizeSupported(const TensorInfo& input,
1499  const TensorInfo& output,
1500  Optional<std::string&> reasonIfUnsupported) const
1501 {
1502  bool supported = true;
1503 
1504  // Define supported input types.
1505  std::array<DataType,7> supportedInputTypes = {
1506  DataType::BFloat16,
1507  DataType::Float32,
1508  DataType::Float16,
1509  DataType::QAsymmS8,
1510  DataType::QAsymmU8,
1511  DataType::QSymmS8,
1512  DataType::QSymmS16
1513  };
1514 
1515  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
1516  "Reference quantize: input type not supported.");
1517 
1518  // Define supported output types.
1519  std::array<DataType,4> supportedOutputTypes = {
1520  DataType::QAsymmU8,
1521  DataType::QAsymmS8,
1522  DataType::QSymmS8,
1523  DataType::QSymmS16
1524  };
1525  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
1526  "Reference quantize: output type not supported.");
1527 
1528  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1529  "Reference quantize: input and output shapes have different num total elements.");
1530 
1531  return supported;
1532 }
1533 
1534 bool RefLayerSupport::IsReshapeSupported(const TensorInfo& input,
1535  const TensorInfo& output,
1536  const ReshapeDescriptor& descriptor,
1537  Optional<std::string&> reasonIfUnsupported) const
1538 {
1539  IgnoreUnused(output);
1540  IgnoreUnused(descriptor);
1541  // Define supported output types.
1542  std::array<DataType,7> supportedOutputTypes =
1543  {
1544  DataType::BFloat16,
1545  DataType::Float32,
1546  DataType::Float16,
1547  DataType::Signed32,
1548  DataType::QAsymmS8,
1549  DataType::QAsymmU8,
1550  DataType::QSymmS16
1551  };
1552 
1553  return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
1554  "Reference reshape: input type not supported.");
1555 }
1556 
1557 bool RefLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
1558  const TensorInfo& output,
1559  Optional<std::string&> reasonIfUnsupported) const
1560 {
1561  bool supported = true;
1562  std::array<DataType,5> supportedTypes =
1563  {
1564  DataType::BFloat16,
1565  DataType::Float32,
1566  DataType::Float16,
1567  DataType::QAsymmU8,
1568  DataType::QSymmS16
1569  };
1570 
1571  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1572  "Reference ResizeBilinear: input type not supported");
1573 
1574  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1575  "Reference ResizeBilinear: output type not supported");
1576 
1577  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1578  "Reference ResizeBilinear: input and output types not matching");
1579 
1580  return supported;
1581 }
1582 
1583 bool RefLayerSupport::IsResizeSupported(const TensorInfo& input,
1584  const TensorInfo& output,
1585  const ResizeDescriptor& descriptor,
1586  Optional<std::string&> reasonIfUnsupported) const
1587 {
1588  IgnoreUnused(descriptor);
1589  bool supported = true;
1590  std::array<DataType,6> supportedTypes =
1591  {
1592  DataType::BFloat16,
1593  DataType::Float32,
1594  DataType::Float16,
1595  DataType::QAsymmU8,
1596  DataType::QAsymmS8,
1597  DataType::QSymmS16
1598  };
1599 
1600  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1601  "Reference Resize: input type not supported");
1602 
1603  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1604  "Reference Resize: output type not supported");
1605 
1606  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1607  "Reference Resize: input and output types not matching");
1608 
1609  return supported;
1610 }
1611 
1612 bool RefLayerSupport::IsRsqrtSupported(const TensorInfo& input,
1613  const TensorInfo& output,
1614  Optional<std::string&> reasonIfUnsupported) const
1615 {
1616  return IsElementwiseUnarySupported(input,
1617  output,
1618  ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt),
1619  reasonIfUnsupported);
1620 }
1621 
1622 bool RefLayerSupport::IsSliceSupported(const TensorInfo& input,
1623  const TensorInfo& output,
1624  const SliceDescriptor& descriptor,
1625  Optional<std::string&> reasonIfUnsupported) const
1626 {
1627  IgnoreUnused(descriptor);
1628  bool supported = true;
1629 
1630  std::array<DataType, 4> supportedTypes =
1631  {
1632  DataType::BFloat16,
1633  DataType::Float32,
1634  DataType::QAsymmU8,
1635  DataType::QSymmS16
1636  };
1637 
1638  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1639  "Reference Slice: input type not supported");
1640 
1641  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1642  "Reference Slice: output type not supported");
1643 
1644  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1645  "Reference Slice: input and output types are mismatched");
1646 
1647  return supported;
1648 }
1649 
1650 bool RefLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
1651  const TensorInfo& output,
1652  const SoftmaxDescriptor& descriptor,
1653  Optional<std::string&> reasonIfUnsupported) const
1654 {
1655  IgnoreUnused(descriptor);
1656  bool supported = true;
1657  std::array<DataType,7> supportedTypes =
1658  {
1659  DataType::BFloat16,
1660  DataType::Float32,
1661  DataType::Float16,
1662  DataType::QSymmS8,
1663  DataType::QAsymmS8,
1664  DataType::QAsymmU8,
1665  DataType::QSymmS16
1666  };
1667 
1668  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1669  "Reference Softmax: input type not supported");
1670 
1671  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1672  "Reference Softmax: output type not supported");
1673 
1674  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1675  "Reference Softmax: input and output types mismatched");
1676 
1677  return supported;
1678 }
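The TypesAreEqual rule means a quantized input cannot be paired with a float output, even though both types appear in supportedTypes. A small sketch of a query that fails for exactly that reason (quantization parameters, shapes and the helper name are illustrative; headers as in the earlier sketch):

bool QueryMismatchedSoftmax()
{
    armnn::RefLayerSupport layerSupport;

    armnn::TensorInfo input ({ 1, 10 }, armnn::DataType::QAsymmU8, 1.0f / 256.0f, 0);
    armnn::TensorInfo output({ 1, 10 }, armnn::DataType::Float32);

    std::string reason;
    // Returns false: both types are individually supported, but they differ,
    // so the mismatch message is written to 'reason'.
    return layerSupport.IsSoftmaxSupported(input, output, armnn::SoftmaxDescriptor(), reason);
}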
1679 
1680 bool RefLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
 1681  const TensorInfo& output,
1682  const SpaceToBatchNdDescriptor& descriptor,
1683  Optional<std::string&> reasonIfUnsupported) const
1684 {
1685  IgnoreUnused(descriptor);
1686  bool supported = true;
1687  std::array<DataType,5> supportedTypes =
1688  {
1689  DataType::BFloat16,
1690  DataType::Float32,
1691  DataType::Float16,
1692  DataType::QAsymmU8,
1693  DataType::QSymmS16
1694  };
1695 
1696  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1697  "Reference SpaceToBatchNd: input type not supported");
1698 
1699  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1700  "Reference SpaceToBatchNd: output type not supported");
1701 
1702  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1703  "Reference SpaceToBatchNd: input and output types are mismatched");
1704 
1705  return supported;
1706 }
1707 
1708 bool RefLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
 1709  const TensorInfo& output,
1710  const SpaceToDepthDescriptor& descriptor,
1711  Optional<std::string&> reasonIfUnsupported) const
1712 {
1713 
1714  IgnoreUnused(descriptor);
1715  bool supported = true;
1716 
1717  std::array<DataType,5> supportedTypes =
1718  {
1719  DataType::BFloat16,
1720  DataType::Float32,
1721  DataType::Float16,
1722  DataType::QAsymmU8,
1723  DataType::QSymmS16
1724  };
1725 
1726  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1727  "Reference SpaceToDepth: input type not supported");
1728 
1729  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1730  "Reference SpaceToDepth: output type not supported");
1731 
1732  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1733  "Reference SpaceToDepth: input and output types are mismatched");
1734 
1735  return supported;
1736 }
1737 
1738 bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
 1739  const ViewsDescriptor& descriptor,
1740  Optional<std::string&> reasonIfUnsupported) const
1741 {
1742  IgnoreUnused(descriptor);
1743  bool supported = true;
1744  std::array<DataType,5> supportedTypes =
1745  {
1746  DataType::BFloat16,
1747  DataType::Float32,
1748  DataType::Float16,
1749  DataType::QAsymmU8,
1750  DataType::QSymmS16
1751  };
1752 
1753  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1754  "Reference splitter: input type not supported");
1755 
1756  return supported;
1757 }
1758 
1759 bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
 1760  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
1761  const ViewsDescriptor& descriptor,
1762  Optional<std::string&> reasonIfUnsupported) const
1763 {
1764  IgnoreUnused(descriptor);
1765  bool supported = true;
1766  std::array<DataType,5> supportedTypes =
1767  {
1768  DataType::BFloat16,
1769  DataType::Float32,
1770  DataType::Float16,
1771  DataType::QAsymmU8,
1772  DataType::QSymmS16
1773  };
1774 
 1775  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
 1776  "Reference splitter: input type not supported");
 1777  for (const TensorInfo& output : outputs)
 1778  {
 1779  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
 1780  "Reference splitter: output type not supported");
1781 
1782  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1783  "Reference splitter: input and output types mismatched.");
1784  }
1785 
1786  return supported;
1787 }
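This overload takes the per-view output infos as std::reference_wrapper, so callers normally build the vector with std::ref. A sketch of a two-way split query with illustrative shapes (the descriptor's view geometry is ignored by the type rules above; <vector> and <functional> are assumed in addition to the earlier headers):

bool QuerySplitterSupport()
{
    armnn::RefLayerSupport layerSupport;

    armnn::TensorInfo input({ 2, 4 }, armnn::DataType::Float32);
    armnn::TensorInfo out0 ({ 1, 4 }, armnn::DataType::Float32);
    armnn::TensorInfo out1 ({ 1, 4 }, armnn::DataType::Float32);

    std::vector<std::reference_wrapper<armnn::TensorInfo>> outputs{ std::ref(out0), std::ref(out1) };

    armnn::ViewsDescriptor descriptor(2, 2); // two views over a 2-dimensional tensor

    std::string reason;
    return layerSupport.IsSplitterSupported(input, outputs, descriptor, reason);
}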
1788 
1789 bool RefLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
1790  const TensorInfo& output,
1791  const StackDescriptor& descriptor,
1792  Optional<std::string&> reasonIfUnsupported) const
1793 {
1794  IgnoreUnused(descriptor);
1795 
1796  bool supported = true;
1797  std::array<DataType,5> supportedTypes =
1798  {
1799  DataType::BFloat16,
1800  DataType::Float32,
1801  DataType::Float16,
1802  DataType::QAsymmU8,
1803  DataType::QSymmS16
1804  };
1805 
1806  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1807  "Reference stack: output type not supported");
1808  for (const TensorInfo* input : inputs)
1809  {
1810  BOOST_ASSERT(input != nullptr);
1811  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
1812  "Reference stack: input type not supported");
1813 
1814  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
1815  "Reference stack: input and output types mismatched.");
1816  }
1817 
1818  return supported;
1819 }
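Stack takes its inputs as raw TensorInfo pointers, each of which must match the output type. A sketch stacking two 3x4 tensors along a new leading axis (shapes, descriptor values and the helper name are illustrative):

bool QueryStackSupport()
{
    armnn::RefLayerSupport layerSupport;

    armnn::TensorInfo input0({ 3, 4 }, armnn::DataType::Float32);
    armnn::TensorInfo input1({ 3, 4 }, armnn::DataType::Float32);
    armnn::TensorInfo output({ 2, 3, 4 }, armnn::DataType::Float32);

    std::vector<const armnn::TensorInfo*> inputs{ &input0, &input1 };

    // axis 0, two inputs, each of shape { 3, 4 }
    armnn::StackDescriptor descriptor(0, 2, armnn::TensorShape({ 3, 4 }));

    std::string reason;
    return layerSupport.IsStackSupported(inputs, output, descriptor, reason);
}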
1820 
1821 bool RefLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
 1822  const TensorInfo& output,
1823  const StridedSliceDescriptor& descriptor,
1824  Optional<std::string&> reasonIfUnsupported) const
1825 {
1826  IgnoreUnused(descriptor);
1827  bool supported = true;
1828 
1829  std::array<DataType,4> supportedTypes =
1830  {
1831  DataType::BFloat16,
1832  DataType::Float32,
1833  DataType::QAsymmU8,
1834  DataType::QSymmS16
1835  };
1836 
1837  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1838  "Reference StridedSlice: input type not supported");
1839 
1840  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1841  "Reference StridedSlice: output type not supported");
1842 
1843  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1844  "Reference StridedSlice: input and output types are mismatched");
1845 
1846  return supported;
1847 }
1848 
1849 bool RefLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
 1850  const TensorInfo& input1,
1851  const TensorInfo& output,
1852  Optional<std::string&> reasonIfUnsupported) const
1853 {
1854  bool supported = true;
1855 
1856  std::array<DataType,5> supportedTypes = {
1857  DataType::BFloat16,
1858  DataType::Float32,
1859  DataType::Float16,
1860  DataType::QAsymmU8,
1861  DataType::QSymmS16
1862  };
1863 
1864  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1865  "Reference subtraction: input 0 is not a supported type.");
1866 
1867  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1868  "Reference subtraction: input 1 is not a supported type.");
1869 
1870  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1871  "Reference subtraction: output is not a supported type.");
1872 
1873  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
 1874  "Reference subtraction: input 0 and input 1 types are mismatched");
1875 
1876  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1877  "Reference subtraction: input and output types are mismatched");
1878 
1879  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1880  "Reference subtraction: shapes are not suitable for implicit broadcast.");
1881 
1882  return supported;
1883 }
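The last rule permits implicit broadcasting, so one operand may have size-1 dimensions where the other does not. A sketch in which input1 broadcasts over the batch and spatial dimensions (shapes and the helper name are illustrative):

bool QuerySubtractionSupport()
{
    armnn::RefLayerSupport layerSupport;

    armnn::TensorInfo input0({ 1, 8, 8, 3 }, armnn::DataType::Float32);
    armnn::TensorInfo input1({ 1, 1, 1, 3 }, armnn::DataType::Float32); // broadcast over N, H and W
    armnn::TensorInfo output({ 1, 8, 8, 3 }, armnn::DataType::Float32);

    std::string reason;
    // A non-broadcastable shape such as { 1, 1, 2, 3 } for input1 would fail
    // the ShapesAreBroadcastCompatible rule instead.
    return layerSupport.IsSubtractionSupported(input0, input1, output, reason);
}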
1884 
1885 bool RefLayerSupport::IsPreluSupported(const TensorInfo& input,
 1886  const TensorInfo& alpha,
1887  const TensorInfo& output,
1888  Optional<std::string&> reasonIfUnsupported) const
1889 {
1890  bool supported = true;
1891 
1892  std::array<DataType, 5> supportedTypes
1893  {
1894  DataType::BFloat16,
1895  DataType::Float32,
1896  DataType::Float16,
1897  DataType::QAsymmU8,
1898  DataType::QSymmS16
1899  };
1900 
 1901  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
 1902  "Reference PReLU: input is not a supported type.");
 1903 
 1904  supported &= CheckSupportRule(TypeAnyOf(alpha, supportedTypes), reasonIfUnsupported,
 1905  "Reference PReLU: alpha is not a supported type.");
 1906 
 1907  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
 1908  "Reference PReLU: output is not a supported type.");
 1909 
 1910  supported &= CheckSupportRule(TypesAreEqual(input, alpha, output), reasonIfUnsupported,
 1911  "Reference PReLU: input, alpha and output types are mismatched.");
 1912 
 1913  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input, alpha, output), reasonIfUnsupported,
 1914  "Reference PReLU: shapes are not suitable for implicit broadcast.");
1915 
1916  return supported;
1917 }
1918 
1919 bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
 1920  const TensorInfo& output,
1921  const TransposeConvolution2dDescriptor& descriptor,
1922  const TensorInfo& weights,
1923  const Optional<TensorInfo>& biases,
1924  Optional<std::string&> reasonIfUnsupported) const
1925 {
1926  IgnoreUnused(descriptor);
1927  bool supported = true;
1928 
1929  std::array<DataType,5> supportedTypes =
1930  {
1931  DataType::BFloat16,
1932  DataType::Float32,
1933  DataType::Float16,
1934  DataType::QAsymmU8,
1935  DataType::QSymmS16
1936  };
1937 
1938  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1939  "Reference TransposeConvolution2d: input is not a supported type.");
1940 
1941  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1942  "Reference TransposeConvolution2d: output is not a supported type.");
1943 
1944  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1945  "Reference TransposeConvolution2d: input and output types mismatched.");
1946 
1947 
1948  const DataType inputType = input.GetDataType();
1949  if (inputType == DataType::QAsymmU8)
1950  {
 1951  ARMNN_NO_DEPRECATE_WARN_BEGIN
 1952  std::array<DataType, 3> supportedWeightTypes =
1953  {
1954  DataType::QAsymmU8,
1955  DataType::QSymmS8,
1956  DataType::QuantizedSymm8PerAxis //Deprecated
1957  };
 1958  ARMNN_NO_DEPRECATE_WARN_END
 1959 
1960  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1961  "Reference TransposeConvolution2d: weights type not supported for "
1962  "quantized input.");
1963  }
1964  else
1965  {
1966  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1967  "Reference TransposeConvolution2d: weights is not a supported type.");
1968 
1969  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1970  "Reference TransposeConvolution2d: input and weights types mismatched.");
1971  }
1972 
1973  if (biases.has_value())
1974  {
1975  std::array<DataType,4> biasesSupportedTypes =
1976  {
1977  DataType::BFloat16,
1978  DataType::Float32,
1979  DataType::Float16,
1980  DataType::Signed32
1981  };
1982  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1983  "Reference TransposeConvolution2d: biases is not a supported type.");
1984  }
1985 
1986  return supported;
1987 }
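Because the weight rules branch on the input data type, a quantized query supplies quantized weights and an integer bias, while a Float32 query would instead require the weights to match the input type. A sketch with illustrative shapes, quantization parameters and helper name:

bool QueryTransposeConv2dSupport()
{
    armnn::RefLayerSupport layerSupport;

    armnn::TensorInfo input  ({ 1, 8, 8, 1 },   armnn::DataType::QAsymmU8, 0.5f,   0);
    armnn::TensorInfo output ({ 1, 16, 16, 1 }, armnn::DataType::QAsymmU8, 0.5f,   0);
    armnn::TensorInfo weights({ 1, 3, 3, 1 },   armnn::DataType::QSymmS8,  0.25f,  0);
    armnn::TensorInfo biases ({ 1 },            armnn::DataType::Signed32, 0.125f, 0);

    armnn::TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX     = 2;
    descriptor.m_StrideY     = 2;
    descriptor.m_BiasEnabled = true;
    descriptor.m_DataLayout  = armnn::DataLayout::NHWC;

    std::string reason;
    return layerSupport.IsTransposeConvolution2dSupported(input, output, descriptor, weights,
                                                          armnn::Optional<armnn::TensorInfo>(biases), reason);
}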
1988 
1989 bool RefLayerSupport::IsTransposeSupported(const TensorInfo& input,
1990  const TensorInfo& output,
1991  const TransposeDescriptor& descriptor,
1992  Optional<std::string&> reasonIfUnsupported) const
1993 {
1994  IgnoreUnused(descriptor);
1995  bool supported = true;
1996 
 1997  // Define supported input and output types.
1998  std::array<DataType, 5> supportedTypes =
1999  {
2000  DataType::BFloat16,
2001  DataType::Float32,
2002  DataType::Float16,
2003  DataType::QAsymmU8,
2004  DataType::QSymmS16
2005  };
2006 
2007  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2008  "Reference transpose: input is not a supported type.");
2009 
2010  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2011  "Reference transpose: output is not a supported type.");
2012 
2013  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2014  "Reference transpose: input and output types are mismatched.");
2015 
2016  return supported;
2017 }
2018 
2019 } // namespace armnn