ArmNN  NotReleased
RefLayerSupport.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "RefLayerSupport.hpp"
7 
8 #include <armnn/TypesUtils.hpp>
9 #include <armnn/Types.hpp>
10 #include <armnn/Descriptors.hpp>
11 
12 #include <LayerSupportCommon.hpp>
13 
15 
16 #include <boost/cast.hpp>
17 #include <boost/core/ignore_unused.hpp>
18 
19 #include <vector>
20 #include <array>
21 
22 using namespace boost;
23 
24 namespace armnn
25 {
26 
27 namespace
28 {
29 
// Answers "does the reference backend support this data type?" by dispatching
// to the caller-supplied predicates for Float32 and QAsymmU8 and rejecting
// every other data type outright via FalseFunc.
//
// reasonIfUnsupported - optional out-string receiving a failure explanation.
// dataType            - the tensor data type being queried.
// floatFuncPtr        - predicate invoked for the Float32 slot.
// uint8FuncPtr        - predicate invoked for the QAsymmU8 slot.
// params              - forwarded verbatim to the selected predicate.
template<typename Float32Func, typename Uint8Func, typename ... Params>
bool IsSupportedForDataTypeRef(Optional<std::string&> reasonIfUnsupported,
                               DataType dataType,
                               Float32Func floatFuncPtr,
                               Uint8Func uint8FuncPtr,
                               Params&&... params)
{
    // IsSupportedForDataTypeGeneric takes one predicate per data-type slot;
    // judging by the F16/F32 converter queries below the order appears to be
    // F16, F32, U8, I32, ... (TODO confirm against LayerSupportCommon.hpp).
    // Only the F32 and U8 slots are caller-controlled here.
    return IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                         dataType,
                                         &FalseFunc<Params...>,
                                         floatFuncPtr,
                                         uint8FuncPtr,
                                         &FalseFunc<Params...>,
                                         &FalseFunc<Params...>,
                                         std::forward<Params>(params)...);
}
46 
47 } // anonymous namespace
48 
49 namespace
50 {
51 
// Builds the standard "wrong tensor rank" diagnostic used together with the
// TensorNumDimensionsAreCorrect support rule.
//
// expected   - number of dimensions the layer requires.
// actual     - number of dimensions the tensor actually has.
// layerStr   - human-readable layer name spliced into the message.
// tensorName - name of the offending tensor (e.g. "input", "output").
// Returns the fully formatted error message.
//
// Parameters are now const references (they were mutable std::string&), so
// callers may also pass temporaries; the produced text is unchanged.
std::string CreateIncorrectDimensionsErrorMsg(unsigned int expected,
                                              unsigned int actual,
                                              const std::string& layerStr,
                                              const std::string& tensorName)
{
    return "Reference " + layerStr + ": Expected " + std::to_string(expected) +
           " dimensions but got " + std::to_string(actual) +
           " dimensions instead, for the '" + tensorName + "' tensor.";
}
62 
63 } // anonymous namespace
64 
// Abs is implemented as the elementwise-unary Abs operation, so support is
// exactly whatever IsElementwiseUnarySupported reports for that descriptor.
bool RefLayerSupport::IsAbsSupported(const TensorInfo& input, const TensorInfo& output,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    return IsElementwiseUnarySupported(input,
                                       output,
                                       ElementwiseUnaryDescriptor(UnaryOperation::Abs),
                                       reasonIfUnsupported);
}
73 
75  const TensorInfo& output,
76  const ActivationDescriptor& descriptor,
77  Optional<std::string&> reasonIfUnsupported) const
78 {
79  bool supported = true;
80 
81  // Define supported types.
82  std::array<DataType,6> supportedTypes = {
83  DataType::Float32,
84  DataType::Float16,
85  DataType::QAsymmS8,
86  DataType::QAsymmU8,
87  DataType::QSymmS16
88  };
89 
90  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
91  "Reference activation: input type not supported.");
92 
93  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
94  "Reference activation: output type not supported.");
95 
96  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
97  "Reference activation: input and output types mismatched.");
98 
99  supported &= CheckSupportRule(ShapesAreSameRank(input, output), reasonIfUnsupported,
100  "Reference activation: input and output shapes are of different rank.");
101 
102 
103  struct ActivationFunctionSupported : public Rule
104  {
105  ActivationFunctionSupported(const ActivationDescriptor& desc)
106  {
107  switch(desc.m_Function)
108  {
109  case ActivationFunction::Abs:
110  case ActivationFunction::BoundedReLu:
111  case ActivationFunction::LeakyReLu:
112  case ActivationFunction::Linear:
113  case ActivationFunction::ReLu:
114  case ActivationFunction::Sigmoid:
115  case ActivationFunction::SoftReLu:
116  case ActivationFunction::Sqrt:
117  case ActivationFunction::Square:
118  case ActivationFunction::TanH:
119  {
120  m_Res = true;
121  break;
122  }
123  default:
124  {
125  m_Res = false;
126  break;
127  }
128  }
129  }
130  };
131 
132  // Function is supported
133  supported &= CheckSupportRule(ActivationFunctionSupported(descriptor), reasonIfUnsupported,
134  "Reference activation: function not supported.");
135 
136  return supported;
137 }
138 
140  const TensorInfo& input1,
141  const TensorInfo& output,
142  Optional<std::string&> reasonIfUnsupported) const
143 {
144  bool supported = true;
145 
146  std::array<DataType,6> supportedTypes = {
147  DataType::Float32,
148  DataType::Float16,
149  DataType::QAsymmS8,
150  DataType::QAsymmU8,
151  DataType::QSymmS16
152  };
153 
154  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
155  "Reference addition: input 0 is not a supported type.");
156 
157  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
158  "Reference addition: input 1 is not a supported type.");
159 
160  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
161  "Reference addition: output is not a supported type.");
162 
163  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
164  "Reference addition: input 0 and Input 1 types are mismatched");
165 
166  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
167  "Reference addition: input and output types are mismatched");
168 
169  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
170  "Reference addition: shapes are not suitable for implicit broadcast.");
171 
172  return supported;
173 }
174 
176  const armnn::ArgMinMaxDescriptor &descriptor,
177  armnn::Optional<std::string &> reasonIfUnsupported) const
178 {
179  ignore_unused(descriptor);
180 
181  std::array<DataType, 4> supportedTypes =
182  {
183  DataType::Float32,
184  DataType::QAsymmU8,
185  DataType::QSymmS16,
186  DataType::Signed32
187  };
188 
189  bool supported = true;
190 
191  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
192  "Reference ArgMinMax: input is not a supported type.");
193  supported &= CheckSupportRule(TypeIs(output, DataType::Signed32), reasonIfUnsupported,
194  "Reference ArgMinMax: output type not supported");
195 
196  return supported;
197 }
198 
200  const TensorInfo& output,
201  const TensorInfo& mean,
202  const TensorInfo& variance,
203  const TensorInfo& beta,
204  const TensorInfo& gamma,
205  const BatchNormalizationDescriptor& descriptor,
206  Optional<std::string&> reasonIfUnsupported) const
207 {
208  ignore_unused(descriptor);
209 
210  std::array<DataType, 4> supportedTypes =
211  {
212  DataType::Float32,
213  DataType::Float16,
214  DataType::QAsymmU8,
215  DataType::QSymmS16
216  };
217 
218  bool supported = true;
219 
220  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
221  "Reference batch normalization: input is not a supported type.");
222 
223  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
224  "Reference batch normalization: output is not a supported type.");
225 
226  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
227  "Reference batch normalization: input and output types are mismatched");
228 
229  supported &= CheckSupportRule(TypeAnyOf(mean, supportedTypes), reasonIfUnsupported,
230  "Reference batch normalization: mean is not a supported type.");
231 
232  supported &= CheckSupportRule(TypeAnyOf(variance, supportedTypes), reasonIfUnsupported,
233  "Reference batch normalization: variance is not a supported type.");
234 
235  supported &= CheckSupportRule(TypeAnyOf(beta, supportedTypes), reasonIfUnsupported,
236  "Reference batch normalization: beta is not a supported type.");
237 
238  supported &= CheckSupportRule(TypeAnyOf(gamma, supportedTypes), reasonIfUnsupported,
239  "Reference batch normalization: gamma is not a supported type.");
240 
241  return supported;
242 }
243 
245  const TensorInfo& output,
246  const BatchToSpaceNdDescriptor& descriptor,
247  Optional<std::string&> reasonIfUnsupported) const
248 {
249  ignore_unused(descriptor);
250 
251  bool supported = true;
252 
253  std::string batchToSpaceNdLayerStr = "batchToSpaceNd";
254  std::string inputTensorStr = "input";
255  std::string outputTensorStr = "output";
256 
257  // Define supported types.
258  std::array<DataType,4> supportedTypes =
259  {
260  DataType::Float32,
261  DataType::Float16,
262  DataType::QAsymmU8,
263  DataType::QSymmS16
264  };
265 
266  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
267  "Reference BatchToSpaceNd: input type not supported.");
268 
269  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
270  "Reference BatchToSpaceNd: output type not supported.");
271 
272  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
273  "Reference BatchToSpaceNd: input and output types mismatched.");
274 
275  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 4),
276  reasonIfUnsupported,
277  CreateIncorrectDimensionsErrorMsg(4,
278  output.GetNumDimensions(),
279  batchToSpaceNdLayerStr,
280  outputTensorStr).data());
281 
282  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(input, 4),
283  reasonIfUnsupported,
284  CreateIncorrectDimensionsErrorMsg(4,
285  input.GetNumDimensions(),
286  batchToSpaceNdLayerStr,
287  inputTensorStr).data());
288 
289  return supported;
290 }
291 
292 bool RefLayerSupport::IsComparisonSupported(const TensorInfo& input0,
293  const TensorInfo& input1,
294  const TensorInfo& output,
295  const ComparisonDescriptor& descriptor,
296  Optional<std::string&> reasonIfUnsupported) const
297 {
298  boost::ignore_unused(descriptor);
299 
300  std::array<DataType, 4> supportedInputTypes =
301  {
302  DataType::Float32,
303  DataType::Float16,
304  DataType::QAsymmU8,
305  DataType::QSymmS16
306  };
307 
308  bool supported = true;
309  supported &= CheckSupportRule(TypeAnyOf(input0, supportedInputTypes), reasonIfUnsupported,
310  "Reference comparison: input 0 is not a supported type");
311 
312  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
313  "Reference comparison: input 0 and Input 1 types are mismatched");
314 
315  supported &= CheckSupportRule(TypeIs(output, DataType::Boolean), reasonIfUnsupported,
316  "Reference comparison: output is not of type Boolean");
317 
318  return supported;
319 }
320 
321 bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
322  const TensorInfo& output,
323  const ConcatDescriptor& descriptor,
324  Optional<std::string&> reasonIfUnsupported) const
325 {
326  ignore_unused(descriptor);
327 
328  bool supported = true;
329  std::array<DataType,5> supportedTypes =
330  {
331  DataType::Float32,
332  DataType::Float16,
333  DataType::QAsymmU8,
334  DataType::QAsymmS8,
335  DataType::QSymmS16
336  };
337 
338  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
339  "Reference concatenation: output type not supported");
340  for (const TensorInfo* input : inputs)
341  {
342  BOOST_ASSERT(input != nullptr);
343  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
344  "Reference concatenation: input type not supported");
345 
346  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
347  "Reference concatenation: input and output types mismatched.");
348  }
349 
350  return supported;
351 }
352 
354  Optional<std::string&> reasonIfUnsupported) const
355 {
356  std::array<DataType,6> supportedTypes =
357  {
358  DataType::Float32,
359  DataType::Signed32,
360  DataType::QAsymmU8,
361  DataType::QAsymmS8,
362  DataType::QSymmS8,
363  DataType::QSymmS16
364  };
365 
366  return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
367  "Reference constant: output is not a supported type.");
368 }
369 
371  const TensorInfo& output,
372  Optional<std::string&> reasonIfUnsupported) const
373 {
374  return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
375  input.GetDataType(),
376  &TrueFunc<>,
377  &FalseInputFuncF32<>,
378  &FalseFuncU8<>,
379  &FalseFuncI32<>,
380  &FalseFuncU8<>) &&
381  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
382  output.GetDataType(),
383  &FalseOutputFuncF16<>,
384  &TrueFunc<>,
385  &FalseFuncU8<>,
386  &FalseFuncI32<>,
387  &FalseFuncU8<>));
388 }
389 
391  const TensorInfo& output,
392  Optional<std::string&> reasonIfUnsupported) const
393 {
394  return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
395  input.GetDataType(),
396  &FalseInputFuncF16<>,
397  &TrueFunc<>,
398  &FalseFuncU8<>,
399  &FalseFuncI32<>,
400  &FalseFuncU8<>) &&
401  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
402  output.GetDataType(),
403  &TrueFunc<>,
404  &FalseOutputFuncF32<>,
405  &FalseFuncU8<>,
406  &FalseFuncI32<>,
407  &FalseFuncU8<>));
408 }
409 
411  const TensorInfo& output,
412  const Convolution2dDescriptor& descriptor,
413  const TensorInfo& weights,
414  const Optional<TensorInfo>& biases,
415  Optional<std::string&> reasonIfUnsupported) const
416 {
417  bool supported = true;
418 
419  // Define supported types.
420  std::array<DataType,6> supportedTypes =
421  {
422  DataType::Float32,
423  DataType::Float16,
424  DataType::QAsymmU8,
425  DataType::QAsymmS8,
426  DataType::QSymmS8,
427  DataType::QSymmS16
428  };
429 
430  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
431  "Reference Convolution2d: input is not a supported type.");
432 
433  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
434  "Reference Convolution2d: output is not a supported type.");
435 
436  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
437  "Reference Convolution2d: input and output types mismatched.");
438 
439  const DataType inputType = input.GetDataType();
440  if (IsQuantized8BitType(inputType))
441  {
443  std::array<DataType, 4> supportedWeightTypes =
444  {
445  DataType::QAsymmU8,
446  DataType::QSymmS8,
447  DataType::QAsymmS8,
448  DataType::QuantizedSymm8PerAxis // deprecated
449  };
451 
452  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
453  "Reference Convolution2d: weights type not supported for quantized input.");
454  }
455  else
456  {
457  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
458  "Reference Convolution2d: weights is not a supported type.");
459 
460  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
461  "Reference Convolution2d: input and weights types mismatched.");
462  }
463 
464  if (biases.has_value())
465  {
466  std::array<DataType,3> biasesSupportedTypes =
467  {
468  DataType::Float32,
469  DataType::Float16,
470  DataType::Signed32
471  };
472 
473  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
474  "Reference Convolution2d: biases is not a supported type.");
475  }
476  ignore_unused(descriptor);
477 
478  return supported;
479 }
480 
482  const TensorInfo& output,
483  Optional<std::string&> reasonIfUnsupported) const
484 {
485  bool supported = true;
486 
487  std::array<DataType, 7> supportedTypes =
488  {
489  DataType::Float16,
490  DataType::Float32,
491  DataType::QAsymmU8,
492  DataType::QAsymmS8,
493  DataType::QSymmS8,
494  DataType::QSymmS16,
495  DataType::Signed32
496  };
497 
498  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
499  "Reference for Debug layer: input type not supported");
500 
501  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
502  "Reference for Debug layer: output type not supported");
503 
504  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
505  "Reference for Debug layer: input and output types are mismatched");
506 
507  return supported;
508 }
509 
510 bool RefLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
511  const TensorInfo& output,
512  const DepthToSpaceDescriptor& descriptor,
513  Optional<std::string&> reasonIfUnsupported) const
514 {
515  ignore_unused(descriptor);
516  bool supported = true;
517 
518  std::array<DataType,4> supportedTypes =
519  {
520  DataType::Float32,
521  DataType::Float16,
522  DataType::QAsymmU8,
523  DataType::QSymmS16
524  };
525 
526  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
527  "Reference DepthToSpace: input type not supported");
528 
529  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
530  "Reference DepthToSpace: output type not supported");
531 
532  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
533  "Reference DepthToSpace: input and output types are mismatched");
534 
535  return supported;
536 }
537 
539  const TensorInfo& output,
540  const DepthwiseConvolution2dDescriptor& descriptor,
541  const TensorInfo& weights,
542  const Optional<TensorInfo>& biases,
543  Optional<std::string&> reasonIfUnsupported) const
544 {
545  bool supported = true;
546 
547  // Define supported types.
548  std::array<DataType,6> supportedTypes =
549  {
550  DataType::Float32,
551  DataType::Float16,
552  DataType::QSymmS8,
553  DataType::QAsymmS8,
554  DataType::QAsymmU8,
555  DataType::QSymmS16
556  };
557 
558  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
559  "Reference DepthwiseConvolution2d: input is not a supported type.");
560 
561  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
562  "Reference DepthwiseConvolution2d: output is not a supported type.");
563 
564  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
565  "Reference DepthwiseConvolution2d: input and output types mismatched.");
566 
568  std::array<DataType, 3> supportedWeightTypes =
569  {
570  DataType::QAsymmU8,
571  DataType::QSymmS8,
572  DataType::QuantizedSymm8PerAxis // deprecated
573  };
575 
576  const DataType inputType = input.GetDataType();
577  if (IsQuantized8BitType(inputType))
578  {
579 
580  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
581  "Reference convolution2d: weights type not supported for quantized input.");
582  }
583  else
584  {
585  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
586  "Reference DepthwiseConvolution2d: weights is not a supported type.");
587 
588  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
589  "Reference DepthwiseConvolution2d: input and weights types mismatched.");
590  }
591 
592  if (biases.has_value())
593  {
594  std::array<DataType,3> biasesSupportedTypes =
595  {
596  DataType::Float32,
597  DataType::Float16,
598  DataType::Signed32
599  };
600  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
601  "Reference DepthwiseConvolution2d: biases is not a supported type.");
602  }
603  ignore_unused(descriptor);
604 
605  return supported;
606 
607 }
608 
610  const TensorInfo& output,
611  Optional<std::string&> reasonIfUnsupported) const
612 {
613  bool supported = true;
614 
615  std::array<DataType,4> supportedInputTypes = {
616  DataType::QAsymmS8,
617  DataType::QAsymmU8,
618  DataType::QSymmS8,
619  DataType::QSymmS16
620  };
621 
622  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
623  "Reference for Dequantize layer: input type not supported.");
624 
625  supported &= CheckSupportRule( TypeNotPerAxisQuantized(input), reasonIfUnsupported,
626  "Reference for Dequantize layer: per-axis quantized input not support .");
627 
628  supported &= CheckSupportRule(TypeNotPerAxisQuantized(input), reasonIfUnsupported,
629  "Reference dequantize: per-axis quantized input not support .");
630 
631  std::array<DataType,2> supportedOutputTypes = {
632  DataType::Float32,
633  DataType::Float16
634  };
635 
636  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
637  "Reference for Dequantize layer: output type not supported.");
638 
639  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
640  "Reference for Dequantize layer: input/output shapes have different num total "
641  "elements.");
642 
643  return supported;
644 }
645 
647  const TensorInfo& scores,
648  const TensorInfo& anchors,
649  const TensorInfo& detectionBoxes,
650  const TensorInfo& detectionClasses,
651  const TensorInfo& detectionScores,
652  const TensorInfo& numDetections,
653  const DetectionPostProcessDescriptor& descriptor,
654  Optional<std::string&> reasonIfUnsupported) const
655 {
656  boost::ignore_unused(anchors, detectionBoxes, detectionClasses, detectionScores, numDetections, descriptor);
657 
658  bool supported = true;
659 
660  std::array<DataType,3> supportedInputTypes =
661  {
662  DataType::Float32,
663  DataType::QAsymmU8,
664  DataType::QSymmS16
665  };
666 
667  supported &= CheckSupportRule(TypeAnyOf(boxEncodings, supportedInputTypes), reasonIfUnsupported,
668  "Reference DetectionPostProcess: input 0 is not a supported type.");
669 
670  supported &= CheckSupportRule(TypeAnyOf(scores, supportedInputTypes), reasonIfUnsupported,
671  "Reference DetectionPostProcess: input 1 is not a supported type.");
672 
673  return supported;
674 }
675 
// Dilated depthwise support is delegated wholesale to the plain depthwise
// query — presumably the reference depthwise kernel handles dilation itself
// (TODO confirm against RefDepthwiseConvolution2dWorkload).
bool RefLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
                                                             const TensorInfo& output,
                                                             const DepthwiseConvolution2dDescriptor& descriptor,
                                                             const TensorInfo& weights,
                                                             const Optional<TensorInfo>& biases,
                                                             Optional<std::string&> reasonIfUnsupported) const
{
    return IsDepthwiseConvolutionSupported(input, output, descriptor, weights, biases, reasonIfUnsupported);
}
685 
687  const TensorInfo& input1,
688  const TensorInfo& output,
689  Optional<std::string&> reasonIfUnsupported) const
690 {
691  bool supported = true;
692 
693  std::array<DataType,4> supportedTypes = {
694  DataType::Float32,
695  DataType::Float16,
696  DataType::QAsymmU8,
697  DataType::QSymmS16
698  };
699 
700  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
701  "Reference division: input 0 is not a supported type.");
702 
703  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
704  "Reference division: input 1 is not a supported type.");
705 
706  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
707  "Reference division: output is not a supported type.");
708 
709  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
710  "Reference division: input 0 and Input 1 types are mismatched");
711 
712  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
713  "Reference division: input and output types are mismatched");
714 
715  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
716  "Reference division: shapes are not suitable for implicit broadcast.");
717 
718  return supported;
719 }
720 
721 bool RefLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
722  const TensorInfo& output,
723  const ElementwiseUnaryDescriptor& descriptor,
724  Optional<std::string&> reasonIfUnsupported) const
725 {
726  boost::ignore_unused(descriptor);
727 
728  std::array<DataType, 4> supportedTypes =
729  {
730  DataType::Float32,
731  DataType::Float16,
732  DataType::QAsymmU8,
733  DataType::QSymmS16
734  };
735 
736  bool supported = true;
737 
738  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
739  "Reference elementwise unary: input type not supported");
740 
741  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
742  "Reference elementwise unary: output type not supported");
743 
744  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
745  "Reference elementwise unary: input and output types not matching");
746 
747  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
748  "Reference elementwise unary: input and output shapes"
749  "have different number of total elements");
750 
751  return supported;
752 }
753 
755  const TensorInfo& input1,
756  const TensorInfo& output,
757  Optional<std::string&> reasonIfUnsupported) const
758 {
759  return IsComparisonSupported(input0,
760  input1,
761  output,
762  ComparisonDescriptor(ComparisonOperation::Equal),
763  reasonIfUnsupported);
764 }
765 
767  const FakeQuantizationDescriptor& descriptor,
768  Optional<std::string&> reasonIfUnsupported) const
769 {
770  ignore_unused(descriptor);
771  bool supported = true;
772 
773  std::array<DataType,1> supportedTypes =
774  {
775  DataType::Float32
776  };
777 
778  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
779  "Reference fake quantization: input type not supported.");
780 
781  return supported;
782 }
783 
785  const TensorInfo& output,
786  Optional<std::string&> reasonIfUnsupported) const
787 {
788  ignore_unused(output);
789  bool supported = true;
790 
791  std::array<DataType,3> supportedTypes =
792  {
793  DataType::Float32,
794  DataType::Float16,
795  DataType::QSymmS16
796  };
797 
798  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
799  "Reference Floor: input type not supported.");
800 
801  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
802  "Reference Floor: output type not supported.");
803 
804  return supported;
805 }
806 
808  const TensorInfo& output,
809  const TensorInfo& weights,
810  const TensorInfo& biases,
811  const FullyConnectedDescriptor& descriptor,
812  Optional<std::string&> reasonIfUnsupported) const
813 {
814  bool supported = true;
815 
816  // Define supported types.
817  std::array<DataType,4> supportedTypes =
818  {
819  DataType::Float32,
820  DataType::Float16,
821  DataType::QAsymmU8,
822  DataType::QSymmS16
823  };
824 
825  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
826  "Reference Fully Connected: input type not supported.");
827 
828  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
829  "Reference Fully Connected: output type not supported.");
830 
831  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
832  "Reference Fully Connected: input and output types mismatched.");
833 
834  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
835  "Reference Fully Connected: weights type not supported.");
836 
837  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
838  "Reference Fully Connected: input and weight types mismatched.");
839 
840  if (descriptor.m_BiasEnabled)
841  {
842  // Defined supported types for bias
843  std::array<DataType, 3>
844  supportedBiasTypes =
845  {
846  DataType::Float32,
847  DataType::Float16,
848  DataType::Signed32
849  };
850 
851  supported &= CheckSupportRule(TypeAnyOf(biases, supportedBiasTypes), reasonIfUnsupported,
852  "Reference Fully Connected: bias type not supported.");
853 
854  supported &= CheckSupportRule(BiasAndWeightsTypesMatch(biases, weights), reasonIfUnsupported,
855  "Reference Fully Connected: bias and weight types mismatch.");
856 
857  supported &= CheckSupportRule(BiasAndWeightsTypesCompatible(weights, supportedBiasTypes), reasonIfUnsupported,
858  "Reference Fully Connected: bias type inferred from weights is incompatible.");
859 
860  }
861 
862  return supported;
863 }
864 
866  const armnn::TensorInfo& input1,
867  const armnn::TensorInfo& output,
868  armnn::Optional<std::string&> reasonIfUnsupported) const
869 {
870  bool supported = true;
871  std::array<DataType,4> supportedTypes =
872  {
873  DataType::Float32,
874  DataType::Float16,
875  DataType::QAsymmU8,
876  DataType::QSymmS16
877  };
878 
879  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
880  "Reference Gather: input type not supported");
881 
882  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
883  "Reference Gather: output type not supported");
884 
885  supported &= CheckSupportRule(TypeIs(input1, DataType::Signed32), reasonIfUnsupported,
886  "Reference Gather: indices (input1) type not supported");
887 
888  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
889  "Reference Gather: input and output types not matching");
890 
891  return supported;
892 }
893 
895  const TensorInfo& input1,
896  const TensorInfo& output,
897  Optional<std::string&> reasonIfUnsupported) const
898 {
899  return IsComparisonSupported(input0,
900  input1,
901  output,
902  ComparisonDescriptor(ComparisonOperation::Greater),
903  reasonIfUnsupported);
904 }
905 
907  Optional<std::string&> /*reasonIfUnsupported*/) const
908 {
909  return true;
910 }
911 
912 bool RefLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input,
913  const TensorInfo& output,
914  const InstanceNormalizationDescriptor& descriptor,
915  Optional<std::string&> reasonIfUnsupported) const
916 {
917  ignore_unused(descriptor);
918  // Define supported types
919  std::array<DataType, 4> supportedTypes =
920  {
921  DataType::Float32,
922  DataType::Float16
923  };
924 
925  bool supported = true;
926 
927  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
928  "Reference Instance Normalization: input type not supported.");
929 
930  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
931  "Reference Instance Normalization: output type not supported.");
932 
933  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
934  "Reference Instance Normalization: input and output types mismatched.");
935 
936  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
937  "Reference Instance Normalization: input and output shapes have different "
938  "num total elements.");
939 
940  return supported;
941 }
942 
944  const TensorInfo& output,
945  const L2NormalizationDescriptor& descriptor,
946  Optional<std::string&> reasonIfUnsupported) const
947 {
948  ignore_unused(descriptor);
949  // Define supported types
950  std::array<DataType, 4> supportedTypes =
951  {
952  DataType::Float32,
953  DataType::Float16,
954  DataType::QAsymmU8,
955  DataType::QSymmS16
956  };
957 
958  bool supported = true;
959 
960  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
961  "Reference L2normalization: input type not supported.");
962 
963  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
964  "Reference L2normalization: output type not supported.");
965 
966  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
967  "Reference L2normalization: input and output types mismatched.");
968 
969  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
970  "Reference L2normalization: input and output shapes have different "
971  "num total elements.");
972 
973  return supported;
974 }
975 
976 bool RefLayerSupport::IsLogSoftmaxSupported(const TensorInfo& input,
977  const TensorInfo& output,
978  const LogSoftmaxDescriptor& descriptor,
979  Optional<std::string&> reasonIfUnsupported) const
980 {
981  ignore_unused(descriptor);
982 
983  std::array<DataType, 2> supportedTypes =
984  {
985  DataType::Float32,
986  DataType::Float16
987  };
988 
989  bool supported = true;
990  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
991  "Reference LogSoftmax: input type not supported");
992 
993  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
994  "Reference LogSoftmax: output type not supported");
995 
996  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
997  "Reference LogSoftmax: input and output types do not match");
998 
999  return supported;
1000 }
1001 
                                      const TensorInfo& outputStateIn,
                                      const TensorInfo& cellStateIn,
                                      const TensorInfo& scratchBuffer,
                                      const TensorInfo& outputStateOut,
                                      const TensorInfo& cellStateOut,
                                      const TensorInfo& output,
                                      const LstmDescriptor& descriptor,
                                      const LstmInputParamsInfo& paramsInfo,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    // NOTE(review): both descriptor and paramsInfo ARE used below, so these
    // ignore_unused calls are redundant (though harmless).
    ignore_unused(descriptor);
    ignore_unused(paramsInfo);

    bool supported = true;

    std::array<DataType,2> supportedTypes = {
        DataType::Float32,
        DataType::QSymmS16
    };

    // check inputs and outputs
    // Every input/output tensor must share the same (supported) data type.
    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference Lstm: input is not a supported type.");
    supported &= CheckSupportRule(TypesAreEqual(input, outputStateIn), reasonIfUnsupported,
                                  "Reference Lstm: input and outputStateIn types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, cellStateIn), reasonIfUnsupported,
                                  "Reference Lstm: input and cellStateIn types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, scratchBuffer), reasonIfUnsupported,
                                  "Reference Lstm: input and scratchBuffer types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, outputStateOut), reasonIfUnsupported,
                                  "Reference Lstm: input and outputStateOut types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, cellStateOut), reasonIfUnsupported,
                                  "Reference Lstm: input and cellStateOut types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference Lstm: input and output types are mismatched");
    // check layer parameters
    // The mandatory weight/bias tensors must also match the input's type.
    supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToForgetWeights()), reasonIfUnsupported,
                                  "Reference Lstm: input and InputToForgetWeights types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToCellWeights()), reasonIfUnsupported,
                                  "Reference Lstm: input and InputToCellWeights types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToOutputWeights()), reasonIfUnsupported,
                                  "Reference Lstm: input and InputToOutputWeights types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToForgetWeights()), reasonIfUnsupported,
                                  "Reference Lstm: input and RecurrentToForgetWeights types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToCellWeights()), reasonIfUnsupported,
                                  "Reference Lstm: input and RecurrentToCellWeights types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToOutputWeights()), reasonIfUnsupported,
                                  "Reference Lstm: input and RecurrentToOutputWeights types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetGateBias()), reasonIfUnsupported,
                                  "Reference Lstm: input and ForgetGateBias types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellBias()), reasonIfUnsupported,
                                  "Reference Lstm: input and CellBias types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputGateBias()), reasonIfUnsupported,
                                  "Reference Lstm: input and OutputGateBias types are mismatched");
    // Non-CIFG networks additionally carry input-gate parameters.
    if (!descriptor.m_CifgEnabled)
    {
        supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToInputWeights()), reasonIfUnsupported,
                                      "Reference Lstm: input and InputToInputWeights types are mismatched");
        supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToInputWeights()),
                                      reasonIfUnsupported,
                                      "Reference Lstm: input and RecurrentToInputWeights types are mismatched");
        supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputGateBias()), reasonIfUnsupported,
                                      "Reference Lstm: input and InputGateBias types are mismatched");
        if (descriptor.m_PeepholeEnabled)
        {
            supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToInputWeights()),
                                          reasonIfUnsupported,
                                          "Reference Lstm: input and CellToInputWeights types are mismatched");
        }
    }
    // Peephole connections add cell-to-gate weight tensors.
    if (descriptor.m_PeepholeEnabled)
    {
        supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToForgetWeights()), reasonIfUnsupported,
                                      "Reference Lstm: input and CellToForgetWeights types are mismatched");
        supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToOutputWeights()), reasonIfUnsupported,
                                      "Reference Lstm: input and CellToOutputWeights types are mismatched");
    }
    // Projection layer parameters (bias is optional even when projection is on).
    if (descriptor.m_ProjectionEnabled)
    {
        supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionWeights()), reasonIfUnsupported,
                                      "Reference Lstm: input and mProjectionWeights types are mismatched");
        if (paramsInfo.m_ProjectionBias != nullptr)
        {
            supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
                                          "Reference Lstm: input and ProjectionBias types are mismatched");
        }
    }
    // Layer normalisation weights (input-gate set only exists without CIFG).
    if (descriptor.m_LayerNormEnabled)
    {
        if (!descriptor.m_CifgEnabled)
        {
            supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputLayerNormWeights()),
                                          reasonIfUnsupported,
                                          "Reference Lstm: input and InputLayerNormWeights types are mismatched");
        }
        supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetLayerNormWeights()),
                                      reasonIfUnsupported,
                                      "Reference Lstm: input and ForgetLayerNormWeights types are mismatched");
        supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellLayerNormWeights()),
                                      reasonIfUnsupported,
                                      "Reference Lstm: input and CellLayerNormWeights types are mismatched");
        supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputLayerNormWeights()),
                                      reasonIfUnsupported,
                                      "Reference Lstm: input and OutputLayerNormWeights types are mismatched");
    }

    return supported;
}
1111 
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    bool supported = true;

    std::array<DataType,5> supportedTypes = {
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    // All three tensors must use a supported (and identical) data type.
    supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
                                  "Reference maximum: input 0 is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
                                  "Reference maximum: input 1 is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference maximum: output is not a supported type.");

    supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
                                  "Reference maximum: input 0 and Input 1 types are mismatched");

    supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
                                  "Reference maximum: input and output types are mismatched");

    // The two input shapes must be broadcastable onto the output shape.
    supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
                                  "Reference maximum: shapes are not suitable for implicit broadcast.");

    return supported;
}
1147 
                                      const TensorInfo& output,
                                      const MeanDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    bool supported = true;
    std::string meanLayerStr = "Mean";
    std::string outputTensorStr = "output";

    std::array<DataType,4> supportedTypes =
    {
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference Mean: input type not supported.");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference Mean: input and output types are mismatched");

    // Validate the output rank against what the reduction should produce:
    //  - m_KeepDims: reduced axes are retained, so input and output ranks match;
    //  - empty m_Axis: everything is reduced down to a single dimension;
    //  - otherwise: rank shrinks by the number of reduced axes (floored at 1).
    if (descriptor.m_KeepDims)
    {
        supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, input.GetNumDimensions()),
                                      reasonIfUnsupported,
                                      CreateIncorrectDimensionsErrorMsg(input.GetNumDimensions(),
                                                                        output.GetNumDimensions(),
                                                                        meanLayerStr, outputTensorStr).data());
    }
    else if (descriptor.m_Axis.empty())
    {
        supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
                                      reasonIfUnsupported,
                                      CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
                                                                        meanLayerStr, outputTensorStr).data());
    }
    else
    {
        // NOTE(review): unsigned subtraction — assumes m_Axis.size() <= input
        // rank, otherwise outputDim wraps around. Confirm callers guarantee this.
        auto outputDim = input.GetNumDimensions() - boost::numeric_cast<unsigned int>(descriptor.m_Axis.size());

        if (outputDim > 0)
        {
            supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, outputDim),
                                          reasonIfUnsupported,
                                          CreateIncorrectDimensionsErrorMsg(outputDim, output.GetNumDimensions(),
                                                                            meanLayerStr, outputTensorStr).data());
        }
        else
        {
            supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
                                          reasonIfUnsupported,
                                          CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
                                                                            meanLayerStr, outputTensorStr).data());
        }
    }

    return supported;
}
1208 
bool RefLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                        const TensorInfo& output,
                                        const MergerDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    // Merger support is checked exactly as Concat support; simply forward.
    return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
}
1216 
                                         const TensorInfo &output,
                                         Optional<std::string &> reasonIfUnsupported) const
{
    bool supported = true;

    // MemCopy moves data without interpreting it, so Boolean is allowed too.
    std::array<DataType,5> supportedTypes =
    {
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmU8,
        DataType::QSymmS16,
        DataType::Boolean
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference MemCopy: input type not supported");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference MemCopy: output type not supported");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference MemCopy: input and output types are mismatched");

    return supported;
}
1243 
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    bool supported = true;

    std::array<DataType,4> supportedTypes = {
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    // All three tensors must use a supported (and identical) data type.
    supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
                                  "Reference minimum: input 0 is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
                                  "Reference minimum: input 1 is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference minimum: output is not a supported type.");

    supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
                                  "Reference minimum: input 0 and Input 1 types are mismatched");

    supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
                                  "Reference minimum: input and output types are mismatched");

    // The two input shapes must be broadcastable onto the output shape.
    supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
                                  "Reference minimum: shapes are not suitable for implicit broadcast.");

    return supported;
}
1278 
1280  const TensorInfo& input1,
1281  const TensorInfo& output,
1282  Optional<std::string&> reasonIfUnsupported) const
1283 {
1284  bool supported = true;
1285 
1286  std::array<DataType,6> supportedTypes = {
1287  DataType::Float32,
1288  DataType::Float16,
1289  DataType::QAsymmU8,
1290  DataType::QAsymmS8,
1291  DataType::QSymmS16
1292  };
1293 
1294  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1295  "Reference multiplication: input 0 is not a supported type.");
1296 
1297  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1298  "Reference multiplication: input 1 is not a supported type.");
1299 
1300  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1301  "Reference multiplication: output is not a supported type.");
1302 
1303  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1304  "Reference multiplication: input 0 and Input 1 types are mismatched");
1305 
1306  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1307  "Reference multiplication: input and output types are mismatched");
1308 
1309  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1310  "Reference multiplication: shapes are not suitable for implicit broadcast.");
1311 
1312  return supported;
1313 }
1314 
                                               const TensorInfo& output,
                                               const NormalizationDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);

    // Define supported types
    std::array<DataType, 4> supportedTypes =
    {
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    bool supported = true;

    // Input and output must be supported types with the same element count.
    // NOTE(review): unlike most checks in this file, the types are not
    // required to be equal to each other here — confirm this is intentional.
    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference normalization: input type not supported.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference normalization: output type not supported.");

    supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
                                  "Reference normalization: input and output shapes have different "
                                  "num total elements.");

    return supported;
}
1345 
                                        Optional<std::string&> /*reasonIfUnsupported*/) const
{
    // The reference backend accepts any tensor as a network output.
    return true;
}
1351 
                                     const TensorInfo& output,
                                     const PadDescriptor& descriptor,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);
    bool supported = true;

    // Define supported output and inputs types.
    std::array<DataType,4> supportedTypes =
    {
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    // Input and output must use the same supported data type.
    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference pad: input is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference pad: output is not a supported type.");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference pad: input and output types are mismatched.");

    return supported;
}
1380 
                                         const TensorInfo& output,
                                         const PermuteDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);
    bool supported = true;

    // Define supported output and inputs types.
    // NOTE(review): Float16 is absent here although most layers in this file
    // accept it — confirm whether that is intentional for Permute.
    std::array<DataType,3> supportedTypes =
    {
        DataType::Float32,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference permute: input is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference permute: output is not a supported type.");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference permute: input and output types are mismatched.");

    return supported;
}
1408 
1410  const TensorInfo& output,
1411  const Pooling2dDescriptor& descriptor,
1412  Optional<std::string&> reasonIfUnsupported) const
1413 {
1414  ignore_unused(descriptor);
1415  bool supported = true;
1416 
1417  // Define supported output and inputs types.
1418  std::array<DataType,5> supportedTypes =
1419  {
1420  DataType::Float32,
1421  DataType::Float16,
1422  DataType::QAsymmS8,
1423  DataType::QAsymmU8,
1424  DataType::QSymmS16
1425  };
1426 
1427  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1428  "Reference poolind2d: input is not a supported type.");
1429 
1430  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1431  "Reference poolind2d: output is not a supported type.");
1432 
1433  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1434  "Reference poolind2d: input and output types are mismatched.");
1435 
1436  return supported;
1437 }
1438 
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    bool supported = true;

    // Define supported input types.
    // Quantize accepts both float and already-quantized inputs (re-quantize).
    std::array<DataType,6> supportedInputTypes = {
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS8,
        DataType::QSymmS16
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
                                  "Reference quantize: input type not supported.");

    // Define supported output types.
    // The output must always be one of the quantized types.
    std::array<DataType,4> supportedOutputTypes = {
        DataType::QAsymmU8,
        DataType::QAsymmS8,
        DataType::QSymmS8,
        DataType::QSymmS16
    };
    supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
                                  "Reference quantize: output type not supported.");

    // Element-wise operation: shapes must carry the same number of elements.
    supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
                                  "Reference quantize: input and output shapes have different num total elements.");

    return supported;
}
1473 
1475  const TensorInfo& output,
1476  const ReshapeDescriptor& descriptor,
1477  Optional<std::string&> reasonIfUnsupported) const
1478 {
1479  ignore_unused(output);
1480  ignore_unused(descriptor);
1481  // Define supported output types.
1482  std::array<DataType,7> supportedOutputTypes =
1483  {
1484  DataType::Float32,
1485  DataType::Float16,
1486  DataType::Signed32,
1487  DataType::QAsymmS8,
1488  DataType::QAsymmU8,
1489  DataType::QSymmS16
1490  };
1491 
1492  return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
1493  "Reference reshape: input type not supported.");
1494 }
1495 
                                                const TensorInfo& output,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    bool supported = true;
    std::array<DataType,4> supportedTypes =
    {
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    // Input and output must use the same supported data type.
    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference ResizeBilinear: input type not supported");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference ResizeBilinear: output type not supported");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference ResizeBilinear: input and output types not matching");

    return supported;
}
1520 
                                        const TensorInfo& output,
                                        const ResizeDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    boost::ignore_unused(descriptor);
    bool supported = true;
    std::array<DataType,5> supportedTypes =
    {
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmU8,
        DataType::QAsymmS8,
        DataType::QSymmS16
    };

    // Input and output must use the same supported data type.
    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference Resize: input type not supported");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference Resize: output type not supported");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference Resize: input and output types not matching");

    return supported;
}
1548 
                                       const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    // Rsqrt is implemented as the elementwise-unary Rsqrt operation.
    return IsElementwiseUnarySupported(input,
                                       output,
                                       ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt),
                                       reasonIfUnsupported);
}
1558 
1559 bool RefLayerSupport::IsSliceSupported(const TensorInfo& input,
1560  const TensorInfo& output,
1561  const SliceDescriptor& descriptor,
1562  Optional<std::string&> reasonIfUnsupported) const
1563 {
1564  boost::ignore_unused(descriptor);
1565  bool supported = true;
1566 
1567  std::array<DataType, 3> supportedTypes =
1568  {
1569  DataType::Float32,
1570  DataType::QAsymmU8,
1571  DataType::QSymmS16
1572  };
1573 
1574  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1575  "Reference Slice: input type not supported");
1576 
1577  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1578  "Reference Slice: output type not supported");
1579 
1580  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1581  "Reference Slice: input and output types are mismatched");
1582 
1583  return supported;
1584 }
1585 
1587  const TensorInfo& output,
1588  const SoftmaxDescriptor& descriptor,
1589  Optional<std::string&> reasonIfUnsupported) const
1590 {
1591  boost::ignore_unused(descriptor);
1592  bool supported = true;
1593  std::array<DataType,6> supportedTypes =
1594  {
1595  DataType::Float32,
1596  DataType::Float16,
1597  DataType::QSymmS8,
1598  DataType::QAsymmS8,
1599  DataType::QAsymmU8,
1600  DataType::QSymmS16
1601  };
1602 
1603  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1604  "Reference Softmax: output type not supported");
1605 
1606  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1607  "Reference Softmax: input type not supported");
1608 
1609  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1610  "Reference Softmax: input type not supported");
1611 
1612  return supported;
1613 }
1614 
                                                const TensorInfo& output,
                                                const SpaceToBatchNdDescriptor& descriptor,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    boost::ignore_unused(descriptor);
    bool supported = true;
    std::array<DataType,4> supportedTypes =
    {
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    // Input and output must use the same supported data type.
    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference SpaceToBatchNd: input type not supported");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference SpaceToBatchNd: output type not supported");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference SpaceToBatchNd: input and output types are mismatched");

    return supported;
}
1641 
                                              const TensorInfo& output,
                                              const SpaceToDepthDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported) const
{

    ignore_unused(descriptor);
    bool supported = true;

    std::array<DataType,4> supportedTypes =
    {
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    // Input and output must use the same supported data type.
    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference SpaceToDepth: input type not supported");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference SpaceToDepth: output type not supported");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference SpaceToDepth: input and output types are mismatched");

    return supported;
}
1670 
                                          const ViewsDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);
    bool supported = true;
    std::array<DataType,4> supportedTypes =
    {
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    // Legacy overload: only the input tensor's type is validated here.
    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference splitter: input type not supported");

    return supported;
}
1690 
1692  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
1693  const ViewsDescriptor& descriptor,
1694  Optional<std::string&> reasonIfUnsupported) const
1695 {
1696  ignore_unused(descriptor);
1697  bool supported = true;
1698  std::array<DataType,4> supportedTypes =
1699  {
1700  DataType::Float32,
1701  DataType::Float16,
1702  DataType::QAsymmU8,
1703  DataType::QSymmS16
1704  };
1705 
1706  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1707  "Reference splitter: output type not supported");
1708  for (const TensorInfo output : outputs)
1709  {
1710  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1711  "Reference splitter: input type not supported");
1712 
1713  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1714  "Reference splitter: input and output types mismatched.");
1715  }
1716 
1717  return supported;
1718 }
1719 
1720 bool RefLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
1721  const TensorInfo& output,
1722  const StackDescriptor& descriptor,
1723  Optional<std::string&> reasonIfUnsupported) const
1724 {
1725  ignore_unused(descriptor);
1726 
1727  bool supported = true;
1728  std::array<DataType,4> supportedTypes =
1729  {
1730  DataType::Float32,
1731  DataType::Float16,
1732  DataType::QAsymmU8,
1733  DataType::QSymmS16
1734  };
1735 
1736  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1737  "Reference stack: output type not supported");
1738  for (const TensorInfo* input : inputs)
1739  {
1740  BOOST_ASSERT(input != nullptr);
1741  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
1742  "Reference stack: input type not supported");
1743 
1744  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
1745  "Reference stack: input and output types mismatched.");
1746  }
1747 
1748  return supported;
1749 }
1750 
                                              const TensorInfo& output,
                                              const StridedSliceDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);
    bool supported = true;

    // NOTE(review): Float16 is absent here although most layers in this file
    // accept it — confirm whether that is intentional for StridedSlice.
    std::array<DataType,3> supportedTypes =
    {
        DataType::Float32,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference StridedSlice: input type not supported");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference StridedSlice: output type not supported");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference StridedSlice: input and output types are mismatched");

    return supported;
}
1777 
                                             const TensorInfo& input1,
                                             const TensorInfo& output,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    bool supported = true;

    std::array<DataType,4> supportedTypes = {
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    // All three tensors must use a supported (and identical) data type.
    supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
                                  "Reference subtraction: input 0 is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
                                  "Reference subtraction: input 1 is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference subtraction: output is not a supported type.");

    supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
                                  "Reference subtraction: input 0 and Input 1 types are mismatched");

    supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
                                  "Reference subtraction: input and output types are mismatched");

    // The two input shapes must be broadcastable onto the output shape.
    supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
                                  "Reference subtraction: shapes are not suitable for implicit broadcast.");

    return supported;
}
1812 
                                       const TensorInfo& alpha,
                                       const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    bool supported = true;

    std::array<DataType, 4> supportedTypes
    {
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    // Input, alpha and output must all share the same supported data type.
    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "PReLU: input is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(alpha, supportedTypes), reasonIfUnsupported,
                                  "PReLU: alpha is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "PReLU: output is not a supported type.");

    supported &= CheckSupportRule(TypesAreEqual(input, alpha, output), reasonIfUnsupported,
                                  "PReLU: input, alpha and output types are mismatched");

    // Alpha is broadcast against the input to produce the output shape.
    supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input, alpha, output), reasonIfUnsupported,
                                  "PReLU: shapes are not suitable for implicit broadcast");

    return supported;
}
1845 
                                                        const TensorInfo& output,
                                                        const TransposeConvolution2dDescriptor& descriptor,
                                                        const TensorInfo& weights,
                                                        const Optional<TensorInfo>& biases,
                                                        Optional<std::string&> reasonIfUnsupported) const
{
    boost::ignore_unused(descriptor);
    bool supported = true;

    std::array<DataType,4> supportedTypes =
    {
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    // Input and output must use the same supported data type.
    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference TransposeConvolution2d: input is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference TransposeConvolution2d: output is not a supported type.");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference TransposeConvolution2d: input and output types mismatched.");


    // Weights are validated differently for quantized vs. non-quantized input.
    const DataType inputType = input.GetDataType();
    if (inputType == DataType::QAsymmU8)
    {
        // Quantized input: weights may use a different quantized type,
        // including the deprecated per-axis symmetric format.
        std::array<DataType, 3> supportedWeightTypes =
        {
            DataType::QAsymmU8,
            DataType::QSymmS8,
            DataType::QuantizedSymm8PerAxis //Deprecated
        };

        supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
                                      "Reference TransposeConvolution2d: weights type not supported for "
                                      "quantized input.");
    }
    else
    {
        // Otherwise the weights must match the input's type exactly.
        supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
                                      "Reference TransposeConvolution2d: weights is not a supported type.");

        supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
                                      "Reference TransposeConvolution2d: input and weights types mismatched.");
    }

    // Bias is optional; when present it must be one of the listed types.
    if (biases.has_value())
    {
        std::array<DataType,3> biasesSupportedTypes =
        {
            DataType::Float32,
            DataType::Float16,
            DataType::Signed32
        };
        supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
                                      "Reference TransposeConvolution2d: biases is not a supported type.");
    }

    return supported;
}
1913 
1914 } // namespace armnn
bool m_ProjectionEnabled
Enable/disable the projection layer.
const TensorInfo & GetRecurrentToOutputWeights() const
Definition: LstmParams.hpp:149
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
bool IsBatchToSpaceNdSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
const TensorInfo & GetRecurrentToInputWeights() const
Definition: LstmParams.hpp:137
A NormalizationDescriptor for the NormalizationLayer.
const TensorInfo & GetCellToOutputWeights() const
Definition: LstmParams.hpp:161
const TensorInfo & GetProjectionWeights() const
Definition: LstmParams.hpp:181
bool IsFullyConnectedSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsMemCopySupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:92
const TensorInfo & GetInputLayerNormWeights() const
Definition: LstmParams.hpp:189
A FakeQuantizationDescriptor for the FakeQuantizationLayer.
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
bool IsRsqrtSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
bool IsResizeBilinearSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A PadDescriptor for the PadLayer.
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:20
bool IsTransposeConvolution2dSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
bool IsActivationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsReshapeSupported(const BackendId &backend, const TensorInfo &input, const ReshapeDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsBatchNormalizationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsSpaceToBatchNdSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
const TensorInfo & GetProjectionBias() const
Definition: LstmParams.hpp:185
bool IsDebugSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept.
A L2NormalizationDescriptor for the L2NormalizationLayer.
bool m_BiasEnabled
Enable/disable bias.
A ViewsDescriptor for the SplitterLayer. Descriptor to configure the splitting process. Number of Views must be equal to the number of outputs, and their order must match - e.g. first view corresponds to the first output, second view to the second output, etc.
bool IsSpaceToDepthSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsGreaterSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsStridedSliceSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
ISubgraphViewConverter supported
A ReshapeDescriptor for the ReshapeLayer.
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:82
bool IsMaximumSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnSupported=nullptr, size_t reasonIfUnSupportedMaxLength=0)
Deprecated in favor of IBackend and ILayerSupport interfaces.
const TensorInfo & GetCellBias() const
Definition: LstmParams.hpp:173
bool IsConcatSupported(const BackendId &backend, const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool m_PeepholeEnabled
Enable/disable peephole.
bool IsDivisionSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsAdditionSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
std::vector< float > scores({ 0.0f, 0.9f, 0.8f, 0.0f, 0.75f, 0.72f, 0.0f, 0.6f, 0.5f, 0.0f, 0.93f, 0.95f, 0.0f, 0.5f, 0.4f, 0.0f, 0.3f, 0.2f })
const TensorInfo & GetCellToForgetWeights() const
Definition: LstmParams.hpp:157
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)
An LstmDescriptor for the LstmLayer.
bool IsPreluSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsFloorSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
bool IsSplitterSupported(const BackendId &backend, const TensorInfo &input, const ViewsDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
A FullyConnectedDescriptor for the FullyConnectedLayer.
bool IsSoftmaxSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsNormalizationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
std::vector< float > boxEncodings({ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f })
const TensorInfo & GetInputToForgetWeights() const
Definition: LstmParams.hpp:125
bool IsGatherSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported, size_t reasonIfUnsupportedMaxLength)
const TensorInfo * m_ProjectionBias
Definition: LstmParams.hpp:105
A StackDescriptor for the StackLayer.
bool IsSupportedForDataTypeGeneric(Optional< std::string &> reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square).
Definition: Descriptors.hpp:35
bool IsL2NormalizationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A SoftmaxDescriptor for the SoftmaxLayer.
const TensorInfo & GetRecurrentToCellWeights() const
Definition: LstmParams.hpp:145
bool IsInputSupported(const BackendId &backend, const TensorInfo &input, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsOutputSupported(const BackendId &backend, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsLstmSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsConvertFp16ToFp32Supported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
DataType
Definition: Types.hpp:32
bool IsPooling2dSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
std::vector< float > anchors({ 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 100.5f, 1.0f, 1.0f })
bool IsFakeQuantizationSupported(const BackendId &backend, const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsMeanSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
const TensorInfo & GetForgetLayerNormWeights() const
Definition: LstmParams.hpp:193
DataType GetDataType() const
Definition: Tensor.hpp:95
bool IsConstantSupported(const BackendId &backend, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsStackSupported(const BackendId &backend, const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const StackDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsConvolution2dSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsMergerSupported(const BackendId &backend, const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
const TensorInfo & GetInputGateBias() const
Definition: LstmParams.hpp:165
A Pooling2dDescriptor for the Pooling2dLayer.
bool IsConvertFp32ToFp16Supported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsPermuteSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A SliceDescriptor for the SliceLayer.
const TensorInfo & GetCellToInputWeights() const
Definition: LstmParams.hpp:153
bool m_LayerNormEnabled
Enable/disable layer normalization.
const TensorInfo & GetRecurrentToForgetWeights() const
Definition: LstmParams.hpp:141
bool IsDepthwiseConvolutionSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsEqualSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A PermuteDescriptor for the PermuteLayer.
A Convolution2dDescriptor for the Convolution2dLayer.
const TensorInfo & GetCellLayerNormWeights() const
Definition: LstmParams.hpp:197
A MeanDescriptor for the MeanLayer.
bool IsMultiplicationSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
const TensorInfo & GetForgetGateBias() const
Definition: LstmParams.hpp:169
const TensorInfo & GetInputToInputWeights() const
Definition: LstmParams.hpp:121
bool IsResizeSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:43
bool IsSubtractionSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool has_value() const noexcept
Definition: Optional.hpp:53
bool IsDequantizeSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
const TensorInfo & GetInputToCellWeights() const
Definition: LstmParams.hpp:129
A ResizeDescriptor for the ResizeLayer.
const TensorInfo & GetOutputLayerNormWeights() const
Definition: LstmParams.hpp:201
bool IsPadSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsMinimumSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:62
bool IsArgMinMaxSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, char *reasonIfUnsupported, size_t reasonIfUnsupportedMaxLength)
const TensorInfo & GetOutputGateBias() const
Definition: LstmParams.hpp:177
bool IsDetectionPostProcessSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const DetectionPostProcessDescriptor &descriptor, char *reasonIfUnsupported, size_t reasonIfUnsupportedMaxLength)
bool IsQuantizeSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported, size_t reasonIfUnsupportedMaxLength)
An OriginsDescriptor for the ConcatLayer. Descriptor to configure the concatenation process...
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:237
const TensorInfo & GetInputToOutputWeights() const
Definition: LstmParams.hpp:133
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
A StridedSliceDescriptor for the StridedSliceLayer.