RefLayerSupport.cpp (ArmNN 20.11)
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "RefLayerSupport.hpp"
7 
8 #include <armnn/TypesUtils.hpp>
9 #include <armnn/Types.hpp>
10 #include <armnn/Descriptors.hpp>
11 #include <armnn/utility/IgnoreUnused.hpp>
12 #include <armnn/utility/NumericCast.hpp>
13 
14 #include <LayerSupportCommon.hpp>
15 #include <backendsCommon/LayerSupportRules.hpp>
16 
17 #include <vector>
18 #include <array>
19 
20 namespace armnn
21 {
22 
23 namespace
24 {
25 
26 template<typename Float32Func, typename Uint8Func, typename ... Params>
27 bool IsSupportedForDataTypeRef(Optional<std::string&> reasonIfUnsupported,
28  DataType dataType,
29  Float32Func floatFuncPtr,
30  Uint8Func uint8FuncPtr,
31  Params&&... params)
32 {
33  return IsSupportedForDataTypeGeneric(reasonIfUnsupported,
34  dataType,
35  &FalseFunc<Params...>,
36  floatFuncPtr,
37  uint8FuncPtr,
38  &FalseFunc<Params...>,
39  &FalseFunc<Params...>,
40  std::forward<Params>(params)...);
41 }
42 
43 } // anonymous namespace
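IsSupportedForDataTypeRef forwards the Float32 and QAsymmU8 function pointers to IsSupportedForDataTypeGeneric and rejects the remaining data-type slots with FalseFunc. For illustration only, a hypothetical check built on this helper (the name IsExampleSupportedRef is not part of ArmNN) could look like this:

    // Hypothetical sketch: accept Float32, reject QAsymmU8; the other data types
    // are rejected by the FalseFunc arguments supplied inside the helper above.
    bool IsExampleSupportedRef(const TensorInfo& input,
                               Optional<std::string&> reasonIfUnsupported)
    {
        return IsSupportedForDataTypeRef(reasonIfUnsupported,
                                         input.GetDataType(),
                                         &TrueFunc<>,
                                         &FalseFuncU8<>);
    }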
44 
45 namespace
46 {
47 
48 std::string CreateIncorrectDimensionsErrorMsg(unsigned int expected,
49  unsigned int actual,
50  std::string& layerStr,
51  std::string& tensorName)
52 {
53  std::string errorMsg = "Reference " + layerStr + ": Expected " + std::to_string(expected) + " dimensions but got" +
54  " " + std::to_string(actual) + " dimensions instead, for the '" + tensorName + "' tensor.";
55 
56  return errorMsg;
57 }
58 
59 } // anonymous namespace
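For illustration, a call such as the following (hypothetical values) produces the message shown in the comment:

    std::string layerStr   = "Mean";
    std::string tensorName = "output";
    // "Reference Mean: Expected 4 dimensions but got 2 dimensions instead, for the 'output' tensor."
    std::string msg = CreateIncorrectDimensionsErrorMsg(4, 2, layerStr, tensorName);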
60 
61 bool RefLayerSupport::IsAbsSupported(const TensorInfo& input, const TensorInfo& output,
62  Optional<std::string&> reasonIfUnsupported) const
63 {
64  return IsElementwiseUnarySupported(input,
65  output,
66  ElementwiseUnaryDescriptor(UnaryOperation::Abs),
67  reasonIfUnsupported);
68 }
69 
70 bool RefLayerSupport::IsActivationSupported(const TensorInfo& input,
71  const TensorInfo& output,
72  const ActivationDescriptor& descriptor,
73  Optional<std::string&> reasonIfUnsupported) const
74 {
75  bool supported = true;
76 
77  // Define supported types.
78  std::array<DataType,6> supportedTypes = {
85  };
86 
87  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
88  "Reference activation: input type not supported.");
89 
90  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
91  "Reference activation: output type not supported.");
92 
93  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
94  "Reference activation: input and output types mismatched.");
95 
96  supported &= CheckSupportRule(ShapesAreSameRank(input, output), reasonIfUnsupported,
97  "Reference activation: input and output shapes are of different rank.");
98 
99 
100  struct ActivationFunctionSupported : public Rule
101  {
102  ActivationFunctionSupported(const ActivationDescriptor& desc)
103  {
104  switch(desc.m_Function)
105  {
118  {
119  m_Res = true;
120  break;
121  }
122  default:
123  {
124  m_Res = false;
125  break;
126  }
127  }
128  }
129  };
130 
131  // Function is supported
132  supported &= CheckSupportRule(ActivationFunctionSupported(descriptor), reasonIfUnsupported,
133  "Reference activation: function not supported.");
134 
135  return supported;
136 }
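ActivationFunctionSupported above follows the rule pattern used throughout this file: a struct derived from Rule sets m_Res in its constructor, and CheckSupportRule appends the message to reasonIfUnsupported when the rule evaluates to false. A hypothetical rule written in the same style (not part of this file) would be:

    // Hypothetical rule, for illustration only.
    struct RankEquals : public Rule
    {
        RankEquals(const TensorInfo& info, unsigned int expectedRank)
        {
            m_Res = (info.GetNumDimensions() == expectedRank);
        }
    };

    // Used like the built-in rules:
    //   supported &= CheckSupportRule(RankEquals(input, 4), reasonIfUnsupported,
    //                                 "Reference example: input must be 4-D.");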
137 
138 bool RefLayerSupport::IsAdditionSupported(const TensorInfo& input0,
139  const TensorInfo& input1,
140  const TensorInfo& output,
141  Optional<std::string&> reasonIfUnsupported) const
142 {
143  bool supported = true;
144 
145  std::array<DataType,7> supportedTypes = {
153  };
154 
155  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
156  "Reference addition: input 0 is not a supported type.");
157 
158  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
159  "Reference addition: input 1 is not a supported type.");
160 
161  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
162  "Reference addition: output is not a supported type.");
163 
164  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
165  "Reference addition: input 0 and Input 1 types are mismatched");
166 
167  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
168  "Reference addition: input and output types are mismatched");
169 
170  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
171  "Reference addition: shapes are not suitable for implicit broadcast.");
172 
173  return supported;
174 }
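Caller-side sketch of the addition query above (hypothetical shapes; the include path for RefLayerSupport.hpp depends on how the reference backend headers are made visible, for example inside the ArmNN source tree or its unit tests):

    #include "RefLayerSupport.hpp"

    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>

    #include <iostream>
    #include <string>

    int main()
    {
        using namespace armnn;

        RefLayerSupport layerSupport;

        TensorInfo input0({1, 2, 2, 3}, DataType::Float32);
        TensorInfo input1({1, 1, 1, 3}, DataType::Float32); // broadcastable against input0
        TensorInfo output({1, 2, 2, 3}, DataType::Float32);

        std::string reason;
        Optional<std::string&> reasonRef(reason);

        if (!layerSupport.IsAdditionSupported(input0, input1, output, reasonRef))
        {
            std::cout << "Addition not supported: " << reason << std::endl;
        }
        return 0;
    }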
175 
176 bool RefLayerSupport::IsArgMinMaxSupported(const armnn::TensorInfo &input, const armnn::TensorInfo &output,
177  const armnn::ArgMinMaxDescriptor &descriptor,
178  armnn::Optional<std::string &> reasonIfUnsupported) const
179 {
180  IgnoreUnused(descriptor);
181 
182  std::array<DataType, 7> supportedTypes =
183  {
191  };
192 
193  bool supported = true;
194 
195  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
196  "Reference ArgMinMax: input is not a supported type.");
197  supported &= CheckSupportRule(TypeIs(output, DataType::Signed32), reasonIfUnsupported,
198  "Reference ArgMinMax: output type not supported");
199 
200  return supported;
201 }
202 
203 bool RefLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
204  const TensorInfo& output,
205  const TensorInfo& mean,
206  const TensorInfo& variance,
207  const TensorInfo& beta,
208  const TensorInfo& gamma,
209  const BatchNormalizationDescriptor& descriptor,
210  Optional<std::string&> reasonIfUnsupported) const
211 {
212  IgnoreUnused(descriptor);
213 
214  std::array<DataType, 6> supportedTypes =
215  {
222  };
223 
224  bool supported = true;
225 
226  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
227  "Reference batch normalization: input is not a supported type.");
228 
229  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
230  "Reference batch normalization: output is not a supported type.");
231 
232  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
233  "Reference batch normalization: input and output types are mismatched");
234 
235  supported &= CheckSupportRule(TypeAnyOf(mean, supportedTypes), reasonIfUnsupported,
236  "Reference batch normalization: mean is not a supported type.");
237 
238  supported &= CheckSupportRule(TypeAnyOf(variance, supportedTypes), reasonIfUnsupported,
239  "Reference batch normalization: variance is not a supported type.");
240 
241  supported &= CheckSupportRule(TypeAnyOf(beta, supportedTypes), reasonIfUnsupported,
242  "Reference batch normalization: beta is not a supported type.");
243 
244  supported &= CheckSupportRule(TypeAnyOf(gamma, supportedTypes), reasonIfUnsupported,
245  "Reference batch normalization: gamma is not a supported type.");
246 
247  return supported;
248 }
249 
250 bool RefLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
251  const TensorInfo& output,
252  const BatchToSpaceNdDescriptor& descriptor,
253  Optional<std::string&> reasonIfUnsupported) const
254 {
255  IgnoreUnused(descriptor);
256 
257  bool supported = true;
258 
259  std::string batchToSpaceNdLayerStr = "batchToSpaceNd";
260  std::string inputTensorStr = "input";
261  std::string outputTensorStr = "output";
262 
263  // Define supported types.
264  std::array<DataType,6> supportedTypes =
265  {
272  };
273 
274  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
275  "Reference BatchToSpaceNd: input type not supported.");
276 
277  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
278  "Reference BatchToSpaceNd: output type not supported.");
279 
280  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
281  "Reference BatchToSpaceNd: input and output types mismatched.");
282 
283  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 4),
284  reasonIfUnsupported,
285  CreateIncorrectDimensionsErrorMsg(4,
286  output.GetNumDimensions(),
287  batchToSpaceNdLayerStr,
288  outputTensorStr).data());
289 
290  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(input, 4),
291  reasonIfUnsupported,
292  CreateIncorrectDimensionsErrorMsg(4,
293  input.GetNumDimensions(),
294  batchToSpaceNdLayerStr,
295  inputTensorStr).data());
296 
297  return supported;
298 }
299 
300 bool RefLayerSupport::IsComparisonSupported(const TensorInfo& input0,
301  const TensorInfo& input1,
302  const TensorInfo& output,
303  const ComparisonDescriptor& descriptor,
304  Optional<std::string&> reasonIfUnsupported) const
305 {
306  IgnoreUnused(descriptor);
307  std::array<DataType, 8> supportedInputTypes =
308  {
317  };
318 
319  bool supported = true;
320  supported &= CheckSupportRule(TypeAnyOf(input0, supportedInputTypes), reasonIfUnsupported,
321  "Reference comparison: input 0 is not a supported type");
322 
323  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
324  "Reference comparison: input 0 and Input 1 types are mismatched");
325 
326  supported &= CheckSupportRule(TypeIs(output, DataType::Boolean), reasonIfUnsupported,
327  "Reference comparison: output is not of type Boolean");
328 
329  return supported;
330 }
331 
332 bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
333  const TensorInfo& output,
334  const ConcatDescriptor& descriptor,
335  Optional<std::string&> reasonIfUnsupported) const
336 {
337  IgnoreUnused(descriptor);
338 
339  bool supported = true;
340  std::array<DataType,6> supportedTypes =
341  {
348  };
349 
350  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
351  "Reference concatenation: output type not supported");
352  for (const TensorInfo* input : inputs)
353  {
354  ARMNN_ASSERT(input != nullptr);
355  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
356  "Reference concatenation: input type not supported");
357 
358  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
359  "Reference concatenation: input and output types mismatched.");
360  }
361 
362  return supported;
363 }
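The descriptor is ignored by the concat check above, so a caller-side sketch (hypothetical shapes, reusing layerSupport and reasonRef from the earlier addition example) only needs type-consistent inputs; ConcatDescriptor is an alias for OriginsDescriptor:

    TensorInfo in0({1, 2, 2, 3}, DataType::Float32);
    TensorInfo in1({1, 2, 2, 3}, DataType::Float32);
    TensorInfo out({2, 2, 2, 3}, DataType::Float32);

    std::vector<const TensorInfo*> inputs { &in0, &in1 };
    OriginsDescriptor concatDescriptor; // contents unused by this particular check
    bool concatOk = layerSupport.IsConcatSupported(inputs, out, concatDescriptor, reasonRef);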
364 
365 bool RefLayerSupport::IsConstantSupported(const TensorInfo& output,
366  Optional<std::string&> reasonIfUnsupported) const
367 {
368  std::array<DataType,8> supportedTypes =
369  {
378  };
379 
380  return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
381  "Reference constant: output is not a supported type.");
382 }
383 
384 bool RefLayerSupport::IsConvertBf16ToFp32Supported(const TensorInfo& input,
385  const TensorInfo& output,
386  Optional<std::string&> reasonIfUnsupported) const
387 {
388  bool supported = true;
389 
390  supported &= CheckSupportRule(TypeIs(input, DataType::BFloat16), reasonIfUnsupported,
391  "Reference for ConvertBf16ToFp32 layer: input type not supported");
392 
393  supported &= CheckSupportRule(TypeIs(output, DataType::Float32), reasonIfUnsupported,
394  "Reference for ConvertBf16ToFp32 layer: output type not supported");
395 
396  return supported;
397 }
398 
399 bool RefLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
400  const TensorInfo& output,
401  Optional<std::string&> reasonIfUnsupported) const
402 {
403  return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
404  input.GetDataType(),
405  &TrueFunc<>,
406  &FalseInputFuncF32<>,
407  &FalseFuncU8<>,
408  &FalseFuncI32<>,
409  &FalseFuncU8<>) &&
410  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
411  output.GetDataType(),
412  &FalseOutputFuncF16<>,
413  &TrueFunc<>,
414  &FalseFuncU8<>,
415  &FalseFuncI32<>,
416  &FalseFuncU8<>));
417 }
418 
419 bool RefLayerSupport::IsConvertFp32ToBf16Supported(const TensorInfo& input,
420  const TensorInfo& output,
421  Optional<std::string&> reasonIfUnsupported) const
422 {
423  bool supported = true;
424 
425  supported &= CheckSupportRule(TypeIs(input, DataType::Float32), reasonIfUnsupported,
426  "Reference for ConvertFp32ToBf16 layer: input type not supported");
427 
428  supported &= CheckSupportRule(TypeIs(output, DataType::BFloat16), reasonIfUnsupported,
429  "Reference for ConvertFp32ToBf16 layer: output type not supported");
430 
431  return supported;
432 }
433 
434 bool RefLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
435  const TensorInfo& output,
436  Optional<std::string&> reasonIfUnsupported) const
437 {
438  return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
439  input.GetDataType(),
440  &FalseInputFuncF16<>,
441  &TrueFunc<>,
442  &FalseFuncU8<>,
443  &FalseFuncI32<>,
444  &FalseFuncU8<>) &&
445  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
446  output.GetDataType(),
447  &TrueFunc<>,
448  &FalseOutputFuncF32<>,
449  &FalseFuncU8<>,
450  &FalseFuncI32<>,
451  &FalseFuncU8<>));
452 }
453 
454 bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
455  const TensorInfo& output,
456  const Convolution2dDescriptor& descriptor,
457  const TensorInfo& weights,
458  const Optional<TensorInfo>& biases,
459  Optional<std::string&> reasonIfUnsupported) const
460 {
461  bool supported = true;
462 
463  // Define supported types.
464  std::array<DataType,7> supportedTypes =
465  {
473  };
474 
475  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
476  "Reference Convolution2d: input is not a supported type.");
477 
478  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
479  "Reference Convolution2d: output is not a supported type.");
480 
481  // For Convolution2d, we allow to have BFloat16 input with Float32 output for optimization.
482  if (input.GetDataType() == DataType::BFloat16)
483  {
484  if (output.GetDataType() != DataType::BFloat16 && output.GetDataType() != DataType::Float32)
485  {
486  reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n";
487  supported = false;
488  }
489  }
490  else
491  {
492  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
493  "Reference Convolution2d: input and output types mismatched.");
494  }
495 
496  const DataType inputType = input.GetDataType();
497  if (IsQuantized8BitType(inputType))
498  {
500  std::array<DataType, 4> supportedWeightTypes =
501  {
505  DataType::QuantizedSymm8PerAxis // deprecated
506  };
508 
509  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
510  "Reference Convolution2d: weights type not supported for quantized input.");
511  }
512  else
513  {
514  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
515  "Reference Convolution2d: weights is not a supported type.");
516 
517  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
518  "Reference Convolution2d: input and weights types mismatched.");
519  }
520 
521  if (biases.has_value())
522  {
523  std::array<DataType,4> biasesSupportedTypes =
524  {
529  };
530 
531  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
532  "Reference Convolution2d: biases is not a supported type.");
533  }
534  IgnoreUnused(descriptor);
535 
536  return supported;
537 }
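As the branch above documents, a BFloat16 input may be paired with a Float32 output for Convolution2d. A caller-side sketch (hypothetical shapes, no bias, reusing layerSupport and reasonRef from the earlier addition example):

    TensorInfo convInput({1, 16, 16, 8},  DataType::BFloat16);
    TensorInfo convOutput({1, 16, 16, 4}, DataType::Float32);  // allowed with a BFloat16 input
    TensorInfo convWeights({4, 3, 3, 8},  DataType::BFloat16);

    Convolution2dDescriptor convDescriptor;                    // m_BiasEnabled defaults to false
    bool convOk = layerSupport.IsConvolution2dSupported(convInput, convOutput, convDescriptor,
                                                        convWeights, EmptyOptional(), reasonRef);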
538 
539 bool RefLayerSupport::IsDebugSupported(const TensorInfo& input,
540  const TensorInfo& output,
541  Optional<std::string&> reasonIfUnsupported) const
542 {
543  bool supported = true;
544 
545  std::array<DataType, 8> supportedTypes =
546  {
555  };
556 
557  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
558  "Reference for Debug layer: input type not supported");
559 
560  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
561  "Reference for Debug layer: output type not supported");
562 
563  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
564  "Reference for Debug layer: input and output types are mismatched");
565 
566  return supported;
567 }
568 
569 bool RefLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
570  const TensorInfo& output,
571  const DepthToSpaceDescriptor& descriptor,
572  Optional<std::string&> reasonIfUnsupported) const
573 {
574  IgnoreUnused(descriptor);
575  bool supported = true;
576 
577  std::array<DataType,6> supportedTypes =
578  {
585  };
586 
587  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
588  "Reference DepthToSpace: input type not supported");
589 
590  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
591  "Reference DepthToSpace: output type not supported");
592 
593  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
594  "Reference DepthToSpace: input and output types are mismatched");
595 
596  return supported;
597 }
598 
599 bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
600  const TensorInfo& output,
601  const DepthwiseConvolution2dDescriptor& descriptor,
602  const TensorInfo& weights,
603  const Optional<TensorInfo>& biases,
604  Optional<std::string&> reasonIfUnsupported) const
605 {
606  IgnoreUnused(descriptor);
607  bool supported = true;
608 
609  // Define supported types.
610  std::array<DataType,7> supportedTypes =
611  {
619  };
620 
621  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
622  "Reference DepthwiseConvolution2d: input is not a supported type.");
623 
624  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
625  "Reference DepthwiseConvolution2d: output is not a supported type.");
626 
627  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
628  "Reference DepthwiseConvolution2d: input and output types mismatched.");
629 
630  const DataType inputType = input.GetDataType();
631  if (IsQuantized8BitType(inputType))
632  {
634  std::array<DataType, 4> supportedWeightTypes =
635  {
639  DataType::QuantizedSymm8PerAxis // deprecated
640  };
642 
643  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
644  "Reference DepthwiseConvolution2d: weights type not supported for "
645  "quantized input.");
646  }
647  else
648  {
649  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
650  "Reference DepthwiseConvolution2d: weights is not a supported type.");
651 
652  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
653  "Reference DepthwiseConvolution2d: input and weights types mismatched.");
654  }
655 
656  if (biases.has_value())
657  {
658  std::array<DataType,4> biasesSupportedTypes =
659  {
664  };
665  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
666  "Reference DepthwiseConvolution2d: biases is not a supported type.");
667  }
668 
669  return supported;
670 
671 }
672 
673 bool RefLayerSupport::IsDequantizeSupported(const TensorInfo& input,
674  const TensorInfo& output,
675  Optional<std::string&> reasonIfUnsupported) const
676 {
677  bool supported = true;
678 
679  std::array<DataType,4> supportedInputTypes = {
684  };
685 
686  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
687  "Reference for Dequantize layer: input type not supported.");
688 
689  supported &= CheckSupportRule(TypeNotPerAxisQuantized(input), reasonIfUnsupported,
690  "Reference for Dequantize layer: per-axis quantized input not supported.");
691 
692  supported &= CheckSupportRule(TypeNotPerAxisQuantized(input), reasonIfUnsupported,
693  "Reference dequantize: per-axis quantized input not supported.");
694 
695  std::array<DataType,3> supportedOutputTypes = {
699  };
700 
701  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
702  "Reference for Dequantize layer: output type not supported.");
703 
704  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
705  "Reference for Dequantize layer: input/output shapes have different num total "
706  "elements.");
707 
708  return supported;
709 }
710 
711 bool RefLayerSupport::IsDetectionPostProcessSupported(const TensorInfo& boxEncodings,
712  const TensorInfo& scores,
713  const TensorInfo& anchors,
714  const TensorInfo& detectionBoxes,
715  const TensorInfo& detectionClasses,
716  const TensorInfo& detectionScores,
717  const TensorInfo& numDetections,
718  const DetectionPostProcessDescriptor& descriptor,
719  Optional<std::string&> reasonIfUnsupported) const
720 {
721  IgnoreUnused(anchors, detectionBoxes, detectionClasses, detectionScores, numDetections, descriptor);
722 
723  bool supported = true;
724 
725  std::array<DataType,6> supportedInputTypes =
726  {
733  };
734 
735  supported &= CheckSupportRule(TypeAnyOf(boxEncodings, supportedInputTypes), reasonIfUnsupported,
736  "Reference DetectionPostProcess: input 0 is not a supported type.");
737 
738  supported &= CheckSupportRule(TypeAnyOf(scores, supportedInputTypes), reasonIfUnsupported,
739  "Reference DetectionPostProcess: input 1 is not a supported type.");
740 
741  return supported;
742 }
743 
744 bool RefLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
745  const TensorInfo& output,
746  const DepthwiseConvolution2dDescriptor& descriptor,
747  const TensorInfo& weights,
748  const Optional<TensorInfo>& biases,
749  Optional<std::string&> reasonIfUnsupported) const
750 {
751  return IsDepthwiseConvolutionSupported(input, output, descriptor, weights, biases, reasonIfUnsupported);
752 }
753 
754 bool RefLayerSupport::IsDivisionSupported(const TensorInfo& input0,
755  const TensorInfo& input1,
756  const TensorInfo& output,
757  Optional<std::string&> reasonIfUnsupported) const
758 {
759  bool supported = true;
760 
761  std::array<DataType,7> supportedTypes = {
769  };
770 
771  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
772  "Reference division: input 0 is not a supported type.");
773 
774  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
775  "Reference division: input 1 is not a supported type.");
776 
777  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
778  "Reference division: output is not a supported type.");
779 
780  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
781  "Reference division: input 0 and Input 1 types are mismatched");
782 
783  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
784  "Reference division: input and output types are mismatched");
785 
786  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
787  "Reference division: shapes are not suitable for implicit broadcast.");
788 
789  return supported;
790 }
791 
792 bool RefLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
793  const TensorInfo& output,
794  const ElementwiseUnaryDescriptor& descriptor,
795  Optional<std::string&> reasonIfUnsupported) const
796 {
797  IgnoreUnused(descriptor);
798 
799  std::array<DataType, 7> supportedTypes =
800  {
808  };
809 
810  std::array<DataType, 1> logicalSupportedTypes =
811  {
813  };
814 
815  bool supported = true;
816 
817  if (descriptor.m_Operation == UnaryOperation::LogicalNot)
818  {
819  supported &= CheckSupportRule(TypeAnyOf(input, logicalSupportedTypes), reasonIfUnsupported,
820  "Reference elementwise unary: input type not supported");
821 
822  supported &= CheckSupportRule(TypeAnyOf(output, logicalSupportedTypes), reasonIfUnsupported,
823  "Reference elementwise unary: output type not supported");
824  }
825  else
826  {
827  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
828  "Reference elementwise unary: input type not supported");
829 
830  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
831  "Reference elementwise unary: output type not supported");
832  }
833 
834  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
835  "Reference elementwise unary: input and output types not matching");
836 
837  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
838  "Reference elementwise unary: input and output shapes"
839  "have different number of total elements");
840 
841  return supported;
842 }
843 
844 bool RefLayerSupport::IsEqualSupported(const TensorInfo& input0,
845  const TensorInfo& input1,
846  const TensorInfo& output,
847  Optional<std::string&> reasonIfUnsupported) const
848 {
849  return IsComparisonSupported(input0,
850  input1,
851  output,
852  ComparisonDescriptor(ComparisonOperation::Equal),
853  reasonIfUnsupported);
854 }
855 
856 bool RefLayerSupport::IsFakeQuantizationSupported(const TensorInfo& input,
857  const FakeQuantizationDescriptor& descriptor,
858  Optional<std::string&> reasonIfUnsupported) const
859 {
860  IgnoreUnused(descriptor);
861  bool supported = true;
862 
863  std::array<DataType,1> supportedTypes =
864  {
866  };
867 
868  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
869  "Reference fake quantization: input type not supported.");
870 
871  return supported;
872 }
873 
874 bool RefLayerSupport::IsFillSupported(const TensorInfo& input,
875  const TensorInfo& output,
876  const FillDescriptor& descriptor,
877  Optional<std::string&> reasonIfUnsupported) const
878 {
879  IgnoreUnused(descriptor);
880  IgnoreUnused(output);
881 
882  bool supported = true;
883 
884  std::array<DataType,3> supportedTypes =
885  {
889  };
890 
891  supported &= CheckSupportRule(TypeIs(input, DataType::Signed32), reasonIfUnsupported,
892  "Reference Fill: input type not supported.");
893 
894  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
895  "Reference Fill: output type not supported.");
896  return supported;
897 }
898 
899 bool RefLayerSupport::IsFloorSupported(const TensorInfo& input,
900  const TensorInfo& output,
901  Optional<std::string&> reasonIfUnsupported) const
902 {
903  IgnoreUnused(output);
904  bool supported = true;
905 
906  std::array<DataType,3> supportedTypes =
907  {
911  };
912 
913  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
914  "Reference Floor: input type not supported.");
915 
916  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
917  "Reference Floor: output type not supported.");
918 
919  return supported;
920 }
921 
922 bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
923  const TensorInfo& output,
924  const TensorInfo& weights,
925  const TensorInfo& biases,
926  const FullyConnectedDescriptor& descriptor,
927  Optional<std::string&> reasonIfUnsupported) const
928 {
929  bool supported = true;
930 
931  // Define supported types.
932  std::array<DataType,6> supportedTypes =
933  {
940  };
941 
942  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
943  "Reference Fully Connected: input type not supported.");
944 
945  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
946  "Reference Fully Connected: output type not supported.");
947 
948  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
949  "Reference Fully Connected: weights type not supported.");
950 
951  // For FullyConnected, we allow to have BFloat16 input with Float32 output for optimization.
952  if (input.GetDataType() == DataType::BFloat16)
953  {
954  if (output.GetDataType() != DataType::BFloat16 && output.GetDataType() != DataType::Float32)
955  {
956  reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n";
957  supported = false;
958  }
959  }
960  else
961  {
962  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
963  "Reference Fully Connected: input and output types mismatched.");
964  }
965 
966  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
967  "Reference Fully Connected: weights is not a supported type.");
968 
969  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
970  "Reference Fully Connected: input and weights types mismatched.");
971 
972  if (descriptor.m_BiasEnabled)
973  {
974  // Defined supported types for bias
975  std::array<DataType, 5>
976  supportedBiasTypes =
977  {
983  };
984 
985  supported &= CheckSupportRule(TypeAnyOf(biases, supportedBiasTypes), reasonIfUnsupported,
986  "Reference Fully Connected: bias type not supported.");
987 
988  supported &= CheckSupportRule(BiasAndWeightsTypesMatch(biases, weights), reasonIfUnsupported,
989  "Reference Fully Connected: bias and weight types mismatch.");
990 
991  supported &= CheckSupportRule(BiasAndWeightsTypesCompatible(weights, supportedBiasTypes), reasonIfUnsupported,
992  "Reference Fully Connected: bias type inferred from weights is incompatible.");
993 
994  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(biases, 1U), reasonIfUnsupported,
995  "Reference Fully Connected: bias must have 1 dimension.");
996 
997  }
998 
999  return supported;
1000 }
1001 
1002 bool RefLayerSupport::IsGatherSupported(const armnn::TensorInfo& input0,
1003  const armnn::TensorInfo& input1,
1004  const armnn::TensorInfo& output,
1005  const GatherDescriptor& descriptor,
1006  armnn::Optional<std::string&> reasonIfUnsupported) const
1007 {
1008  bool supported = true;
1009  std::array<DataType,7> supportedTypes =
1010  {
1018  };
1019 
1020  if (descriptor.m_Axis != 0)
1021  {
1022  reasonIfUnsupported.value() += std::string("Reference Gather: axis not supported\n");
1023  supported &= false;
1024  }
1025  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1026  "Reference Gather: input type not supported");
1027 
1028  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1029  "Reference Gather: output type not supported");
1030 
1031  supported &= CheckSupportRule(TypeIs(input1, DataType::Signed32), reasonIfUnsupported,
1032  "Reference Gather: indices (input1) type not supported");
1033 
1034  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1035  "Reference Gather: input and output types not matching");
1036 
1037  return supported;
1038 }
1039 
1040 bool RefLayerSupport::IsGreaterSupported(const TensorInfo& input0,
1041  const TensorInfo& input1,
1042  const TensorInfo& output,
1043  Optional<std::string&> reasonIfUnsupported) const
1044 {
1045  return IsComparisonSupported(input0,
1046  input1,
1047  output,
1048  ComparisonDescriptor(ComparisonOperation::Greater),
1049  reasonIfUnsupported);
1050 }
1051 
1052 bool RefLayerSupport::IsInputSupported(const TensorInfo& /*input*/,
1053  Optional<std::string&> /*reasonIfUnsupported*/) const
1054 {
1055  return true;
1056 }
1057 
1058 bool RefLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input,
1059  const TensorInfo& output,
1060  const InstanceNormalizationDescriptor& descriptor,
1061  Optional<std::string&> reasonIfUnsupported) const
1062 {
1063  IgnoreUnused(descriptor);
1064  // Define supported types
1065  std::array<DataType, 3> supportedTypes =
1066  {
1070  };
1071 
1072  bool supported = true;
1073 
1074  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1075  "Reference Instance Normalization: input type not supported.");
1076 
1077  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1078  "Reference Instance Normalization: output type not supported.");
1079 
1080  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1081  "Reference Instance Normalization: input and output types mismatched.");
1082 
1083  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1084  "Reference Instance Normalization: input and output shapes have different "
1085  "num total elements.");
1086 
1087  return supported;
1088 }
1089 
1090 bool RefLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
1091  const TensorInfo& output,
1092  const L2NormalizationDescriptor& descriptor,
1093  Optional<std::string&> reasonIfUnsupported) const
1094 {
1095  IgnoreUnused(descriptor);
1096  // Define supported types
1097  std::array<DataType, 6> supportedTypes =
1098  {
1105  };
1106 
1107  bool supported = true;
1108 
1109  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1110  "Reference L2normalization: input type not supported.");
1111 
1112  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1113  "Reference L2normalization: output type not supported.");
1114 
1115  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1116  "Reference L2normalization: input and output types mismatched.");
1117 
1118  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1119  "Reference L2normalization: input and output shapes have different "
1120  "num total elements.");
1121 
1122  return supported;
1123 }
1124 
1125 bool RefLayerSupport::IsLogicalBinarySupported(const TensorInfo& input0,
1126  const TensorInfo& input1,
1127  const TensorInfo& output,
1128  const LogicalBinaryDescriptor& descriptor,
1129  Optional<std::string&> reasonIfUnsupported) const
1130 {
1131  IgnoreUnused(descriptor);
1132 
1133  std::array<DataType, 1> supportedTypes =
1134  {
1136  };
1137 
1138  bool supported = true;
1139  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1140  "Reference LogicalBinary: input 0 type not supported");
1141  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1142  "Reference LogicalBinary: input 1 type not supported");
1143 
1144  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1145  "Reference LogicalBinary: input and output types do not match");
1146 
1147  return supported;
1148 }
1149 
1150 bool RefLayerSupport::IsLogicalUnarySupported(const TensorInfo& input,
1151  const TensorInfo& output,
1152  const ElementwiseUnaryDescriptor& descriptor,
1153  Optional<std::string&> reasonIfUnsupported) const
1154 {
1155  IgnoreUnused(descriptor);
1156 
1157  std::array<DataType, 1> supportedTypes =
1158  {
1160  };
1161 
1162  bool supported = true;
1163  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1164  "Reference LogicalUnary: input type not supported");
1165 
1166  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1167  "Reference LogicalUnary: input and output types do not match");
1168 
1169  return supported;
1170 }
1171 
1172 bool RefLayerSupport::IsLogSoftmaxSupported(const TensorInfo& input,
1173  const TensorInfo& output,
1174  const LogSoftmaxDescriptor& descriptor,
1175  Optional<std::string&> reasonIfUnsupported) const
1176 {
1177  IgnoreUnused(descriptor);
1178 
1179  std::array<DataType, 3> supportedTypes =
1180  {
1184  };
1185 
1186  bool supported = true;
1187  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1188  "Reference LogSoftmax: input type not supported");
1189 
1190  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1191  "Reference LogSoftmax: output type not supported");
1192 
1193  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1194  "Reference LogSoftmax: input and output types do not match");
1195 
1196  return supported;
1197 }
1198 
1199 bool RefLayerSupport::IsLstmSupported(const TensorInfo& input,
1200  const TensorInfo& outputStateIn,
1201  const TensorInfo& cellStateIn,
1202  const TensorInfo& scratchBuffer,
1203  const TensorInfo& outputStateOut,
1204  const TensorInfo& cellStateOut,
1205  const TensorInfo& output,
1206  const LstmDescriptor& descriptor,
1207  const LstmInputParamsInfo& paramsInfo,
1208  Optional<std::string&> reasonIfUnsupported) const
1209 {
1210  IgnoreUnused(descriptor);
1211  IgnoreUnused(paramsInfo);
1212 
1213  bool supported = true;
1214 
1215  std::array<DataType,3> supportedTypes = {
1219  };
1220 
1221  // check inputs and outputs
1222  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1223  "Reference Lstm: input is not a supported type.");
1224  supported &= CheckSupportRule(TypesAreEqual(input, outputStateIn), reasonIfUnsupported,
1225  "Reference Lstm: input and outputStateIn types are mismatched");
1226  supported &= CheckSupportRule(TypesAreEqual(input, cellStateIn), reasonIfUnsupported,
1227  "Reference Lstm: input and cellStateIn types are mismatched");
1228  supported &= CheckSupportRule(TypesAreEqual(input, scratchBuffer), reasonIfUnsupported,
1229  "Reference Lstm: input and scratchBuffer types are mismatched");
1230  supported &= CheckSupportRule(TypesAreEqual(input, outputStateOut), reasonIfUnsupported,
1231  "Reference Lstm: input and outputStateOut types are mismatched");
1232  supported &= CheckSupportRule(TypesAreEqual(input, cellStateOut), reasonIfUnsupported,
1233  "Reference Lstm: input and cellStateOut types are mismatched");
1234  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1235  "Reference Lstm: input and output types are mismatched");
1236  // check layer parameters
1237  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToForgetWeights()), reasonIfUnsupported,
1238  "Reference Lstm: input and InputToForgetWeights types are mismatched");
1239  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToCellWeights()), reasonIfUnsupported,
1240  "Reference Lstm: input and InputToCellWeights types are mismatched");
1241  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToOutputWeights()), reasonIfUnsupported,
1242  "Reference Lstm: input and InputToOutputWeights types are mismatched");
1243  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToForgetWeights()), reasonIfUnsupported,
1244  "Reference Lstm: input and RecurrentToForgetWeights types are mismatched");
1245  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToCellWeights()), reasonIfUnsupported,
1246  "Reference Lstm: input and RecurrentToCellWeights types are mismatched");
1247  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToOutputWeights()), reasonIfUnsupported,
1248  "Reference Lstm: input and RecurrentToOutputWeights types are mismatched");
1249  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetGateBias()), reasonIfUnsupported,
1250  "Reference Lstm: input and ForgetGateBias types are mismatched");
1251  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellBias()), reasonIfUnsupported,
1252  "Reference Lstm: input and CellBias types are mismatched");
1253  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputGateBias()), reasonIfUnsupported,
1254  "Reference Lstm: input and OutputGateBias types are mismatched");
1255  if (!descriptor.m_CifgEnabled)
1256  {
1257  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToInputWeights()), reasonIfUnsupported,
1258  "Reference Lstm: input and InputToInputWeights types are mismatched");
1259  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToInputWeights()),
1260  reasonIfUnsupported,
1261  "Reference Lstm: input and RecurrentToInputWeights types are mismatched");
1262  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputGateBias()), reasonIfUnsupported,
1263  "Reference Lstm: input and InputGateBias types are mismatched");
1264  if (descriptor.m_PeepholeEnabled)
1265  {
1266  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToInputWeights()),
1267  reasonIfUnsupported,
1268  "Reference Lstm: input and CellToInputWeights types are mismatched");
1269  }
1270  }
1271  if (descriptor.m_PeepholeEnabled)
1272  {
1273  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToForgetWeights()), reasonIfUnsupported,
1274  "Reference Lstm: input and CellToForgetWeights types are mismatched");
1275  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToOutputWeights()), reasonIfUnsupported,
1276  "Reference Lstm: input and CellToOutputWeights types are mismatched");
1277  }
1278  if (descriptor.m_ProjectionEnabled)
1279  {
1280  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionWeights()), reasonIfUnsupported,
1281  "Reference Lstm: input and mProjectionWeights types are mismatched");
1282  if (paramsInfo.m_ProjectionBias != nullptr)
1283  {
1284  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
1285  "Reference Lstm: input and ProjectionBias types are mismatched");
1286  }
1287  }
1288  if (descriptor.m_LayerNormEnabled)
1289  {
1290  if (!descriptor.m_CifgEnabled)
1291  {
1292  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputLayerNormWeights()),
1293  reasonIfUnsupported,
1294  "Reference Lstm: input and InputLayerNormWeights types are mismatched");
1295  }
1296  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetLayerNormWeights()),
1297  reasonIfUnsupported,
1298  "Reference Lstm: input and ForgetLayerNormWeights types are mismatched");
1299  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellLayerNormWeights()),
1300  reasonIfUnsupported,
1301  "Reference Lstm: input and CellLayerNormWeights types are mismatched");
1302  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputLayerNormWeights()),
1303  reasonIfUnsupported,
1304  "Reference Lstm: input and OutputLayerNormWeights types are mismatched");
1305  }
1306 
1307  return supported;
1308 }
1309 
1310 bool RefLayerSupport::IsMaximumSupported(const TensorInfo& input0,
1311  const TensorInfo& input1,
1312  const TensorInfo& output,
1313  Optional<std::string&> reasonIfUnsupported) const
1314 {
1315  bool supported = true;
1316 
1317  std::array<DataType,7> supportedTypes = {
1325  };
1326 
1327  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1328  "Reference maximum: input 0 is not a supported type.");
1329 
1330  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1331  "Reference maximum: input 1 is not a supported type.");
1332 
1333  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1334  "Reference maximum: output is not a supported type.");
1335 
1336  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1337  "Reference maximum: input 0 and Input 1 types are mismatched");
1338 
1339  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1340  "Reference maximum: input and output types are mismatched");
1341 
1342  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1343  "Reference maximum: shapes are not suitable for implicit broadcast.");
1344 
1345  return supported;
1346 }
1347 
1348 bool RefLayerSupport::IsMeanSupported(const TensorInfo& input,
1349  const TensorInfo& output,
1350  const MeanDescriptor& descriptor,
1351  Optional<std::string&> reasonIfUnsupported) const
1352 {
1353  bool supported = true;
1354  std::string meanLayerStr = "Mean";
1355  std::string outputTensorStr = "output";
1356 
1357  std::array<DataType,6> supportedTypes =
1358  {
1365  };
1366 
1367  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1368  "Reference Mean: input type not supported.");
1369 
1370  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1371  "Reference Mean: input and output types are mismatched");
1372 
1373  if (descriptor.m_KeepDims)
1374  {
1375  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, input.GetNumDimensions()),
1376  reasonIfUnsupported,
1377  CreateIncorrectDimensionsErrorMsg(input.GetNumDimensions(),
1378  output.GetNumDimensions(),
1379  meanLayerStr, outputTensorStr).data());
1380  }
1381  else if (descriptor.m_Axis.empty())
1382  {
1383  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1384  reasonIfUnsupported,
1385  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1386  meanLayerStr, outputTensorStr).data());
1387  }
1388  else
1389  {
1390  auto outputDim = input.GetNumDimensions() - armnn::numeric_cast<unsigned int>(descriptor.m_Axis.size());
1391 
1392  if (outputDim > 0)
1393  {
1394  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, outputDim),
1395  reasonIfUnsupported,
1396  CreateIncorrectDimensionsErrorMsg(outputDim, output.GetNumDimensions(),
1397  meanLayerStr, outputTensorStr).data());
1398  }
1399  else
1400  {
1401  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1402  reasonIfUnsupported,
1403  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1404  meanLayerStr, outputTensorStr).data());
1405  }
1406  }
1407 
1408  return supported;
1409 }
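Illustrative sketch of the rank rule in the final branch above: with m_KeepDims disabled and two reduction axes, a 4-D input is expected to produce a 2-D output (hypothetical shapes, reusing layerSupport and reasonRef from the earlier addition example):

    MeanDescriptor meanDescriptor;
    meanDescriptor.m_Axis     = {1, 2}; // reduce two of the four dimensions
    meanDescriptor.m_KeepDims = false;

    TensorInfo meanInput({2, 3, 4, 5}, DataType::Float32);
    TensorInfo meanOutput({2, 5},      DataType::Float32); // rank 4 - 2 axes = rank 2
    bool meanOk = layerSupport.IsMeanSupported(meanInput, meanOutput, meanDescriptor, reasonRef);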
1410 
1411 bool RefLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
1412  const TensorInfo& output,
1413  const MergerDescriptor& descriptor,
1414  Optional<std::string&> reasonIfUnsupported) const
1415 {
1416  return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
1417 }
1418 
1419 bool RefLayerSupport::IsMemCopySupported(const TensorInfo &input,
1420  const TensorInfo &output,
1421  Optional<std::string &> reasonIfUnsupported) const
1422 {
1423  bool supported = true;
1424 
1425  std::array<DataType,7> supportedTypes =
1426  {
1434  };
1435 
1436  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1437  "Reference MemCopy: input type not supported");
1438 
1439  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1440  "Reference MemCopy: output type not supported");
1441 
1442  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1443  "Reference MemCopy: input and output types are mismatched");
1444 
1445  return supported;
1446 }
1447 
1448 bool RefLayerSupport::IsMinimumSupported(const TensorInfo& input0,
1449  const TensorInfo& input1,
1450  const TensorInfo& output,
1451  Optional<std::string&> reasonIfUnsupported) const
1452 {
1453  bool supported = true;
1454 
1455  std::array<DataType,7> supportedTypes = {
1463  };
1464 
1465  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1466  "Reference minimum: input 0 is not a supported type.");
1467 
1468  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1469  "Reference minimum: input 1 is not a supported type.");
1470 
1471  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1472  "Reference minimum: output is not a supported type.");
1473 
1474  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1475  "Reference minimum: input 0 and Input 1 types are mismatched");
1476 
1477  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1478  "Reference minimum: input and output types are mismatched");
1479 
1480  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1481  "Reference minimum: shapes are not suitable for implicit broadcast.");
1482 
1483  return supported;
1484 }
1485 
1486 bool RefLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
1487  const TensorInfo& input1,
1488  const TensorInfo& output,
1489  Optional<std::string&> reasonIfUnsupported) const
1490 {
1491  bool supported = true;
1492 
1493  std::array<DataType,7> supportedTypes = {
1501  };
1502 
1503  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1504  "Reference multiplication: input 0 is not a supported type.");
1505 
1506  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1507  "Reference multiplication: input 1 is not a supported type.");
1508 
1509  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1510  "Reference multiplication: output is not a supported type.");
1511 
1512  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1513  "Reference multiplication: input 0 and Input 1 types are mismatched");
1514 
1515  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1516  "Reference multiplication: input and output types are mismatched");
1517 
1518  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1519  "Reference multiplication: shapes are not suitable for implicit broadcast.");
1520 
1521  return supported;
1522 }
1523 
1524 bool RefLayerSupport::IsNormalizationSupported(const TensorInfo& input,
1525  const TensorInfo& output,
1526  const NormalizationDescriptor& descriptor,
1527  Optional<std::string&> reasonIfUnsupported) const
1528 {
1529  IgnoreUnused(descriptor);
1530 
1531  // Define supported types
1532  std::array<DataType, 6> supportedTypes =
1533  {
1540  };
1541 
1542  bool supported = true;
1543 
1544  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1545  "Reference normalization: input type not supported.");
1546 
1547  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1548  "Reference normalization: output type not supported.");
1549 
1550  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1551  "Reference normalization: input and output shapes have different "
1552  "num total elements.");
1553 
1554  return supported;
1555 }
1556 
1557 bool RefLayerSupport::IsOutputSupported(const TensorInfo& /*output*/,
1558  Optional<std::string&> /*reasonIfUnsupported*/) const
1559 {
1560  return true;
1561 }
1562 
1563 bool RefLayerSupport::IsPadSupported(const TensorInfo& input,
1564  const TensorInfo& output,
1565  const PadDescriptor& descriptor,
1566  Optional<std::string&> reasonIfUnsupported) const
1567 {
1568  IgnoreUnused(descriptor);
1569  bool supported = true;
1570 
1571  // Define supported output and inputs types.
1572  std::array<DataType,6> supportedTypes =
1573  {
1580  };
1581 
1582  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1583  "Reference pad: input is not a supported type.");
1584 
1585  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1586  "Reference pad: output is not a supported type.");
1587 
1588  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1589  "Reference pad: input and output types are mismatched.");
1590 
1591  return supported;
1592 }
1593 
1594 bool RefLayerSupport::IsPermuteSupported(const TensorInfo& input,
1595  const TensorInfo& output,
1596  const PermuteDescriptor& descriptor,
1597  Optional<std::string&> reasonIfUnsupported) const
1598 {
1599  IgnoreUnused(descriptor);
1600  bool supported = true;
1601 
1602  // Define supported output and inputs types.
1603  std::array<DataType, 6> supportedTypes =
1604  {
1611  };
1612 
1613  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1614  "Reference permute: input is not a supported type.");
1615 
1616  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1617  "Reference permute: output is not a supported type.");
1618 
1619  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1620  "Reference permute: input and output types are mismatched.");
1621 
1622  return supported;
1623 }
1624 
1625 bool RefLayerSupport::IsPooling2dSupported(const TensorInfo& input,
1626  const TensorInfo& output,
1627  const Pooling2dDescriptor& descriptor,
1628  Optional<std::string&> reasonIfUnsupported) const
1629 {
1630  IgnoreUnused(descriptor);
1631  bool supported = true;
1632 
1633  // Define supported output and inputs types.
1634  std::array<DataType,6> supportedTypes =
1635  {
1642  };
1643 
1644  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1645  "Reference pooling2d: input is not a supported type.");
1646 
1647  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1648  "Reference pooling2d: output is not a supported type.");
1649 
1650  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1651  "Reference pooling2d: input and output types are mismatched.");
1652 
1653  return supported;
1654 }
1655 
1656 bool RefLayerSupport::IsQLstmSupported(const TensorInfo& input,
1657  const TensorInfo& previousOutputIn,
1658  const TensorInfo& previousCellStateIn,
1659  const TensorInfo& outputStateOut,
1660  const TensorInfo& cellStateOut,
1661  const TensorInfo& output,
1662  const QLstmDescriptor& descriptor,
1663  const LstmInputParamsInfo& paramsInfo,
1664  Optional<std::string&> reasonIfUnsupported) const
1665 {
1666  IgnoreUnused(input);
1667  IgnoreUnused(previousOutputIn);
1668  IgnoreUnused(previousCellStateIn);
1669  IgnoreUnused(outputStateOut);
1670  IgnoreUnused(cellStateOut);
1671  IgnoreUnused(output);
1672  IgnoreUnused(descriptor);
1673  IgnoreUnused(paramsInfo);
1674 
1675  IgnoreUnused(reasonIfUnsupported);
1676 
1677  return true;
1678 }
1679 
1680 bool RefLayerSupport::IsQuantizeSupported(const TensorInfo& input,
1681  const TensorInfo& output,
1682  Optional<std::string&> reasonIfUnsupported) const
1683 {
1684  bool supported = true;
1685 
1686  // Define supported input types.
1687  std::array<DataType,7> supportedInputTypes = {
1695  };
1696 
1697  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
1698  "Reference quantize: input type not supported.");
1699 
1700  // Define supported output types.
1701  std::array<DataType,4> supportedOutputTypes = {
1705  DataType::QSymmS16
1706  };
1707  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
1708  "Reference quantize: output type not supported.");
1709 
1710  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1711  "Reference quantize: input and output shapes have different num total elements.");
1712 
1713  return supported;
1714 }
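Caller-side sketch of a Float32 to QSymmS16 quantize query (QSymmS16 appears in the supported output types above; shape and quantization scale are hypothetical, reusing layerSupport and reasonRef from the earlier addition example):

    TensorInfo quantizeInput({1, 16}, DataType::Float32);
    TensorInfo quantizeOutput({1, 16}, DataType::QSymmS16, 0.05f, 0); // symmetric: zero offset
    bool quantizeOk = layerSupport.IsQuantizeSupported(quantizeInput, quantizeOutput, reasonRef);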
1715 
1716 bool RefLayerSupport::IsRankSupported(const TensorInfo& input,
1717  const TensorInfo& output,
1718  Optional<std::string&> reasonIfUnsupported) const
1719 {
1720  IgnoreUnused(input);
1721  // Define supported output types.
1722  std::array<DataType,1> supportedOutputTypes =
1723  {
1725  };
1726 
1727  return CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
1728  "Reference rank: input type not supported.");
1729 }
1730 
1731 bool RefLayerSupport::IsReshapeSupported(const TensorInfo& input,
1732  const TensorInfo& output,
1733  const ReshapeDescriptor& descriptor,
1734  Optional<std::string&> reasonIfUnsupported) const
1735 {
1736  IgnoreUnused(output);
1737  IgnoreUnused(descriptor);
1738  // Define supported output types.
1739  std::array<DataType,8> supportedOutputTypes =
1740  {
1749  };
1750 
1751  return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
1752  "Reference reshape: input type not supported.");
1753 }
1754 
1755 bool RefLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
1756  const TensorInfo& output,
1757  Optional<std::string&> reasonIfUnsupported) const
1758 {
1759  bool supported = true;
1760  std::array<DataType,6> supportedTypes =
1761  {
1768  };
1769 
1770  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1771  "Reference ResizeBilinear: input type not supported");
1772 
1773  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1774  "Reference ResizeBilinear: output type not supported");
1775 
1776  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1777  "Reference ResizeBilinear: input and output types not matching");
1778 
1779  return supported;
1780 }
1781 
1782 bool RefLayerSupport::IsResizeSupported(const TensorInfo& input,
1783  const TensorInfo& output,
1784  const ResizeDescriptor& descriptor,
1785  Optional<std::string&> reasonIfUnsupported) const
1786 {
1787  IgnoreUnused(descriptor);
1788  bool supported = true;
1789  std::array<DataType,6> supportedTypes =
1790  {
1797  };
1798 
1799  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1800  "Reference Resize: input type not supported");
1801 
1802  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1803  "Reference Resize: output type not supported");
1804 
1805  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1806  "Reference Resize: input and output types not matching");
1807 
1808  return supported;
1809 }
1810 
1811 bool RefLayerSupport::IsRsqrtSupported(const TensorInfo& input,
1812  const TensorInfo& output,
1813  Optional<std::string&> reasonIfUnsupported) const
1814 {
1815  return IsElementwiseUnarySupported(input,
1816  output,
1817  ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt),
1818  reasonIfUnsupported);
1819 }
1820 
1821 bool RefLayerSupport::IsSliceSupported(const TensorInfo& input,
1822  const TensorInfo& output,
1823  const SliceDescriptor& descriptor,
1824  Optional<std::string&> reasonIfUnsupported) const
1825 {
1826  IgnoreUnused(descriptor);
1827  bool supported = true;
1828 
1829  std::array<DataType, 5> supportedTypes =
1830  {
1836  };
1837 
1838  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1839  "Reference Slice: input type not supported");
1840 
1841  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1842  "Reference Slice: output type not supported");
1843 
1844  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1845  "Reference Slice: input and output types are mismatched");
1846 
1847  return supported;
1848 }
1849 
1850 bool RefLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
1851  const TensorInfo& output,
1852  const SoftmaxDescriptor& descriptor,
1853  Optional<std::string&> reasonIfUnsupported) const
1854 {
1855  IgnoreUnused(descriptor);
1856  bool supported = true;
1857  std::array<DataType,7> supportedTypes =
1858  {
1866  };
1867 
1868  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1869  "Reference Softmax: input type not supported");
1870 
1871  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1872  "Reference Softmax: output type not supported");
1873 
1874  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1875  "Reference Softmax: input and output types are mismatched");
1876 
1877  return supported;
1878 }
1879 
1880 bool RefLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
1881  const TensorInfo& output,
1882  const SpaceToBatchNdDescriptor& descriptor,
1883  Optional<std::string&> reasonIfUnsupported) const
1884 {
1885  IgnoreUnused(descriptor);
1886  bool supported = true;
1887  std::array<DataType,6> supportedTypes =
1888  {
1895  };
1896 
1897  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1898  "Reference SpaceToBatchNd: input type not supported");
1899 
1900  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1901  "Reference SpaceToBatchNd: output type not supported");
1902 
1903  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1904  "Reference SpaceToBatchNd: input and output types are mismatched");
1905 
1906  return supported;
1907 }
1908 
1909 bool RefLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
1910  const TensorInfo& output,
1911  const SpaceToDepthDescriptor& descriptor,
1912  Optional<std::string&> reasonIfUnsupported) const
1913 {
1914 
1915  IgnoreUnused(descriptor);
1916  bool supported = true;
1917 
1918  std::array<DataType,6> supportedTypes =
1919  {
1926  };
1927 
1928  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1929  "Reference SpaceToDepth: input type not supported");
1930 
1931  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1932  "Reference SpaceToDepth: output type not supported");
1933 
1934  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1935  "Reference SpaceToDepth: input and output types are mismatched");
1936 
1937  return supported;
1938 }
1939 
1940 bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
1941  const ViewsDescriptor& descriptor,
1942  Optional<std::string&> reasonIfUnsupported) const
1943 {
1944  IgnoreUnused(descriptor);
1945  bool supported = true;
1946  std::array<DataType,6> supportedTypes =
1947  {
1954  };
1955 
1956  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1957  "Reference splitter: input type not supported");
1958 
1959  return supported;
1960 }
1961 
1962 bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
1963  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
1964  const ViewsDescriptor& descriptor,
1965  Optional<std::string&> reasonIfUnsupported) const
1966 {
1967  IgnoreUnused(descriptor);
1968  bool supported = true;
1969  std::array<DataType,6> supportedTypes =
1970  {
1977  };
1978 
1979  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1980  "Reference splitter: input type not supported");
1981  for (const TensorInfo& output : outputs)
1982  {
1983  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1984  "Reference splitter: output type not supported");
1985 
1986  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1987  "Reference splitter: input and output types mismatched.");
1988  }
1989 
1990  return supported;
1991 }
1992 
1993 bool RefLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
1994  const TensorInfo& output,
1995  const StackDescriptor& descriptor,
1996  Optional<std::string&> reasonIfUnsupported) const
1997 {
1998  IgnoreUnused(descriptor);
1999 
2000  bool supported = true;
2001  std::array<DataType,6> supportedTypes =
2002  {
2009  };
2010 
2011  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2012  "Reference stack: output type not supported");
2013  for (const TensorInfo* input : inputs)
2014  {
2015  ARMNN_ASSERT(input != nullptr);
2016  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
2017  "Reference stack: input type not supported");
2018 
2019  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
2020  "Reference stack: input and output types mismatched.");
2021  }
2022 
2023  return supported;
2024 }
2025 
2026 bool RefLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
2027  const TensorInfo& output,
2028  const StridedSliceDescriptor& descriptor,
2029  Optional<std::string&> reasonIfUnsupported) const
2030 {
2031  IgnoreUnused(descriptor);
2032  bool supported = true;
2033 
2034  std::array<DataType,5> supportedTypes =
2035  {
2041  };
2042 
2043  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2044  "Reference StridedSlice: input type not supported");
2045 
2046  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2047  "Reference StridedSlice: output type not supported");
2048 
2049  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2050  "Reference StridedSlice: input and output types are mismatched");
2051 
2052  return supported;
2053 }
2054 
2055 bool RefLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
2056  const TensorInfo& input1,
2057  const TensorInfo& output,
2058  Optional<std::string&> reasonIfUnsupported) const
2059 {
2060  bool supported = true;
2061 
2062  std::array<DataType,7> supportedTypes = {
2070  };
2071 
2072  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2073  "Reference subtraction: input 0 is not a supported type.");
2074 
2075  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2076  "Reference subtraction: input 1 is not a supported type.");
2077 
2078  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2079  "Reference subtraction: output is not a supported type.");
2080 
2081  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2082  "Reference subtraction: input 0 and input 1 types are mismatched");
2083 
2084  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2085  "Reference subtraction: input and output types are mismatched");
2086 
2087  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2088  "Reference subtraction: shapes are not suitable for implicit broadcast.");
2089 
2090  return supported;
2091 }
2092 
2093 bool RefLayerSupport::IsPreluSupported(const TensorInfo& input,
2094  const TensorInfo& alpha,
2095  const TensorInfo& output,
2096  Optional<std::string&> reasonIfUnsupported) const
2097 {
2098  bool supported = true;
2099 
2100  std::array<DataType, 6> supportedTypes
2101  {
2108  };
2109 
2110  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2111  "PReLU: input is not a supported type.");
2112 
2113  supported &= CheckSupportRule(TypeAnyOf(alpha, supportedTypes), reasonIfUnsupported,
2114  "PReLU: alpha is not a supported type.");
2115 
2116  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2117  "PReLU: output is not a supported type.");
2118 
2119  supported &= CheckSupportRule(TypesAreEqual(input, alpha, output), reasonIfUnsupported,
2120  "PReLU: input, alpha and output types are mismatched");
2121 
2122  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input, alpha, output), reasonIfUnsupported,
2123  "PReLU: shapes are not suitable for implicit broadcast");
2124 
2125  return supported;
2126 }
2127 
2128 bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
2129  const TensorInfo& output,
2130  const TransposeConvolution2dDescriptor& descriptor,
2131  const TensorInfo& weights,
2132  const Optional<TensorInfo>& biases,
2133  Optional<std::string&> reasonIfUnsupported) const
2134 {
2135  IgnoreUnused(descriptor);
2136  bool supported = true;
2137 
2138  std::array<DataType,7> supportedTypes =
2139  {
2147  };
2148 
2149  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2150  "Reference TransposeConvolution2d: input is not a supported type.");
2151 
2152  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2153  "Reference TransposeConvolution2d: output is not a supported type.");
2154 
2155  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2156  "Reference TransposeConvolution2d: input and output types mismatched.");
2157 
2158 
2159  const DataType inputType = input.GetDataType();
2160  if (IsQuantized8BitType(inputType))
2161  {
2162  ARMNN_NO_DEPRECATE_WARN_BEGIN
2163  std::array<DataType, 4> supportedWeightTypes =
2164  {
2165  DataType::QAsymmS8,
2166  DataType::QAsymmU8,
2167  DataType::QSymmS8,
2168  DataType::QuantizedSymm8PerAxis //Deprecated
2169  };
2170  ARMNN_NO_DEPRECATE_WARN_END
2171 
2172  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
2173  "Reference TransposeConvolution2d: weights type not supported for "
2174  "quantized input.");
2175  }
2176  else
2177  {
2178  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
2179  "Reference TransposeConvolution2d: weights is not a supported type.");
2180 
2181  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
2182  "Reference TransposeConvolution2d: input and weights types mismatched.");
2183  }
2184 
2185  if (biases.has_value())
2186  {
2187  std::array<DataType,4> biasesSupportedTypes =
2188  {
2193  };
2194  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
2195  "Reference TransposeConvolution2d: biases is not a supported type.");
2196  }
2197 
2198  return supported;
2199 }
2200 
2201 bool RefLayerSupport::IsTransposeSupported(const TensorInfo& input,
2202  const TensorInfo& output,
2203  const TransposeDescriptor& descriptor,
2204  Optional<std::string&> reasonIfUnsupported) const
2205 {
2206  IgnoreUnused(descriptor);
2207  bool supported = true;
2208 
2209  // Define supported output and inputs types.
2210  std::array<DataType, 6> supportedTypes =
2211  {
2218  };
2219 
2220  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2221  "Reference transpose: input is not a supported type.");
2222 
2223  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2224  "Reference transpose: output is not a supported type.");
2225 
2226  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2227  "Reference transpose: input and output types are mismatched.");
2228 
2229  return supported;
2230 }
2231 
2232 } // namespace armnn
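
The checks above are normally reached through ArmNN's backend machinery when a network is optimised, but they can also be invoked directly on a RefLayerSupport instance. The sketch below is a minimal usage example and is not part of RefLayerSupport.cpp; it assumes the reference backend header RefLayerSupport.hpp is reachable on the include path of the particular build, and it queries IsSoftmaxSupported for a Float32 tensor while capturing the failure reason, if any.

    // Minimal sketch: querying one of the support checks defined in this file.
    // Assumption: "RefLayerSupport.hpp" (from the reference backend sources) is on the include path.
    #include "RefLayerSupport.hpp"

    #include <armnn/Descriptors.hpp>
    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>

    #include <iostream>
    #include <string>

    int main()
    {
        using namespace armnn;

        RefLayerSupport layerSupport;

        // A Float32 tensor of shape [1, 10]; Softmax is checked with identical input and output info.
        TensorInfo info(TensorShape({1, 10}), DataType::Float32);
        SoftmaxDescriptor softmaxDesc;

        std::string reason;
        bool supported = layerSupport.IsSoftmaxSupported(info, info, softmaxDesc,
                                                         Optional<std::string&>(reason));

        if (supported)
        {
            std::cout << "Softmax is supported by the reference backend." << std::endl;
        }
        else
        {
            std::cout << "Softmax not supported: " << reason << std::endl;
        }
        return 0;
    }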
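Every function in this file composes its constraints the same way: each call to CheckSupportRule folds into a single supported flag via supported &=, and a failing rule appends its message through the optional reasonIfUnsupported reference, so every failing constraint is reported rather than only the first. The snippet below is a simplified, self-contained illustration of that pattern; TypeAnyOf and CheckRule here are stand-ins written for this example and are not the ArmNN implementations of TypeAnyOf or CheckSupportRule.

    // Simplified sketch of the rule-composition pattern used throughout RefLayerSupport.
    #include <algorithm>
    #include <array>
    #include <cstddef>
    #include <iostream>
    #include <string>

    enum class DataType { Float16, Float32, QAsymmU8 };

    // Rule object: true when 'type' appears in the list of supported types.
    struct TypeAnyOf
    {
        template <std::size_t N>
        TypeAnyOf(DataType type, const std::array<DataType, N>& types)
            : m_Res(std::find(types.begin(), types.end(), type) != types.end()) {}
        bool m_Res;
    };

    // Evaluate a rule; on failure, append the message to the reason string (if one was provided).
    template <typename Rule>
    bool CheckRule(const Rule& rule, std::string* reason, const char* message)
    {
        if (!rule.m_Res && reason != nullptr)
        {
            *reason += message;
            *reason += "\n";
        }
        return rule.m_Res;
    }

    int main()
    {
        const std::array<DataType, 2> supportedTypes = { DataType::Float32, DataType::QAsymmU8 };

        std::string reason;
        bool supported = true;

        // Each constraint is ANDed into the overall result, mirroring 'supported &= CheckSupportRule(...)'.
        supported &= CheckRule(TypeAnyOf(DataType::Float16, supportedTypes), &reason,
                               "input type not supported");
        supported &= CheckRule(TypeAnyOf(DataType::Float32, supportedTypes), &reason,
                               "output type not supported");

        if (supported)
        {
            std::cout << "all checks passed" << std::endl;
        }
        else
        {
            std::cout << reason;
        }
        return 0;
    }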