ArmNN 21.05 — RefLayerSupport.cpp: reference-backend layer-support checks (Doxygen source listing; extraction has dropped some content lines).
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "RefLayerSupport.hpp"
7 
8 #include <armnn/TypesUtils.hpp>
9 #include <armnn/Types.hpp>
10 #include <armnn/Descriptors.hpp>
13 
14 #include <LayerSupportCommon.hpp>
16 
17 #include <vector>
18 #include <array>
19 
20 namespace armnn
21 {
22 
23 namespace
24 {
25 
26 template<typename Float32Func, typename Uint8Func, typename ... Params>
27 bool IsSupportedForDataTypeRef(Optional<std::string&> reasonIfUnsupported,
28  DataType dataType,
29  Float32Func floatFuncPtr,
30  Uint8Func uint8FuncPtr,
31  Params&&... params)
32 {
33  return IsSupportedForDataTypeGeneric(reasonIfUnsupported,
34  dataType,
35  &FalseFunc<Params...>,
36  floatFuncPtr,
37  uint8FuncPtr,
38  &FalseFunc<Params...>,
39  &FalseFunc<Params...>,
40  std::forward<Params>(params)...);
41 }
42 
43 } // anonymous namespace
44 
45 namespace
46 {
47 
/// Builds a human-readable diagnostic of the form:
///   "Reference <layer>: Expected <E> dimensions but got <A> dimensions instead, for the '<name>' tensor."
/// @param expected   number of dimensions the layer requires
/// @param actual     number of dimensions the tensor actually has
/// @param layerStr   layer name to embed in the message
/// @param tensorName tensor name to embed in the message
/// @return the formatted error message
// Parameters are now taken by const reference: the function only reads them,
// and const& remains compatible with every existing lvalue caller while also
// accepting temporaries.
std::string CreateIncorrectDimensionsErrorMsg(unsigned int expected,
                                              unsigned int actual,
                                              const std::string& layerStr,
                                              const std::string& tensorName)
{
    std::string errorMsg = "Reference " + layerStr + ": Expected " + std::to_string(expected) + " dimensions but got" +
                           " " + std::to_string(actual) + " dimensions instead, for the '" + tensorName + "' tensor.";

    return errorMsg;
}
58 
59 } // anonymous namespace
60 
61 bool RefLayerSupport::IsAbsSupported(const TensorInfo& input, const TensorInfo& output,
62  Optional<std::string&> reasonIfUnsupported) const
63 {
64  return IsElementwiseUnarySupported(input,
65  output,
67  reasonIfUnsupported);
68 }
69 
71  const TensorInfo& output,
72  const ActivationDescriptor& descriptor,
73  Optional<std::string&> reasonIfUnsupported) const
74 {
75  bool supported = true;
76 
77  // Define supported types.
78  std::array<DataType,6> supportedTypes = {
85  };
86 
87  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
88  "Reference activation: input type not supported.");
89 
90  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
91  "Reference activation: output type not supported.");
92 
93  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
94  "Reference activation: input and output types mismatched.");
95 
96  supported &= CheckSupportRule(ShapesAreSameRank(input, output), reasonIfUnsupported,
97  "Reference activation: input and output shapes are of different rank.");
98 
99 
100  struct ActivationFunctionSupported : public Rule
101  {
102  ActivationFunctionSupported(const ActivationDescriptor& desc)
103  {
104  switch(desc.m_Function)
105  {
118  {
119  m_Res = true;
120  break;
121  }
122  default:
123  {
124  m_Res = false;
125  break;
126  }
127  }
128  }
129  };
130 
131  // Function is supported
132  supported &= CheckSupportRule(ActivationFunctionSupported(descriptor), reasonIfUnsupported,
133  "Reference activation: function not supported.");
134 
135  return supported;
136 }
137 
139  const TensorInfo& input1,
140  const TensorInfo& output,
141  Optional<std::string&> reasonIfUnsupported) const
142 {
143  bool supported = true;
144 
145  std::array<DataType,7> supportedTypes = {
153  };
154 
155  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
156  "Reference addition: input 0 is not a supported type.");
157 
158  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
159  "Reference addition: input 1 is not a supported type.");
160 
161  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
162  "Reference addition: output is not a supported type.");
163 
164  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
165  "Reference addition: input 0 and Input 1 types are mismatched");
166 
167  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
168  "Reference addition: input and output types are mismatched");
169 
170  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
171  "Reference addition: shapes are not suitable for implicit broadcast.");
172 
173  return supported;
174 }
175 
177  const armnn::ArgMinMaxDescriptor &descriptor,
178  armnn::Optional<std::string &> reasonIfUnsupported) const
179 {
180  IgnoreUnused(descriptor);
181 
182  std::array<DataType, 8> supportedInputTypes =
183  {
192  };
193 
194  std::array<DataType,2> supportedOutputTypes = {
196  DataType::Signed64
197  };
198 
199  bool supported = true;
200 
201  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
202  "Reference ArgMinMax: input is not a supported type.");
203  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
204  "Reference ArgMinMax: output type not supported");
205 
206  return supported;
207 }
208 
210  const TensorInfo& output,
211  const TensorInfo& mean,
212  const TensorInfo& variance,
213  const TensorInfo& beta,
214  const TensorInfo& gamma,
215  const BatchNormalizationDescriptor& descriptor,
216  Optional<std::string&> reasonIfUnsupported) const
217 {
218  IgnoreUnused(descriptor);
219 
220  std::array<DataType, 6> supportedTypes =
221  {
228  };
229 
230  bool supported = true;
231 
232  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
233  "Reference batch normalization: input is not a supported type.");
234 
235  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
236  "Reference batch normalization: output is not a supported type.");
237 
238  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
239  "Reference batch normalization: input and output types are mismatched");
240 
241  supported &= CheckSupportRule(TypeAnyOf(mean, supportedTypes), reasonIfUnsupported,
242  "Reference batch normalization: mean is not a supported type.");
243 
244  supported &= CheckSupportRule(TypeAnyOf(variance, supportedTypes), reasonIfUnsupported,
245  "Reference batch normalization: variance is not a supported type.");
246 
247  supported &= CheckSupportRule(TypeAnyOf(beta, supportedTypes), reasonIfUnsupported,
248  "Reference batch normalization: beta is not a supported type.");
249 
250  supported &= CheckSupportRule(TypeAnyOf(gamma, supportedTypes), reasonIfUnsupported,
251  "Reference batch normalization: gamma is not a supported type.");
252 
253  return supported;
254 }
255 
257  const TensorInfo& output,
258  const BatchToSpaceNdDescriptor& descriptor,
259  Optional<std::string&> reasonIfUnsupported) const
260 {
261  IgnoreUnused(descriptor);
262 
263  bool supported = true;
264 
265  std::string batchToSpaceNdLayerStr = "batchToSpaceNd";
266  std::string inputTensorStr = "input";
267  std::string outputTensorStr = "output";
268 
269  // Define supported types.
270  std::array<DataType,6> supportedTypes =
271  {
278  };
279 
280  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
281  "Reference BatchToSpaceNd: input type not supported.");
282 
283  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
284  "Reference BatchToSpaceNd: output type not supported.");
285 
286  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
287  "Reference BatchToSpaceNd: input and output types mismatched.");
288 
289  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 4),
290  reasonIfUnsupported,
291  CreateIncorrectDimensionsErrorMsg(4,
292  output.GetNumDimensions(),
293  batchToSpaceNdLayerStr,
294  outputTensorStr).data());
295 
296  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(input, 4),
297  reasonIfUnsupported,
298  CreateIncorrectDimensionsErrorMsg(4,
299  input.GetNumDimensions(),
300  batchToSpaceNdLayerStr,
301  inputTensorStr).data());
302 
303  return supported;
304 }
305 
307  const TensorInfo& output,
308  Optional<std::string&> reasonIfUnsupported) const
309 {
310  std::array<DataType, 9> supportedInputTypes =
311  {
320  };
321 
322  bool supported = true;
323  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
324  "Reference cast: input is not a supported type");
325 
326 
327  supported &= CheckSupportRule(TypeAnyOf(output, supportedInputTypes), reasonIfUnsupported,
328  "Reference cast: output is not a supported type");
329 
330  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
331  "Reference cast: input and output shapes have different number of total elements");
332 
333  return supported;
334 }
335 
337  const TensorInfo& input1,
338  const TensorInfo& output,
339  const ComparisonDescriptor& descriptor,
340  Optional<std::string&> reasonIfUnsupported) const
341 {
342  IgnoreUnused(descriptor);
343  std::array<DataType, 8> supportedInputTypes =
344  {
353  };
354 
355  bool supported = true;
356  supported &= CheckSupportRule(TypeAnyOf(input0, supportedInputTypes), reasonIfUnsupported,
357  "Reference comparison: input 0 is not a supported type");
358 
359  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
360  "Reference comparison: input 0 and Input 1 types are mismatched");
361 
362  supported &= CheckSupportRule(TypeIs(output, DataType::Boolean), reasonIfUnsupported,
363  "Reference comparison: output is not of type Boolean");
364 
365  return supported;
366 }
367 
368 bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
369  const TensorInfo& output,
370  const ConcatDescriptor& descriptor,
371  Optional<std::string&> reasonIfUnsupported) const
372 {
373  IgnoreUnused(descriptor);
374 
375  bool supported = true;
376  std::array<DataType,6> supportedTypes =
377  {
384  };
385 
386  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
387  "Reference concatenation: output type not supported");
388  for (const TensorInfo* input : inputs)
389  {
390  ARMNN_ASSERT(input != nullptr);
391  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
392  "Reference concatenation: input type not supported");
393 
394  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
395  "Reference concatenation: input and output types mismatched.");
396  }
397 
398  return supported;
399 }
400 
402  Optional<std::string&> reasonIfUnsupported) const
403 {
404  std::array<DataType,8> supportedTypes =
405  {
414  };
415 
416  return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
417  "Reference constant: output is not a supported type.");
418 }
419 
421  const TensorInfo& output,
422  Optional<std::string&> reasonIfUnsupported) const
423 {
424  bool supported = true;
425 
426  supported &= CheckSupportRule(TypeIs(input, DataType::BFloat16), reasonIfUnsupported,
427  "Reference for ConvertBf16ToFp32 layer: input type not supported");
428 
429  supported &= CheckSupportRule(TypeIs(output, DataType::Float32), reasonIfUnsupported,
430  "Reference for ConvertBf16ToFp32 layer: output type not supported");
431 
432  return supported;
433 }
434 
436  const TensorInfo& output,
437  Optional<std::string&> reasonIfUnsupported) const
438 {
439  return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
440  input.GetDataType(),
441  &TrueFunc<>,
442  &FalseInputFuncF32<>,
443  &FalseFuncU8<>,
444  &FalseFuncI32<>,
445  &FalseFuncU8<>) &&
446  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
447  output.GetDataType(),
448  &FalseOutputFuncF16<>,
449  &TrueFunc<>,
450  &FalseFuncU8<>,
451  &FalseFuncI32<>,
452  &FalseFuncU8<>));
453 }
454 
456  const TensorInfo& output,
457  Optional<std::string&> reasonIfUnsupported) const
458 {
459  bool supported = true;
460 
461  supported &= CheckSupportRule(TypeIs(input, DataType::Float32), reasonIfUnsupported,
462  "Reference for ConvertFp32ToBf16 layer: input type not supported");
463 
464  supported &= CheckSupportRule(TypeIs(output, DataType::BFloat16), reasonIfUnsupported,
465  "Reference for ConvertFp32ToBf16 layer: output type not supported");
466 
467  return supported;
468 }
469 
471  const TensorInfo& output,
472  Optional<std::string&> reasonIfUnsupported) const
473 {
474  return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
475  input.GetDataType(),
476  &FalseInputFuncF16<>,
477  &TrueFunc<>,
478  &FalseFuncU8<>,
479  &FalseFuncI32<>,
480  &FalseFuncU8<>) &&
481  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
482  output.GetDataType(),
483  &TrueFunc<>,
484  &FalseOutputFuncF32<>,
485  &FalseFuncU8<>,
486  &FalseFuncI32<>,
487  &FalseFuncU8<>));
488 }
489 
491  const TensorInfo& output,
492  const Convolution2dDescriptor& descriptor,
493  const TensorInfo& weights,
494  const Optional<TensorInfo>& biases,
495  Optional<std::string&> reasonIfUnsupported) const
496 {
497  bool supported = true;
498 
499  // Define supported types.
500  std::array<DataType,7> supportedTypes =
501  {
509  };
510 
511  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
512  "Reference Convolution2d: input is not a supported type.");
513 
514  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
515  "Reference Convolution2d: output is not a supported type.");
516 
517  // For Convolution2d, we allow to have BFloat16 input with Float32 output for optimization.
518  if (input.GetDataType() == DataType::BFloat16)
519  {
520  if (output.GetDataType() != DataType::BFloat16 && output.GetDataType() != DataType::Float32)
521  {
522  reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n";
523  supported = false;
524  }
525  }
526  else
527  {
528  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
529  "Reference Convolution2d: input and output types mismatched.");
530  }
531 
532  const DataType inputType = input.GetDataType();
533  if (IsQuantized8BitType(inputType))
534  {
536  std::array<DataType, 4> supportedWeightTypes =
537  {
541  DataType::QuantizedSymm8PerAxis // deprecated
542  };
544 
545  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
546  "Reference Convolution2d: weights type not supported for quantized input.");
547  }
548  else
549  {
550  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
551  "Reference Convolution2d: weights is not a supported type.");
552 
553  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
554  "Reference Convolution2d: input and weights types mismatched.");
555  }
556 
557  if (biases.has_value())
558  {
559  std::array<DataType,4> biasesSupportedTypes =
560  {
565  };
566 
567  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
568  "Reference Convolution2d: biases is not a supported type.");
569  }
570  IgnoreUnused(descriptor);
571 
572  return supported;
573 }
574 
576  const TensorInfo& output,
577  Optional<std::string&> reasonIfUnsupported) const
578 {
579  bool supported = true;
580 
581  std::array<DataType, 8> supportedTypes =
582  {
591  };
592 
593  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
594  "Reference for Debug layer: input type not supported");
595 
596  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
597  "Reference for Debug layer: output type not supported");
598 
599  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
600  "Reference for Debug layer: input and output types are mismatched");
601 
602  return supported;
603 }
604 
606  const TensorInfo& output,
607  const DepthToSpaceDescriptor& descriptor,
608  Optional<std::string&> reasonIfUnsupported) const
609 {
610  IgnoreUnused(descriptor);
611  bool supported = true;
612 
613  std::array<DataType,6> supportedTypes =
614  {
621  };
622 
623  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
624  "Reference DepthToSpace: input type not supported");
625 
626  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
627  "Reference DepthToSpace: output type not supported");
628 
629  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
630  "Reference DepthToSpace: input and output types are mismatched");
631 
632  return supported;
633 }
634 
636  const TensorInfo& output,
637  const DepthwiseConvolution2dDescriptor& descriptor,
638  const TensorInfo& weights,
639  const Optional<TensorInfo>& biases,
640  Optional<std::string&> reasonIfUnsupported) const
641 {
642  IgnoreUnused(descriptor);
643  bool supported = true;
644 
645  // Define supported types.
646  std::array<DataType,7> supportedTypes =
647  {
655  };
656 
657  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
658  "Reference DepthwiseConvolution2d: input is not a supported type.");
659 
660  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
661  "Reference DepthwiseConvolution2d: output is not a supported type.");
662 
663  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
664  "Reference DepthwiseConvolution2d: input and output types mismatched.");
665 
666  const DataType inputType = input.GetDataType();
667  if (IsQuantized8BitType(inputType))
668  {
670  std::array<DataType, 4> supportedWeightTypes =
671  {
675  DataType::QuantizedSymm8PerAxis // deprecated
676  };
678 
679  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
680  "Reference DepthwiseConvolution2d: weights type not supported for "
681  "quantized input.");
682  }
683  else
684  {
685  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
686  "Reference DepthwiseConvolution2d: weights is not a supported type.");
687 
688  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
689  "Reference DepthwiseConvolution2d: input and weights types mismatched.");
690  }
691 
692  if (biases.has_value())
693  {
694  std::array<DataType,4> biasesSupportedTypes =
695  {
700  };
701  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
702  "Reference DepthwiseConvolution2d: biases is not a supported type.");
703  }
704 
705  return supported;
706 
707 }
708 
710  const TensorInfo& output,
711  Optional<std::string&> reasonIfUnsupported) const
712 {
713  bool supported = true;
714 
715  std::array<DataType,4> supportedInputTypes = {
720  };
721 
722  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
723  "Reference for Dequantize layer: input type not supported.");
724 
725  supported &= CheckSupportRule( TypeNotPerAxisQuantized(input), reasonIfUnsupported,
726  "Reference for Dequantize layer: per-axis quantized input not support .");
727 
728  supported &= CheckSupportRule(TypeNotPerAxisQuantized(input), reasonIfUnsupported,
729  "Reference dequantize: per-axis quantized input not support .");
730 
731  std::array<DataType,3> supportedOutputTypes = {
735  };
736 
737  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
738  "Reference for Dequantize layer: output type not supported.");
739 
740  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
741  "Reference for Dequantize layer: input/output shapes have different num total "
742  "elements.");
743 
744  return supported;
745 }
746 
748  const TensorInfo& scores,
749  const TensorInfo& anchors,
750  const TensorInfo& detectionBoxes,
751  const TensorInfo& detectionClasses,
752  const TensorInfo& detectionScores,
753  const TensorInfo& numDetections,
754  const DetectionPostProcessDescriptor& descriptor,
755  Optional<std::string&> reasonIfUnsupported) const
756 {
757  IgnoreUnused(anchors, detectionBoxes, detectionClasses, detectionScores, numDetections, descriptor);
758 
759  bool supported = true;
760 
761  std::array<DataType,6> supportedInputTypes =
762  {
769  };
770 
771  supported &= CheckSupportRule(TypeAnyOf(boxEncodings, supportedInputTypes), reasonIfUnsupported,
772  "Reference DetectionPostProcess: input 0 is not a supported type.");
773 
774  supported &= CheckSupportRule(TypeAnyOf(scores, supportedInputTypes), reasonIfUnsupported,
775  "Reference DetectionPostProcess: input 1 is not a supported type.");
776 
777  return supported;
778 }
779 
781  const TensorInfo& output,
782  const DepthwiseConvolution2dDescriptor& descriptor,
783  const TensorInfo& weights,
784  const Optional<TensorInfo>& biases,
785  Optional<std::string&> reasonIfUnsupported) const
786 {
787  return IsDepthwiseConvolutionSupported(input, output, descriptor, weights, biases, reasonIfUnsupported);
788 }
789 
791  const TensorInfo& input1,
792  const TensorInfo& output,
793  Optional<std::string&> reasonIfUnsupported) const
794 {
795  bool supported = true;
796 
797  std::array<DataType,7> supportedTypes = {
805  };
806 
807  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
808  "Reference division: input 0 is not a supported type.");
809 
810  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
811  "Reference division: input 1 is not a supported type.");
812 
813  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
814  "Reference division: output is not a supported type.");
815 
816  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
817  "Reference division: input 0 and Input 1 types are mismatched");
818 
819  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
820  "Reference division: input and output types are mismatched");
821 
822  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
823  "Reference division: shapes are not suitable for implicit broadcast.");
824 
825  return supported;
826 }
827 
829  const TensorInfo& output,
830  const ElementwiseUnaryDescriptor& descriptor,
831  Optional<std::string&> reasonIfUnsupported) const
832 {
833  IgnoreUnused(descriptor);
834 
835  std::array<DataType, 7> supportedTypes =
836  {
844  };
845 
846  std::array<DataType, 1> logicalSupportedTypes =
847  {
849  };
850 
851  bool supported = true;
852 
853  if (descriptor.m_Operation == UnaryOperation::LogicalNot)
854  {
855  supported &= CheckSupportRule(TypeAnyOf(input, logicalSupportedTypes), reasonIfUnsupported,
856  "Reference elementwise unary: input type not supported");
857 
858  supported &= CheckSupportRule(TypeAnyOf(output, logicalSupportedTypes), reasonIfUnsupported,
859  "Reference elementwise unary: output type not supported");
860  }
861  else
862  {
863  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
864  "Reference elementwise unary: input type not supported");
865 
866  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
867  "Reference elementwise unary: output type not supported");
868  }
869 
870  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
871  "Reference elementwise unary: input and output types not matching");
872 
873  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
874  "Reference elementwise unary: input and output shapes"
875  "have different number of total elements");
876 
877  return supported;
878 }
879 
881  const TensorInfo& input1,
882  const TensorInfo& output,
883  Optional<std::string&> reasonIfUnsupported) const
884 {
885  return IsComparisonSupported(input0,
886  input1,
887  output,
889  reasonIfUnsupported);
890 }
891 
893  const FakeQuantizationDescriptor& descriptor,
894  Optional<std::string&> reasonIfUnsupported) const
895 {
896  IgnoreUnused(descriptor);
897  bool supported = true;
898 
899  std::array<DataType,1> supportedTypes =
900  {
902  };
903 
904  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
905  "Reference fake quantization: input type not supported.");
906 
907  return supported;
908 }
909 
911  const TensorInfo& output,
912  const FillDescriptor& descriptor,
913  Optional<std::string&> reasonIfUnsupported) const
914 {
915  IgnoreUnused(descriptor);
916  IgnoreUnused(output);
917 
918  bool supported = true;
919 
920  std::array<DataType,3> supportedTypes =
921  {
925  };
926 
927  supported &= CheckSupportRule(TypeIs(input, DataType::Signed32), reasonIfUnsupported,
928  "Reference Fill: input type not supported.");
929 
930  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
931  "Reference Fill: output type not supported.");
932  return supported;
933 }
934 
936  const TensorInfo& output,
937  Optional<std::string&> reasonIfUnsupported) const
938 {
939  IgnoreUnused(output);
940  bool supported = true;
941 
942  std::array<DataType,3> supportedTypes =
943  {
947  };
948 
949  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
950  "Reference Floor: input type not supported.");
951 
952  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
953  "Reference Floor: output type not supported.");
954 
955  return supported;
956 }
957 
959  const TensorInfo& output,
960  const TensorInfo& weights,
961  const TensorInfo& biases,
962  const FullyConnectedDescriptor& descriptor,
963  Optional<std::string&> reasonIfUnsupported) const
964 {
965  bool supported = true;
966 
967  // Define supported types.
968  std::array<DataType,6> supportedTypes =
969  {
976  };
977 
978  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
979  "Reference Fully Connected: input type not supported.");
980 
981  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
982  "Reference Fully Connected: output type not supported.");
983 
984  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
985  "Reference Fully Connected: weights type not supported.");
986 
987  // For FullyConnected, we allow to have BFloat16 input with Float32 output for optimization.
988  if (input.GetDataType() == DataType::BFloat16)
989  {
990  if (output.GetDataType() != DataType::BFloat16 && output.GetDataType() != DataType::Float32)
991  {
992  reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n";
993  supported = false;
994  }
995  }
996  else
997  {
998  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
999  "Reference Fully Connected: input and output types mismatched.");
1000  }
1001 
1002  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1003  "Reference Fully Connected: weights is not a supported type.");
1004 
1005  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1006  "Reference Fully Connected: input and weights types mismatched.");
1007 
1008  if (descriptor.m_BiasEnabled)
1009  {
1010  // Defined supported types for bias
1011  std::array<DataType, 5>
1012  supportedBiasTypes =
1013  {
1019  };
1020 
1021  supported &= CheckSupportRule(TypeAnyOf(biases, supportedBiasTypes), reasonIfUnsupported,
1022  "Reference Fully Connected: bias type not supported.");
1023 
1024  supported &= CheckSupportRule(BiasAndWeightsTypesMatch(biases, weights), reasonIfUnsupported,
1025  "Reference Fully Connected: bias and weight types mismatch.");
1026 
1027  supported &= CheckSupportRule(BiasAndWeightsTypesCompatible(weights, supportedBiasTypes), reasonIfUnsupported,
1028  "Reference Fully Connected: bias type inferred from weights is incompatible.");
1029 
1030  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(biases, 1U), reasonIfUnsupported,
1031  "Reference Fully Connected: bias must have 1 dimension.");
1032 
1033  }
1034 
1035  return supported;
1036 }
1037 
1039  const armnn::TensorInfo& input1,
1040  const armnn::TensorInfo& output,
1041  const GatherDescriptor& descriptor,
1042  armnn::Optional<std::string&> reasonIfUnsupported) const
1043 {
1044  bool supported = true;
1045  std::array<DataType,7> supportedTypes =
1046  {
1054  };
1055 
1056  if (descriptor.m_Axis != 0)
1057  {
1058  reasonIfUnsupported.value() += std::string("Reference Gather: axis not supported\n");
1059  supported &= false;
1060  }
1061  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1062  "Reference Gather: input type not supported");
1063 
1064  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1065  "Reference Gather: output type not supported");
1066 
1067  supported &= CheckSupportRule(TypeIs(input1, DataType::Signed32), reasonIfUnsupported,
1068  "Reference Gather: indices (input1) type not supported");
1069 
1070  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1071  "Reference Gather: input and output types not matching");
1072 
1073  return supported;
1074 }
1075 
1077  const TensorInfo& input1,
1078  const TensorInfo& output,
1079  Optional<std::string&> reasonIfUnsupported) const
1080 {
1081  return IsComparisonSupported(input0,
1082  input1,
1083  output,
1085  reasonIfUnsupported);
1086 }
1087 
1089  Optional<std::string&> /*reasonIfUnsupported*/) const
1090 {
1091  return true;
1092 }
1093 
1095  const TensorInfo& output,
1096  const InstanceNormalizationDescriptor& descriptor,
1097  Optional<std::string&> reasonIfUnsupported) const
1098 {
1099  IgnoreUnused(descriptor);
1100  // Define supported types
1101  std::array<DataType, 3> supportedTypes =
1102  {
1106  };
1107 
1108  bool supported = true;
1109 
1110  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1111  "Reference Instance Normalization: input type not supported.");
1112 
1113  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1114  "Reference Instance Normalization: output type not supported.");
1115 
1116  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1117  "Reference Instance Normalization: input and output types mismatched.");
1118 
1119  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1120  "Reference Instance Normalization: input and output shapes have different "
1121  "num total elements.");
1122 
1123  return supported;
1124 }
1125 
1127  const TensorInfo& output,
1128  const L2NormalizationDescriptor& descriptor,
1129  Optional<std::string&> reasonIfUnsupported) const
1130 {
1131  IgnoreUnused(descriptor);
1132  // Define supported types
1133  std::array<DataType, 6> supportedTypes =
1134  {
1141  };
1142 
1143  bool supported = true;
1144 
1145  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1146  "Reference L2normalization: input type not supported.");
1147 
1148  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1149  "Reference L2normalization: output type not supported.");
1150 
1151  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1152  "Reference L2normalization: input and output types mismatched.");
1153 
1154  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1155  "Reference L2normalization: input and output shapes have different "
1156  "num total elements.");
1157 
1158  return supported;
1159 }
1160 
1162  const TensorInfo& input1,
1163  const TensorInfo& output,
1164  const LogicalBinaryDescriptor& descriptor,
1165  Optional<std::string&> reasonIfUnsupported) const
1166 {
1167  IgnoreUnused(descriptor);
1168 
1169  std::array<DataType, 1> supportedTypes =
1170  {
1172  };
1173 
1174  bool supported = true;
1175  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1176  "Reference LogicalBinary: input 0 type not supported");
1177  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1178  "Reference LogicalBinary: input 1 type not supported");
1179 
1180  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1181  "Reference LogicalBinary: input and output types do not match");
1182 
1183  return supported;
1184 }
1185 
1187  const TensorInfo& output,
1188  const LogSoftmaxDescriptor& descriptor,
1189  Optional<std::string&> reasonIfUnsupported) const
1190 {
1191  IgnoreUnused(descriptor);
1192 
1193  std::array<DataType, 3> supportedTypes =
1194  {
1198  };
1199 
1200  bool supported = true;
1201  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1202  "Reference LogSoftmax: input type not supported");
1203 
1204  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1205  "Reference LogSoftmax: output type not supported");
1206 
1207  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1208  "Reference LogSoftmax: input and output types do not match");
1209 
1210  return supported;
1211 }
1212 
1214  const TensorInfo& outputStateIn,
1215  const TensorInfo& cellStateIn,
1216  const TensorInfo& scratchBuffer,
1217  const TensorInfo& outputStateOut,
1218  const TensorInfo& cellStateOut,
1219  const TensorInfo& output,
1220  const LstmDescriptor& descriptor,
1221  const LstmInputParamsInfo& paramsInfo,
1222  Optional<std::string&> reasonIfUnsupported) const
1223 {
1224  IgnoreUnused(descriptor);
1225  IgnoreUnused(paramsInfo);
1226 
1227  bool supported = true;
1228 
1229  std::array<DataType,3> supportedTypes = {
1233  };
1234 
1235  // check inputs and outputs
1236  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1237  "Reference Lstm: input is not a supported type.");
1238  supported &= CheckSupportRule(TypesAreEqual(input, outputStateIn), reasonIfUnsupported,
1239  "Reference Lstm: input and outputStateIn types are mismatched");
1240  supported &= CheckSupportRule(TypesAreEqual(input, cellStateIn), reasonIfUnsupported,
1241  "Reference Lstm: input and cellStateIn types are mismatched");
1242  supported &= CheckSupportRule(TypesAreEqual(input, scratchBuffer), reasonIfUnsupported,
1243  "Reference Lstm: input and scratchBuffer types are mismatched");
1244  supported &= CheckSupportRule(TypesAreEqual(input, outputStateOut), reasonIfUnsupported,
1245  "Reference Lstm: input and outputStateOut types are mismatched");
1246  supported &= CheckSupportRule(TypesAreEqual(input, cellStateOut), reasonIfUnsupported,
1247  "Reference Lstm: input and cellStateOut types are mismatched");
1248  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1249  "Reference Lstm: input and output types are mismatched");
1250  // check layer parameters
1251  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToForgetWeights()), reasonIfUnsupported,
1252  "Reference Lstm: input and InputToForgetWeights types are mismatched");
1253  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToCellWeights()), reasonIfUnsupported,
1254  "Reference Lstm: input and InputToCellWeights types are mismatched");
1255  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToOutputWeights()), reasonIfUnsupported,
1256  "Reference Lstm: input and InputToOutputWeights types are mismatched");
1257  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToForgetWeights()), reasonIfUnsupported,
1258  "Reference Lstm: input and RecurrentToForgetWeights types are mismatched");
1259  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToCellWeights()), reasonIfUnsupported,
1260  "Reference Lstm: input and RecurrentToCellWeights types are mismatched");
1261  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToOutputWeights()), reasonIfUnsupported,
1262  "Reference Lstm: input and RecurrentToOutputWeights types are mismatched");
1263  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetGateBias()), reasonIfUnsupported,
1264  "Reference Lstm: input and ForgetGateBias types are mismatched");
1265  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellBias()), reasonIfUnsupported,
1266  "Reference Lstm: input and CellBias types are mismatched");
1267  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputGateBias()), reasonIfUnsupported,
1268  "Reference Lstm: input and OutputGateBias types are mismatched");
1269  if (!descriptor.m_CifgEnabled)
1270  {
1271  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToInputWeights()), reasonIfUnsupported,
1272  "Reference Lstm: input and InputToInputWeights types are mismatched");
1273  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToInputWeights()),
1274  reasonIfUnsupported,
1275  "Reference Lstm: input and RecurrentToInputWeights types are mismatched");
1276  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputGateBias()), reasonIfUnsupported,
1277  "Reference Lstm: input and InputGateBias types are mismatched");
1278  if (descriptor.m_PeepholeEnabled)
1279  {
1280  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToInputWeights()),
1281  reasonIfUnsupported,
1282  "Reference Lstm: input and CellToInputWeights types are mismatched");
1283  }
1284  }
1285  if (descriptor.m_PeepholeEnabled)
1286  {
1287  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToForgetWeights()), reasonIfUnsupported,
1288  "Reference Lstm: input and CellToForgetWeights types are mismatched");
1289  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToOutputWeights()), reasonIfUnsupported,
1290  "Reference Lstm: input and CellToOutputWeights types are mismatched");
1291  }
1292  if (descriptor.m_ProjectionEnabled)
1293  {
1294  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionWeights()), reasonIfUnsupported,
1295  "Reference Lstm: input and mProjectionWeights types are mismatched");
1296  if (paramsInfo.m_ProjectionBias != nullptr)
1297  {
1298  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
1299  "Reference Lstm: input and ProjectionBias types are mismatched");
1300  }
1301  }
1302  if (descriptor.m_LayerNormEnabled)
1303  {
1304  if (!descriptor.m_CifgEnabled)
1305  {
1306  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputLayerNormWeights()),
1307  reasonIfUnsupported,
1308  "Reference Lstm: input and InputLayerNormWeights types are mismatched");
1309  }
1310  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetLayerNormWeights()),
1311  reasonIfUnsupported,
1312  "Reference Lstm: input and ForgetLayerNormWeights types are mismatched");
1313  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellLayerNormWeights()),
1314  reasonIfUnsupported,
1315  "Reference Lstm: input and CellLayerNormWeights types are mismatched");
1316  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputLayerNormWeights()),
1317  reasonIfUnsupported,
1318  "Reference Lstm: input and OutputLayerNormWeights types are mismatched");
1319  }
1320 
1321  return supported;
1322 }
1323 
1325  const TensorInfo& input1,
1326  const TensorInfo& output,
1327  Optional<std::string&> reasonIfUnsupported) const
1328 {
1329  bool supported = true;
1330 
1331  std::array<DataType,7> supportedTypes = {
1339  };
1340 
1341  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1342  "Reference maximum: input 0 is not a supported type.");
1343 
1344  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1345  "Reference maximum: input 1 is not a supported type.");
1346 
1347  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1348  "Reference maximum: output is not a supported type.");
1349 
1350  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1351  "Reference maximum: input 0 and Input 1 types are mismatched");
1352 
1353  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1354  "Reference maximum: input and output types are mismatched");
1355 
1356  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1357  "Reference maximum: shapes are not suitable for implicit broadcast.");
1358 
1359  return supported;
1360 }
1361 
1363  const TensorInfo& output,
1364  const MeanDescriptor& descriptor,
1365  Optional<std::string&> reasonIfUnsupported) const
1366 {
1367  bool supported = true;
1368  std::string meanLayerStr = "Mean";
1369  std::string outputTensorStr = "output";
1370 
1371  std::array<DataType,6> supportedTypes =
1372  {
1379  };
1380 
1381  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1382  "Reference Mean: input type not supported.");
1383 
1384  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1385  "Reference Mean: input and output types are mismatched");
1386 
1387  if (descriptor.m_KeepDims)
1388  {
1389  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, input.GetNumDimensions()),
1390  reasonIfUnsupported,
1391  CreateIncorrectDimensionsErrorMsg(input.GetNumDimensions(),
1392  output.GetNumDimensions(),
1393  meanLayerStr, outputTensorStr).data());
1394  }
1395  else if (descriptor.m_Axis.empty())
1396  {
1397  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1398  reasonIfUnsupported,
1399  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1400  meanLayerStr, outputTensorStr).data());
1401  }
1402  else
1403  {
1404  auto outputDim = input.GetNumDimensions() - armnn::numeric_cast<unsigned int>(descriptor.m_Axis.size());
1405 
1406  if (outputDim > 0)
1407  {
1408  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, outputDim),
1409  reasonIfUnsupported,
1410  CreateIncorrectDimensionsErrorMsg(outputDim, output.GetNumDimensions(),
1411  meanLayerStr, outputTensorStr).data());
1412  }
1413  else
1414  {
1415  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1416  reasonIfUnsupported,
1417  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1418  meanLayerStr, outputTensorStr).data());
1419  }
1420  }
1421 
1422  return supported;
1423 }
1424 
// Support check for the legacy Merger layer. It forwards the query verbatim
// to IsConcatSupported, so Merger and Concat report identical backend support
// (Merger appears to be the deprecated name for Concat — kept for old callers).
1425 bool RefLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
1426  const TensorInfo& output,
1427  const MergerDescriptor& descriptor,
1428  Optional<std::string&> reasonIfUnsupported) const
1429 {
1430  return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
1431 }
1432 
1434  const TensorInfo &output,
1435  Optional<std::string &> reasonIfUnsupported) const
1436 {
1437  bool supported = true;
1438 
1439  std::array<DataType,7> supportedTypes =
1440  {
1448  };
1449 
1450  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1451  "Reference MemCopy: input type not supported");
1452 
1453  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1454  "Reference MemCopy: output type not supported");
1455 
1456  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1457  "Reference MemCopy: input and output types are mismatched");
1458 
1459  return supported;
1460 }
1461 
1463  const TensorInfo& input1,
1464  const TensorInfo& output,
1465  Optional<std::string&> reasonIfUnsupported) const
1466 {
1467  bool supported = true;
1468 
1469  std::array<DataType,7> supportedTypes = {
1477  };
1478 
1479  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1480  "Reference minimum: input 0 is not a supported type.");
1481 
1482  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1483  "Reference minimum: input 1 is not a supported type.");
1484 
1485  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1486  "Reference minimum: output is not a supported type.");
1487 
1488  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1489  "Reference minimum: input 0 and Input 1 types are mismatched");
1490 
1491  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1492  "Reference minimum: input and output types are mismatched");
1493 
1494  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1495  "Reference minimum: shapes are not suitable for implicit broadcast.");
1496 
1497  return supported;
1498 }
1499 
1501  const TensorInfo& input1,
1502  const TensorInfo& output,
1503  Optional<std::string&> reasonIfUnsupported) const
1504 {
1505  bool supported = true;
1506 
1507  std::array<DataType,7> supportedTypes = {
1515  };
1516 
1517  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1518  "Reference multiplication: input 0 is not a supported type.");
1519 
1520  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1521  "Reference multiplication: input 1 is not a supported type.");
1522 
1523  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1524  "Reference multiplication: output is not a supported type.");
1525 
1526  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1527  "Reference multiplication: input 0 and Input 1 types are mismatched");
1528 
1529  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1530  "Reference multiplication: input and output types are mismatched");
1531 
1532  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1533  "Reference multiplication: shapes are not suitable for implicit broadcast.");
1534 
1535  return supported;
1536 }
1537 
1539  const TensorInfo& output,
1540  const NormalizationDescriptor& descriptor,
1541  Optional<std::string&> reasonIfUnsupported) const
1542 {
1543  IgnoreUnused(descriptor);
1544 
1545  // Define supported types
1546  std::array<DataType, 6> supportedTypes =
1547  {
1554  };
1555 
1556  bool supported = true;
1557 
1558  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1559  "Reference normalization: input type not supported.");
1560 
1561  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1562  "Reference normalization: output type not supported.");
1563 
1564  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1565  "Reference normalization: input and output shapes have different "
1566  "num total elements.");
1567 
1568  return supported;
1569 }
1570 
1572  Optional<std::string&> /*reasonIfUnsupported*/) const
1573 {
1574  return true;
1575 }
1576 
1578  const TensorInfo& output,
1579  const PadDescriptor& descriptor,
1580  Optional<std::string&> reasonIfUnsupported) const
1581 {
1582  IgnoreUnused(descriptor);
1583  bool supported = true;
1584 
1585  // Define supported output and inputs types.
1586  std::array<DataType,6> supportedTypes =
1587  {
1594  };
1595 
1596  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1597  "Reference pad: input is not a supported type.");
1598 
1599  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1600  "Reference pad: output is not a supported type.");
1601 
1602  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1603  "Reference pad: input and output types are mismatched.");
1604 
1605  return supported;
1606 }
1607 
1609  const TensorInfo& output,
1610  const PermuteDescriptor& descriptor,
1611  Optional<std::string&> reasonIfUnsupported) const
1612 {
1613  IgnoreUnused(descriptor);
1614  bool supported = true;
1615 
1616  // Define supported output and inputs types.
1617  std::array<DataType, 6> supportedTypes =
1618  {
1625  };
1626 
1627  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1628  "Reference permute: input is not a supported type.");
1629 
1630  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1631  "Reference permute: output is not a supported type.");
1632 
1633  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1634  "Reference permute: input and output types are mismatched.");
1635 
1636  return supported;
1637 }
1638 
1640  const TensorInfo& output,
1641  const Pooling2dDescriptor& descriptor,
1642  Optional<std::string&> reasonIfUnsupported) const
1643 {
1644  IgnoreUnused(descriptor);
1645  bool supported = true;
1646 
1647  // Define supported output and inputs types.
1648  std::array<DataType,6> supportedTypes =
1649  {
1656  };
1657 
1658  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1659  "Reference poolind2d: input is not a supported type.");
1660 
1661  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1662  "Reference poolind2d: output is not a supported type.");
1663 
1664  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1665  "Reference poolind2d: input and output types are mismatched.");
1666 
1667  return supported;
1668 }
1669 
1671  const TensorInfo& previousOutputIn,
1672  const TensorInfo& previousCellStateIn,
1673  const TensorInfo& outputStateOut,
1674  const TensorInfo& cellStateOut,
1675  const TensorInfo& output,
1676  const QLstmDescriptor& descriptor,
1677  const LstmInputParamsInfo& paramsInfo,
1678  Optional<std::string&> reasonIfUnsupported) const
1679 {
1680  IgnoreUnused(input);
1681  IgnoreUnused(previousOutputIn);
1682  IgnoreUnused(previousCellStateIn);
1683  IgnoreUnused(outputStateOut);
1684  IgnoreUnused(cellStateOut);
1685  IgnoreUnused(output);
1686  IgnoreUnused(descriptor);
1687  IgnoreUnused(paramsInfo);
1688 
1689  IgnoreUnused(reasonIfUnsupported);
1690 
1691  return true;
1692 }
1693 
1695  const TensorInfo& output,
1696  Optional<std::string&> reasonIfUnsupported) const
1697 {
1698  bool supported = true;
1699 
1700  // Define supported input types.
1701  std::array<DataType,7> supportedInputTypes = {
1709  };
1710 
1711  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
1712  "Reference quantize: input type not supported.");
1713 
1714  // Define supported output types.
1715  std::array<DataType,4> supportedOutputTypes = {
1719  DataType::QSymmS16
1720  };
1721  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
1722  "Reference quantize: output type not supported.");
1723 
1724  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1725  "Reference quantize: input and output shapes have different num total elements.");
1726 
1727  return supported;
1728 }
1729 
1731  const TensorInfo& output,
1732  Optional<std::string&> reasonIfUnsupported) const
1733 {
1734  IgnoreUnused(input);
1735  // Define supported output types.
1736  std::array<DataType,1> supportedOutputTypes =
1737  {
1739  };
1740 
1741  return CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
1742  "Reference rank: input type not supported.");
1743 }
1744 
1746  const TensorInfo& output,
1747  const ReduceDescriptor& descriptor,
1748  Optional<std::string&> reasonIfUnsupported) const
1749 {
1750  IgnoreUnused(descriptor);
1751  bool supported = true;
1752  std::array<DataType,7> supportedTypes =
1753  {
1761  };
1762 
1763  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1764  "Reference Reduce: input type not supported");
1765 
1766  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1767  "Reference Reduce: output type not supported");
1768 
1769  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1770  "Reference Reduce: input and output types not matching");
1771 
1772  return supported;
1773 }
1774 
1776  const TensorInfo& output,
1777  const ReshapeDescriptor& descriptor,
1778  Optional<std::string&> reasonIfUnsupported) const
1779 {
1780  IgnoreUnused(output);
1781  IgnoreUnused(descriptor);
1782  // Define supported output types.
1783  std::array<DataType,8> supportedOutputTypes =
1784  {
1793  };
1794 
1795  return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
1796  "Reference reshape: input type not supported.");
1797 }
1798 
1800  const TensorInfo& output,
1801  Optional<std::string&> reasonIfUnsupported) const
1802 {
1803  bool supported = true;
1804  std::array<DataType,6> supportedTypes =
1805  {
1812  };
1813 
1814  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1815  "Reference ResizeBilinear: input type not supported");
1816 
1817  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1818  "Reference ResizeBilinear: output type not supported");
1819 
1820  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1821  "Reference ResizeBilinear: input and output types not matching");
1822 
1823  return supported;
1824 }
1825 
1827  const TensorInfo& output,
1828  const ResizeDescriptor& descriptor,
1829  Optional<std::string&> reasonIfUnsupported) const
1830 {
1831  IgnoreUnused(descriptor);
1832  bool supported = true;
1833  std::array<DataType,6> supportedTypes =
1834  {
1841  };
1842 
1843  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1844  "Reference Resize: input type not supported");
1845 
1846  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1847  "Reference Resize: output type not supported");
1848 
1849  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1850  "Reference Resize: input and output types not matching");
1851 
1852  return supported;
1853 }
1854 
1856  const TensorInfo& output,
1857  Optional<std::string&> reasonIfUnsupported) const
1858 {
1859  return IsElementwiseUnarySupported(input,
1860  output,
1862  reasonIfUnsupported);
1863 }
1864 
1866  const TensorInfo& output,
1867  const SliceDescriptor& descriptor,
1868  Optional<std::string&> reasonIfUnsupported) const
1869 {
1870  IgnoreUnused(descriptor);
1871  bool supported = true;
1872 
1873  std::array<DataType, 5> supportedTypes =
1874  {
1880  };
1881 
1882  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1883  "Reference Slice: input type not supported");
1884 
1885  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1886  "Reference Slice: output type not supported");
1887 
1888  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1889  "Reference Slice: input and output types are mismatched");
1890 
1891  return supported;
1892 }
1893 
1895  const TensorInfo& output,
1896  const SoftmaxDescriptor& descriptor,
1897  Optional<std::string&> reasonIfUnsupported) const
1898 {
1899  IgnoreUnused(descriptor);
1900  bool supported = true;
1901  std::array<DataType,7> supportedTypes =
1902  {
1910  };
1911 
1912  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1913  "Reference Softmax: output type not supported");
1914 
1915  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1916  "Reference Softmax: input type not supported");
1917 
1918  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1919  "Reference Softmax: input type not supported");
1920 
1921  return supported;
1922 }
1923 
1925  const TensorInfo& output,
1926  const SpaceToBatchNdDescriptor& descriptor,
1927  Optional<std::string&> reasonIfUnsupported) const
1928 {
1929  IgnoreUnused(descriptor);
1930  bool supported = true;
1931  std::array<DataType,6> supportedTypes =
1932  {
1939  };
1940 
1941  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1942  "Reference SpaceToBatchNd: input type not supported");
1943 
1944  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1945  "Reference SpaceToBatchNd: output type not supported");
1946 
1947  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1948  "Reference SpaceToBatchNd: input and output types are mismatched");
1949 
1950  return supported;
1951 }
1952 
1954  const TensorInfo& output,
1955  const SpaceToDepthDescriptor& descriptor,
1956  Optional<std::string&> reasonIfUnsupported) const
1957 {
1958 
1959  IgnoreUnused(descriptor);
1960  bool supported = true;
1961 
1962  std::array<DataType,6> supportedTypes =
1963  {
1970  };
1971 
1972  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1973  "Reference SpaceToDepth: input type not supported");
1974 
1975  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1976  "Reference SpaceToDepth: output type not supported");
1977 
1978  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1979  "Reference SpaceToDepth: input and output types are mismatched");
1980 
1981  return supported;
1982 }
1983 
1985  const ViewsDescriptor& descriptor,
1986  Optional<std::string&> reasonIfUnsupported) const
1987 {
1988  IgnoreUnused(descriptor);
1989  bool supported = true;
1990  std::array<DataType,6> supportedTypes =
1991  {
1998  };
1999 
2000  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2001  "Reference splitter: input type not supported");
2002 
2003  return supported;
2004 }
2005 
2007  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
2008  const ViewsDescriptor& descriptor,
2009  Optional<std::string&> reasonIfUnsupported) const
2010 {
2011  IgnoreUnused(descriptor);
2012  bool supported = true;
2013  std::array<DataType,6> supportedTypes =
2014  {
2021  };
2022 
2023  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2024  "Reference splitter: output type not supported");
2025  for (const TensorInfo& output : outputs)
2026  {
2027  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2028  "Reference splitter: input type not supported");
2029 
2030  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2031  "Reference splitter: input and output types mismatched.");
2032  }
2033 
2034  return supported;
2035 }
2036 
// Support check for a Stack layer on the reference backend: the output and
// every non-null input must be one of the backend's supported data types, and
// each input's type must equal the output's type. The descriptor (axis /
// shape parameters) is not validated here.
2037 bool RefLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
2038  const TensorInfo& output,
2039  const StackDescriptor& descriptor,
2040  Optional<std::string&> reasonIfUnsupported) const
2041 {
2042  IgnoreUnused(descriptor);
2043 
2044  bool supported = true;
2045  std::array<DataType,6> supportedTypes =
2046  {
// NOTE(review): the six DataType entries (original lines 2047-2052) were
// dropped by the documentation extraction because they were hyperlinks —
// restore them from the upstream RefLayerSupport.cpp; this listing is not
// compilable as shown.
2053  };
2054 
2055  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2056  "Reference stack: output type not supported");
2057  for (const TensorInfo* input : inputs)
2058  {
// A null TensorInfo pointer is a programming error, not an unsupported case.
2059  ARMNN_ASSERT(input != nullptr);
2060  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
2061  "Reference stack: input type not supported");
2062 
2063  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
2064  "Reference stack: input and output types mismatched.");
2065  }
2066 
2067  return supported;
2068 }
2069 
2071  const TensorInfo& output,
2072  const StridedSliceDescriptor& descriptor,
2073  Optional<std::string&> reasonIfUnsupported) const
2074 {
2075  IgnoreUnused(descriptor);
2076  bool supported = true;
2077 
2078  std::array<DataType,5> supportedTypes =
2079  {
2085  };
2086 
2087  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2088  "Reference StridedSlice: input type not supported");
2089 
2090  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2091  "Reference StridedSlice: output type not supported");
2092 
2093  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2094  "Reference StridedSlice: input and output types are mismatched");
2095 
2096  return supported;
2097 }
2098 
2100  const TensorInfo& input1,
2101  const TensorInfo& output,
2102  Optional<std::string&> reasonIfUnsupported) const
2103 {
2104  bool supported = true;
2105 
2106  std::array<DataType,7> supportedTypes = {
2114  };
2115 
2116  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2117  "Reference subtraction: input 0 is not a supported type.");
2118 
2119  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2120  "Reference subtraction: input 1 is not a supported type.");
2121 
2122  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2123  "Reference subtraction: output is not a supported type.");
2124 
2125  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2126  "Reference subtraction: input 0 and Input 1 types are mismatched");
2127 
2128  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2129  "Reference subtraction: input and output types are mismatched");
2130 
2131  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2132  "Reference subtraction: shapes are not suitable for implicit broadcast.");
2133 
2134  return supported;
2135 }
2136 
2138  const TensorInfo& alpha,
2139  const TensorInfo& output,
2140  Optional<std::string&> reasonIfUnsupported) const
2141 {
2142  bool supported = true;
2143 
2144  std::array<DataType, 6> supportedTypes
2145  {
2152  };
2153 
2154  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2155  "PReLU: input is not a supported type.");
2156 
2157  supported &= CheckSupportRule(TypeAnyOf(alpha, supportedTypes), reasonIfUnsupported,
2158  "PReLU: alpha is not a supported type.");
2159 
2160  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2161  "PReLU: output is not a supported type.");
2162 
2163  supported &= CheckSupportRule(TypesAreEqual(input, alpha, output), reasonIfUnsupported,
2164  "PReLU: input, alpha and output types are mismatched");
2165 
2166  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input, alpha, output), reasonIfUnsupported,
2167  "PReLU: shapes are not suitable for implicit broadcast");
2168 
2169  return supported;
2170 }
2171 
2173  const TensorInfo& output,
2174  const TransposeConvolution2dDescriptor& descriptor,
2175  const TensorInfo& weights,
2176  const Optional<TensorInfo>& biases,
2177  Optional<std::string&> reasonIfUnsupported) const
2178 {
2179  IgnoreUnused(descriptor);
2180  bool supported = true;
2181 
2182  std::array<DataType,7> supportedTypes =
2183  {
2191  };
2192 
2193  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2194  "Reference TransposeConvolution2d: input is not a supported type.");
2195 
2196  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2197  "Reference TransposeConvolution2d: output is not a supported type.");
2198 
2199  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2200  "Reference TransposeConvolution2d: input and output types mismatched.");
2201 
2202 
2203  const DataType inputType = input.GetDataType();
2204  if (IsQuantized8BitType(inputType))
2205  {
2207  std::array<DataType, 4> supportedWeightTypes =
2208  {
2212  DataType::QuantizedSymm8PerAxis //Deprecated
2213  };
2215 
2216  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
2217  "Reference TransposeConvolution2d: weights type not supported for "
2218  "quantized input.");
2219  }
2220  else
2221  {
2222  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
2223  "Reference TransposeConvolution2d: weights is not a supported type.");
2224 
2225  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
2226  "Reference TransposeConvolution2d: input and weights types mismatched.");
2227  }
2228 
2229  if (biases.has_value())
2230  {
2231  std::array<DataType,4> biasesSupportedTypes =
2232  {
2237  };
2238  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
2239  "Reference TransposeConvolution2d: biases is not a supported type.");
2240  }
2241 
2242  return supported;
2243 }
2244 
2246  const TensorInfo& output,
2247  const TransposeDescriptor& descriptor,
2248  Optional<std::string&> reasonIfUnsupported) const
2249 {
2250  IgnoreUnused(descriptor);
2251  bool supported = true;
2252 
2253  // Define supported output and inputs types.
2254  std::array<DataType, 6> supportedTypes =
2255  {
2262  };
2263 
2264  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2265  "Reference transpose: input is not a supported type.");
2266 
2267  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2268  "Reference transpose: output is not a supported type.");
2269 
2270  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2271  "Reference transpose: input and output types are mismatched.");
2272 
2273  return supported;
2274 }
2275 
2276 } // namespace armnn
bool IsEqualSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool m_ProjectionEnabled
Enable/disable the projection layer.
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ViewsDescriptor for the SplitterLayer.
const TensorInfo & GetRecurrentToCellWeights() const
Definition: LstmParams.hpp:145
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & GetCellBias() const
Definition: LstmParams.hpp:173
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
A ReshapeDescriptor for the ReshapeLayer.
const TensorInfo & GetRecurrentToInputWeights() const
Definition: LstmParams.hpp:137
const TensorInfo & GetCellLayerNormWeights() const
Definition: LstmParams.hpp:197
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:78
const TensorInfo & GetRecurrentToOutputWeights() const
Definition: LstmParams.hpp:149
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsStackSupported(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
ISubgraphViewConverter supported
A Convolution2dDescriptor for the Convolution2dLayer.
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertBf16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & GetCellToInputWeights() const
Definition: LstmParams.hpp:153
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
std::vector< float > boxEncodings({ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f })
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
const TensorInfo & GetCellToForgetWeights() const
Definition: LstmParams.hpp:157
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFakeQuantizationSupported(const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsResizeBilinearSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
const TensorInfo & GetForgetLayerNormWeights() const
Definition: LstmParams.hpp:193
bool IsMergerSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const MergerDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & GetCellToOutputWeights() const
Definition: LstmParams.hpp:161
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ResizeDescriptor for the ResizeLayer.
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
A StackDescriptor for the StackLayer.
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:265
bool IsOutputSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & GetInputToCellWeights() const
Definition: LstmParams.hpp:129
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A PadDescriptor for the PadLayer.
bool IsConstantSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
DataType
Definition: Types.hpp:36
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An LstmDescriptor for the LstmLayer.
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept...
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsRsqrtSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & GetInputToOutputWeights() const
Definition: LstmParams.hpp:133
A L2NormalizationDescriptor for the L2NormalizationLayer.
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:56
DataType GetDataType() const
Definition: Tensor.hpp:194
An OriginsDescriptor for the ConcatLayer.
A ReduceDescriptor for the REDUCE operators.
bool has_value() const noexcept
Definition: Optional.hpp:53
A FullyConnectedDescriptor for the FullyConnectedLayer.
bool IsRankSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool m_BiasEnabled
Enable/disable bias.
A FakeQuantizationDescriptor for the FakeQuantizationLayer.
const TensorInfo * m_ProjectionBias
Definition: LstmParams.hpp:105
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A GatherDescriptor for the GatherLayer.
bool m_PeepholeEnabled
Enable/disable peephole.
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
A QLstmDescriptor for the QLstmLayer.
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:25
bool IsConvertFp32ToBf16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
min(a, max(b, input)) ReLu1 & ReLu6.
const TensorInfo & GetRecurrentToForgetWeights() const
Definition: LstmParams.hpp:141
bool IsDebugSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A SliceDescriptor for the SliceLayer.
bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
const TensorInfo & GetInputToInputWeights() const
Definition: LstmParams.hpp:121
const TensorInfo & GetOutputLayerNormWeights() const
Definition: LstmParams.hpp:201
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
int32_t m_Axis
The axis in params to gather indices from.
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:98
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & GetForgetGateBias() const
Definition: LstmParams.hpp:169
std::vector< float > scores({ 0.0f, 0.9f, 0.8f, 0.0f, 0.75f, 0.72f, 0.0f, 0.6f, 0.5f, 0.0f, 0.93f, 0.95f, 0.0f, 0.5f, 0.4f, 0.0f, 0.3f, 0.2f })
bool IsConcatSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const ConcatDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A MeanDescriptor for the MeanLayer.
bool IsGreaterSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool m_LayerNormEnabled
Enable/disable layer normalization.
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
const TensorInfo & GetInputGateBias() const
Definition: LstmParams.hpp:165
A TransposeDescriptor for the TransposeLayer.
const TensorInfo & GetProjectionWeights() const
Definition: LstmParams.hpp:181
A StridedSliceDescriptor for the StridedSliceLayer.
const TensorInfo & GetInputToForgetWeights() const
Definition: LstmParams.hpp:125
bool IsInputSupported(const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsAbsSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & GetInputLayerNormWeights() const
Definition: LstmParams.hpp:189
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSplitterSupported(const TensorInfo &input, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A Pooling2dDescriptor for the Pooling2dLayer.
A NormalizationDescriptor for the NormalizationLayer.
const TensorInfo & GetOutputGateBias() const
Definition: LstmParams.hpp:177
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & GetProjectionBias() const
Definition: LstmParams.hpp:185
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:191
bool IsSupportedForDataTypeGeneric(Optional< std::string &> reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
A SoftmaxDescriptor for the SoftmaxLayer.
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:48
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A FillDescriptor for the FillLayer.
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A PermuteDescriptor for the PermuteLayer.
std::vector< float > anchors({ 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 100.5f, 1.0f, 1.0f })