ArmNN 20.11
RefLayerSupport Class Reference

#include <RefLayerSupport.hpp>

Inheritance diagram for RefLayerSupport:
RefLayerSupport → LayerSupportBase → ILayerSupport

Public Member Functions

bool IsAbsSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsActivationSupported (const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsAdditionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsArgMinMaxSupported (const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchToSpaceNdSupported (const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsComparisonSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConcatSupported (const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const ConcatDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConstantSupported (const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertBf16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToBf16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToFp16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDebugSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthToSpaceSupported (const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDequantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDetectionPostProcessSupported (const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDilatedDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDivisionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsElementwiseUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsEqualSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFakeQuantizationSupported (const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFillSupported (const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFloorSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFullyConnectedSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsGreaterSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInputSupported (const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInstanceNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsL2NormalizationSupported (const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogicalBinarySupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsLogicalUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsLogSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMaximumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMeanSupported (const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMergerSupported (const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const MergerDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMemCopySupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMinimumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMultiplicationSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsOutputSupported (const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPadSupported (const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPermuteSupported (const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPooling2dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQuantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQLstmSupported (const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsRankSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsReshapeSupported (const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsResizeBilinearSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsResizeSupported (const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsRsqrtSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSliceSupported (const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToBatchNdSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToDepthSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSplitterSupported (const TensorInfo &input, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSplitterSupported (const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStackSupported (const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStridedSliceSupported (const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSubtractionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPreluSupported (const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeSupported (const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
- Public Member Functions inherited from LayerSupportBase
bool IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMemImportSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMergeSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPreCompiledSupported (const TensorInfo &input, const PreCompiledDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQuantizedLstmSupported (const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStandInSupported (const std::vector< const TensorInfo *> &inputs, const std::vector< const TensorInfo *> &outputs, const StandInDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSwitchSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output0, const TensorInfo &output1, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 

Additional Inherited Members

- Protected Member Functions inherited from ILayerSupport
 ILayerSupport ()
 
virtual ~ILayerSupport ()
 

Detailed Description

Definition at line 12 of file RefLayerSupport.hpp.
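
RefLayerSupport implements the layer-support queries for the reference (CPU) backend: each Is<Layer>Supported override checks the tensor data types and shapes against the capabilities of the reference workloads and, when a check fails, appends a human-readable explanation to the optional reasonIfUnsupported string. The snippet below is a minimal usage sketch, not code from the ArmNN sources; it assumes it is compiled inside the ArmNN source tree (so that RefLayerSupport.hpp is on the include path) and that RefLayerSupport is default-constructible, and the tensor shapes are arbitrary example values.

// Usage sketch (illustrative): query the reference backend for ReLu activation
// support and collect the failure reason, if any, in a caller-owned string.
#include <RefLayerSupport.hpp>

#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include <iostream>
#include <string>

int main()
{
    using namespace armnn;

    RefLayerSupport layerSupport;

    const TensorInfo input (TensorShape({ 1, 2, 2, 3 }), DataType::Float32);
    const TensorInfo output(TensorShape({ 1, 2, 2, 3 }), DataType::Float32);

    ActivationDescriptor descriptor;
    descriptor.m_Function = ActivationFunction::ReLu;

    std::string reason;
    const bool supported = layerSupport.IsActivationSupported(
        input, output, descriptor, Optional<std::string&>(reason));

    if (!supported)
    {
        std::cout << "Not supported on the reference backend: " << reason << std::endl;
    }
    return supported ? 0 : 1;
}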

Member Function Documentation

◆ IsAbsSupported()

bool IsAbsSupported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string &> reasonIfUnsupported = EmptyOptional()
) const override virtual

Reimplemented from LayerSupportBase.

Definition at line 61 of file RefLayerSupport.cpp.

References armnn::Abs, and RefLayerSupport::IsElementwiseUnarySupported().

63 {
64  return IsElementwiseUnarySupported(input,
65  output,
66  ElementwiseUnaryDescriptor(UnaryOperation::Abs),
67  reasonIfUnsupported);
68 }

◆ IsActivationSupported()

bool IsActivationSupported ( const TensorInfo & input,
const TensorInfo & output,
const ActivationDescriptor & descriptor,
Optional< std::string &> reasonIfUnsupported = EmptyOptional()
) const override virtual

Reimplemented from LayerSupportBase.

Definition at line 70 of file RefLayerSupport.cpp.

References armnn::Abs, armnn::BFloat16, armnn::BoundedReLu, armnn::CheckSupportRule(), armnn::Elu, armnn::Float16, armnn::Float32, armnn::HardSwish, armnn::LeakyReLu, armnn::Linear, ActivationDescriptor::m_Function, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::ReLu, armnn::Sigmoid, armnn::SoftReLu, armnn::Sqrt, armnn::Square, and armnn::TanH.

74 {
75  bool supported = true;
76 
77  // Define supported types.
78  std::array<DataType,6> supportedTypes = {
79  DataType::BFloat16,
80  DataType::Float16,
81  DataType::Float32,
82  DataType::QAsymmS8,
83  DataType::QAsymmU8,
84  DataType::QSymmS16
85  };
86 
87  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
88  "Reference activation: input type not supported.");
89 
90  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
91  "Reference activation: output type not supported.");
92 
93  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
94  "Reference activation: input and output types mismatched.");
95 
96  supported &= CheckSupportRule(ShapesAreSameRank(input, output), reasonIfUnsupported,
97  "Reference activation: input and output shapes are of different rank.");
98 
99 
100  struct ActivationFunctionSupported : public Rule
101  {
102  ActivationFunctionSupported(const ActivationDescriptor& desc)
103  {
104  switch(desc.m_Function)
105  {
106  case ActivationFunction::Abs:
107  case ActivationFunction::BoundedReLu:
108  case ActivationFunction::Elu:
109  case ActivationFunction::HardSwish:
110  case ActivationFunction::LeakyReLu:
111  case ActivationFunction::Linear:
112  case ActivationFunction::ReLu:
113  case ActivationFunction::Sigmoid:
114  case ActivationFunction::SoftReLu:
115  case ActivationFunction::Sqrt:
116  case ActivationFunction::Square:
117  case ActivationFunction::TanH:
118  {
119  m_Res = true;
120  break;
121  }
122  default:
123  {
124  m_Res = false;
125  break;
126  }
127  }
128  }
129  };
130 
131  // Function is supported
132  supported &= CheckSupportRule(ActivationFunctionSupported(descriptor), reasonIfUnsupported,
133  "Reference activation: function not supported.");
134 
135  return supported;
136 }
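All of the Is*Supported overrides on this page follow the same pattern as the listing above: a list of supported DataTypes plus a chain of small rule objects (TypeAnyOf, TypesAreEqual, ShapesAreSameRank, ...) combined through CheckSupportRule(), which appends the given message to reasonIfUnsupported when a rule fails. The stand-alone sketch below illustrates that pattern in simplified form; the names and signatures here are illustrative assumptions and do not reproduce the actual helpers in RefLayerSupport.cpp (which operate on armnn::TensorInfo and armnn::Optional<std::string&>).

// Simplified illustration of the rule-checking pattern (not ArmNN source code).
#include <algorithm>
#include <array>
#include <string>

enum class DataType { Float16, Float32, QAsymmU8 };   // illustrative subset

struct Rule
{
    bool operator()() const { return m_Res; }
    bool m_Res = true;
};

struct TypeAnyOf : public Rule
{
    // Rule passes when 'actual' is one of the allowed data types.
    template <typename Container>
    TypeAnyOf(DataType actual, const Container& allowed)
    {
        m_Res = std::any_of(allowed.begin(), allowed.end(),
                            [actual](DataType t) { return t == actual; });
    }
};

template <typename F>
bool CheckSupportRule(F&& rule, std::string* reasonIfUnsupported, const char* reason)
{
    const bool supported = rule();
    if (!supported && reasonIfUnsupported != nullptr)
    {
        *reasonIfUnsupported += reason;   // collect the failure message
        *reasonIfUnsupported += '\n';
    }
    return supported;
}

int main()
{
    const std::array<DataType, 3> supportedTypes = { DataType::Float16,
                                                     DataType::Float32,
                                                     DataType::QAsymmU8 };
    std::string reason;
    bool supported = true;
    // Each override accumulates individual rule results with &=.
    supported &= CheckSupportRule(TypeAnyOf(DataType::Float32, supportedTypes), &reason,
                                  "Reference activation: input type not supported.");
    return supported ? 0 : 1;
}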

◆ IsAdditionSupported()

bool IsAdditionSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string &> reasonIfUnsupported = EmptyOptional()
) const override virtual

Reimplemented from LayerSupportBase.

Definition at line 138 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by BOOST_AUTO_TEST_CASE().

142 {
143  bool supported = true;
144 
145  std::array<DataType,7> supportedTypes = {
146  DataType::BFloat16,
147  DataType::Float16,
148  DataType::Float32,
149  DataType::QAsymmS8,
150  DataType::QAsymmU8,
151  DataType::QSymmS16,
152  DataType::Signed32
153  };
154 
155  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
156  "Reference addition: input 0 is not a supported type.");
157 
158  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
159  "Reference addition: input 1 is not a supported type.");
160 
161  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
162  "Reference addition: output is not a supported type.");
163 
164  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
165  "Reference addition: input 0 and Input 1 types are mismatched");
166 
167  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
168  "Reference addition: input and output types are mismatched");
169 
170  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
171  "Reference addition: shapes are not suitable for implicit broadcast.");
172 
173  return supported;
174 }

◆ IsArgMinMaxSupported()

bool IsArgMinMaxSupported ( const TensorInfo & input,
const TensorInfo & output,
const ArgMinMaxDescriptor & descriptor,
Optional< std::string &> reasonIfUnsupported = EmptyOptional()
) const override virtual

Reimplemented from LayerSupportBase.

Definition at line 176 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

179 {
180  IgnoreUnused(descriptor);
181 
182  std::array<DataType, 7> supportedTypes =
183  {
184  DataType::BFloat16,
185  DataType::Float16,
186  DataType::Float32,
187  DataType::QAsymmS8,
188  DataType::QAsymmU8,
189  DataType::QSymmS16,
190  DataType::Signed32
191  };
192 
193  bool supported = true;
194 
195  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
196  "Reference ArgMinMax: input is not a supported type.");
197  supported &= CheckSupportRule(TypeIs(output, DataType::Signed32), reasonIfUnsupported,
198  "Reference ArgMinMax: output type not supported");
199 
200  return supported;
201 }

◆ IsBatchNormalizationSupported()

bool IsBatchNormalizationSupported ( const TensorInfo & input,
const TensorInfo & output,
const TensorInfo & mean,
const TensorInfo & var,
const TensorInfo & beta,
const TensorInfo & gamma,
const BatchNormalizationDescriptor & descriptor,
Optional< std::string &> reasonIfUnsupported = EmptyOptional()
) const override virtual

Reimplemented from LayerSupportBase.

Definition at line 203 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

211 {
212  IgnoreUnused(descriptor);
213 
214  std::array<DataType, 6> supportedTypes =
215  {
216  DataType::BFloat16,
217  DataType::Float16,
218  DataType::Float32,
219  DataType::QAsymmS8,
220  DataType::QAsymmU8,
221  DataType::QSymmS16
222  };
223 
224  bool supported = true;
225 
226  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
227  "Reference batch normalization: input is not a supported type.");
228 
229  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
230  "Reference batch normalization: output is not a supported type.");
231 
232  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
233  "Reference batch normalization: input and output types are mismatched");
234 
235  supported &= CheckSupportRule(TypeAnyOf(mean, supportedTypes), reasonIfUnsupported,
236  "Reference batch normalization: mean is not a supported type.");
237 
238  supported &= CheckSupportRule(TypeAnyOf(variance, supportedTypes), reasonIfUnsupported,
239  "Reference batch normalization: variance is not a supported type.");
240 
241  supported &= CheckSupportRule(TypeAnyOf(beta, supportedTypes), reasonIfUnsupported,
242  "Reference batch normalization: beta is not a supported type.");
243 
244  supported &= CheckSupportRule(TypeAnyOf(gamma, supportedTypes), reasonIfUnsupported,
245  "Reference batch normalization: gamma is not a supported type.");
246 
247  return supported;
248 }

◆ IsBatchToSpaceNdSupported()

bool IsBatchToSpaceNdSupported ( const TensorInfo & input,
const TensorInfo & output,
const BatchToSpaceNdDescriptor & descriptor,
Optional< std::string &> reasonIfUnsupported = EmptyOptional()
) const override virtual

Reimplemented from LayerSupportBase.

Definition at line 250 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetNumDimensions(), armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

254 {
255  IgnoreUnused(descriptor);
256 
257  bool supported = true;
258 
259  std::string batchToSpaceNdLayerStr = "batchToSpaceNd";
260  std::string inputTensorStr = "input";
261  std::string outputTensorStr = "output";
262 
263  // Define supported types.
264  std::array<DataType,6> supportedTypes =
265  {
266  DataType::BFloat16,
267  DataType::Float16,
268  DataType::Float32,
269  DataType::QAsymmS8,
270  DataType::QAsymmU8,
271  DataType::QSymmS16
272  };
273 
274  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
275  "Reference BatchToSpaceNd: input type not supported.");
276 
277  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
278  "Reference BatchToSpaceNd: output type not supported.");
279 
280  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
281  "Reference BatchToSpaceNd: input and output types mismatched.");
282 
283  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 4),
284  reasonIfUnsupported,
285  CreateIncorrectDimensionsErrorMsg(4,
286  output.GetNumDimensions(),
287  batchToSpaceNdLayerStr,
288  outputTensorStr).data());
289 
290  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(input, 4),
291  reasonIfUnsupported,
292  CreateIncorrectDimensionsErrorMsg(4,
293  input.GetNumDimensions(),
294  batchToSpaceNdLayerStr,
295  inputTensorStr).data());
296 
297  return supported;
298 }

◆ IsComparisonSupported()

bool IsComparisonSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
const ComparisonDescriptor & descriptor,
Optional< std::string &> reasonIfUnsupported = EmptyOptional()
) const override virtual

Reimplemented from LayerSupportBase.

Definition at line 300 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsEqualSupported(), and RefLayerSupport::IsGreaterSupported().

305 {
306  IgnoreUnused(descriptor);
307  std::array<DataType, 8> supportedInputTypes =
308  {
309  DataType::Boolean,
310  DataType::BFloat16,
311  DataType::Float16,
312  DataType::Float32,
313  DataType::QAsymmS8,
314  DataType::QAsymmU8,
315  DataType::QSymmS16,
316  DataType::Signed32
317  };
318 
319  bool supported = true;
320  supported &= CheckSupportRule(TypeAnyOf(input0, supportedInputTypes), reasonIfUnsupported,
321  "Reference comparison: input 0 is not a supported type");
322 
323  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
324  "Reference comparison: input 0 and Input 1 types are mismatched");
325 
326  supported &= CheckSupportRule(TypeIs(output, DataType::Boolean), reasonIfUnsupported,
327  "Reference comparison: output is not of type Boolean");
328 
329  return supported;
330 }

◆ IsConcatSupported()

bool IsConcatSupported ( const std::vector< const TensorInfo *> inputs,
const TensorInfo & output,
const ConcatDescriptor & descriptor,
Optional< std::string &> reasonIfUnsupported = EmptyOptional()
) const override virtual

Reimplemented from LayerSupportBase.

Definition at line 332 of file RefLayerSupport.cpp.

References ARMNN_ASSERT, armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsMergerSupported().

336 {
337  IgnoreUnused(descriptor);
338 
339  bool supported = true;
340  std::array<DataType,6> supportedTypes =
341  {
342  DataType::BFloat16,
343  DataType::Float16,
344  DataType::Float32,
345  DataType::QAsymmS8,
346  DataType::QAsymmU8,
347  DataType::QSymmS16
348  };
349 
350  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
351  "Reference concatenation: output type not supported");
352  for (const TensorInfo* input : inputs)
353  {
354  ARMNN_ASSERT(input != nullptr);
355  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
356  "Reference concatenation: input type not supported");
357 
358  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
359  "Reference concatenation: input and output types mismatched.");
360  }
361 
362  return supported;
363 }

◆ IsConstantSupported()

bool IsConstantSupported ( const TensorInfo & output,
Optional< std::string &> reasonIfUnsupported = EmptyOptional()
) const override virtual

Reimplemented from LayerSupportBase.

Definition at line 365 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.

367 {
368  std::array<DataType,8> supportedTypes =
369  {
370  DataType::BFloat16,
371  DataType::Float16,
372  DataType::Float32,
373  DataType::QAsymmS8,
374  DataType::QAsymmU8,
375  DataType::QSymmS8,
376  DataType::QSymmS16,
377  DataType::Signed32
378  };
379 
380  return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
381  "Reference constant: output is not a supported type.");
382 }

◆ IsConvertBf16ToFp32Supported()

bool IsConvertBf16ToFp32Supported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string &> reasonIfUnsupported = EmptyOptional()
) const override virtual

Reimplemented from LayerSupportBase.

Definition at line 384 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), and armnn::Float32.

387 {
388  bool supported = true;
389 
390  supported &= CheckSupportRule(TypeIs(input, DataType::BFloat16), reasonIfUnsupported,
391  "Reference for ConvertBf16ToFp32 layer: input type not supported");
392 
393  supported &= CheckSupportRule(TypeIs(output, DataType::Float32), reasonIfUnsupported,
394  "Reference for ConvertBf16ToFp32 layer: output type not supported");
395 
396  return supported;
397 }

◆ IsConvertFp16ToFp32Supported()

bool IsConvertFp16ToFp32Supported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string &> reasonIfUnsupported = EmptyOptional()
) const override virtual

Reimplemented from LayerSupportBase.

Definition at line 399 of file RefLayerSupport.cpp.

References TensorInfo::GetDataType(), and armnn::IsSupportedForDataTypeGeneric().

402 {
403  return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
404  input.GetDataType(),
405  &TrueFunc<>,
406  &FalseInputFuncF32<>,
407  &FalseFuncU8<>,
408  &FalseFuncI32<>,
409  &FalseFuncU8<>) &&
410  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
411  output.GetDataType(),
412  &FalseOutputFuncF16<>,
413  &TrueFunc<>,
414  &FalseFuncU8<>,
415  &FalseFuncI32<>,
416  &FalseFuncU8<>));
417 }

◆ IsConvertFp32ToBf16Supported()

bool IsConvertFp32ToBf16Supported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string &> reasonIfUnsupported = EmptyOptional()
) const override virtual

Reimplemented from LayerSupportBase.

Definition at line 419 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), and armnn::Float32.

422 {
423  bool supported = true;
424 
425  supported &= CheckSupportRule(TypeIs(input, DataType::Float32), reasonIfUnsupported,
426  "Reference for ConvertFp32ToBf16 layer: input type not supported");
427 
428  supported &= CheckSupportRule(TypeIs(output, DataType::BFloat16), reasonIfUnsupported,
429  "Reference for ConvertFp32ToBf16 layer: output type not supported");
430 
431  return supported;
432 }

◆ IsConvertFp32ToFp16Supported()

bool IsConvertFp32ToFp16Supported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string &> reasonIfUnsupported = EmptyOptional()
) const override virtual

Reimplemented from LayerSupportBase.

Definition at line 434 of file RefLayerSupport.cpp.

References TensorInfo::GetDataType(), and armnn::IsSupportedForDataTypeGeneric().

437 {
438  return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
439  input.GetDataType(),
440  &FalseInputFuncF16<>,
441  &TrueFunc<>,
442  &FalseFuncU8<>,
443  &FalseFuncI32<>,
444  &FalseFuncU8<>) &&
445  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
446  output.GetDataType(),
447  &TrueFunc<>,
448  &FalseOutputFuncF32<>,
449  &FalseFuncU8<>,
450  &FalseFuncI32<>,
451  &FalseFuncU8<>));
452 }

◆ IsConvolution2dSupported()

bool IsConvolution2dSupported ( const TensorInfo & input,
const TensorInfo & output,
const Convolution2dDescriptor & descriptor,
const TensorInfo & weights,
const Optional< TensorInfo > & biases,
Optional< std::string &> reasonIfUnsupported = EmptyOptional()
) const override virtual

Reimplemented from LayerSupportBase.

Definition at line 454 of file RefLayerSupport.cpp.

References ARMNN_NO_DEPRECATE_WARN_BEGIN, ARMNN_NO_DEPRECATE_WARN_END, armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::QuantizedSymm8PerAxis, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

460 {
461  bool supported = true;
462 
463  // Define supported types.
464  std::array<DataType,7> supportedTypes =
465  {
466  DataType::BFloat16,
467  DataType::Float16,
468  DataType::Float32,
469  DataType::QAsymmS8,
470  DataType::QAsymmU8,
471  DataType::QSymmS8,
472  DataType::QSymmS16
473  };
474 
475  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
476  "Reference Convolution2d: input is not a supported type.");
477 
478  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
479  "Reference Convolution2d: output is not a supported type.");
480 
481  // For Convolution2d, we allow to have BFloat16 input with Float32 output for optimization.
482  if (input.GetDataType() == DataType::BFloat16)
483  {
484  if (output.GetDataType() != DataType::BFloat16 && output.GetDataType() != DataType::Float32)
485  {
486  reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n";
487  supported = false;
488  }
489  }
490  else
491  {
492  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
493  "Reference Convolution2d: input and output types mismatched.");
494  }
495 
496  const DataType inputType = input.GetDataType();
497  if (IsQuantized8BitType(inputType))
498  {
499  ARMNN_NO_DEPRECATE_WARN_BEGIN
500  std::array<DataType, 4> supportedWeightTypes =
501  {
502  DataType::QAsymmS8,
503  DataType::QAsymmU8,
504  DataType::QSymmS8,
505  DataType::QuantizedSymm8PerAxis // deprecated
506  };
507  ARMNN_NO_DEPRECATE_WARN_END
508 
509  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
510  "Reference Convolution2d: weights type not supported for quantized input.");
511  }
512  else
513  {
514  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
515  "Reference Convolution2d: weights is not a supported type.");
516 
517  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
518  "Reference Convolution2d: input and weights types mismatched.");
519  }
520 
521  if (biases.has_value())
522  {
523  std::array<DataType,4> biasesSupportedTypes =
524  {
525  DataType::BFloat16,
526  DataType::Float16,
527  DataType::Float32,
528  DataType::Signed32
529  };
530 
531  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
532  "Reference Convolution2d: biases is not a supported type.");
533  }
534  IgnoreUnused(descriptor);
535 
536  return supported;
537 }

◆ IsDebugSupported()

bool IsDebugSupported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string &> reasonIfUnsupported = EmptyOptional()
) const override virtual

Reimplemented from LayerSupportBase.

Definition at line 539 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.

542 {
543  bool supported = true;
544 
545  std::array<DataType, 8> supportedTypes =
546  {
547  DataType::BFloat16,
548  DataType::Float16,
549  DataType::Float32,
550  DataType::QAsymmS8,
551  DataType::QAsymmU8,
552  DataType::QSymmS8,
553  DataType::QSymmS16,
554  DataType::Signed32
555  };
556 
557  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
558  "Reference for Debug layer: input type not supported");
559 
560  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
561  "Reference for Debug layer: output type not supported");
562 
563  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
564  "Reference for Debug layer: input and output types are mismatched");
565 
566  return supported;
567 }

◆ IsDepthToSpaceSupported()

bool IsDepthToSpaceSupported ( const TensorInfo & input,
const TensorInfo & output,
const DepthToSpaceDescriptor & descriptor,
Optional< std::string &> reasonIfUnsupported = EmptyOptional()
) const override virtual

Reimplemented from LayerSupportBase.

Definition at line 569 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

573 {
574  IgnoreUnused(descriptor);
575  bool supported = true;
576 
577  std::array<DataType,6> supportedTypes =
578  {
579  DataType::BFloat16,
580  DataType::Float16,
581  DataType::Float32,
582  DataType::QAsymmS8,
583  DataType::QAsymmU8,
584  DataType::QSymmS16
585  };
586 
587  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
588  "Reference DepthToSpace: input type not supported");
589 
590  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
591  "Reference DepthToSpace: output type not supported");
592 
593  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
594  "Reference DepthToSpace: input and output types are mismatched");
595 
596  return supported;
597 }

◆ IsDepthwiseConvolutionSupported()

bool IsDepthwiseConvolutionSupported ( const TensorInfo & input,
const TensorInfo & output,
const DepthwiseConvolution2dDescriptor & descriptor,
const TensorInfo & weights,
const Optional< TensorInfo > & biases,
Optional< std::string &> reasonIfUnsupported = EmptyOptional()
) const override virtual

Reimplemented from LayerSupportBase.

Definition at line 599 of file RefLayerSupport.cpp.

References ARMNN_NO_DEPRECATE_WARN_BEGIN, ARMNN_NO_DEPRECATE_WARN_END, armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::QuantizedSymm8PerAxis, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

Referenced by RefLayerSupport::IsDilatedDepthwiseConvolutionSupported().

605 {
606  IgnoreUnused(descriptor);
607  bool supported = true;
608 
609  // Define supported types.
610  std::array<DataType,7> supportedTypes =
611  {
612  DataType::BFloat16,
613  DataType::Float16,
614  DataType::Float32,
615  DataType::QAsymmS8,
616  DataType::QAsymmU8,
617  DataType::QSymmS8,
618  DataType::QSymmS16
619  };
620 
621  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
622  "Reference DepthwiseConvolution2d: input is not a supported type.");
623 
624  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
625  "Reference DepthwiseConvolution2d: output is not a supported type.");
626 
627  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
628  "Reference DepthwiseConvolution2d: input and output types mismatched.");
629 
630  const DataType inputType = input.GetDataType();
631  if (IsQuantized8BitType(inputType))
632  {
633  ARMNN_NO_DEPRECATE_WARN_BEGIN
634  std::array<DataType, 4> supportedWeightTypes =
635  {
636  DataType::QAsymmS8,
637  DataType::QAsymmU8,
638  DataType::QSymmS8,
639  DataType::QuantizedSymm8PerAxis // deprecated
640  };
641  ARMNN_NO_DEPRECATE_WARN_END
642 
643  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
644  "Reference DepthwiseConvolution2d: weights type not supported for "
645  "quantized input.");
646  }
647  else
648  {
649  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
650  "Reference DepthwiseConvolution2d: weights is not a supported type.");
651 
652  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
653  "Reference DepthwiseConvolution2d: input and weights types mismatched.");
654  }
655 
656  if (biases.has_value())
657  {
658  std::array<DataType,4> biasesSupportedTypes =
659  {
660  DataType::BFloat16,
661  DataType::Float16,
662  DataType::Float32,
663  DataType::Signed32
664  };
665  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
666  "Reference DepthwiseConvolution2d: biases is not a supported type.");
667  }
668 
669  return supported;
670 
671 }

◆ IsDequantizeSupported()

bool IsDequantizeSupported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string &> reasonIfUnsupported = EmptyOptional()
) const override virtual

Reimplemented from LayerSupportBase.

Definition at line 673 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

676 {
677  bool supported = true;
678 
679  std::array<DataType,4> supportedInputTypes = {
680  DataType::QAsymmS8,
681  DataType::QAsymmU8,
682  DataType::QSymmS8,
683  DataType::QSymmS16
684  };
685 
686  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
687  "Reference for Dequantize layer: input type not supported.");
688 
689  supported &= CheckSupportRule( TypeNotPerAxisQuantized(input), reasonIfUnsupported,
690  "Reference for Dequantize layer: per-axis quantized input not support .");
691 
692  supported &= CheckSupportRule(TypeNotPerAxisQuantized(input), reasonIfUnsupported,
693  "Reference dequantize: per-axis quantized input not support .");
694 
695  std::array<DataType,3> supportedOutputTypes = {
696  DataType::BFloat16,
697  DataType::Float16,
698  DataType::Float32
699  };
700 
701  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
702  "Reference for Dequantize layer: output type not supported.");
703 
704  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
705  "Reference for Dequantize layer: input/output shapes have different num total "
706  "elements.");
707 
708  return supported;
709 }

◆ IsDetectionPostProcessSupported()

bool IsDetectionPostProcessSupported ( const TensorInfo & boxEncodings,
const TensorInfo & scores,
const TensorInfo & anchors,
const TensorInfo & detectionBoxes,
const TensorInfo & detectionClasses,
const TensorInfo & detectionScores,
const TensorInfo & numDetections,
const DetectionPostProcessDescriptor & descriptor,
Optional< std::string &> reasonIfUnsupported = EmptyOptional()
) const override virtual

Reimplemented from LayerSupportBase.

Definition at line 711 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

720 {
721  IgnoreUnused(anchors, detectionBoxes, detectionClasses, detectionScores, numDetections, descriptor);
722 
723  bool supported = true;
724 
725  std::array<DataType,6> supportedInputTypes =
726  {
727  DataType::BFloat16,
728  DataType::Float16,
729  DataType::Float32,
730  DataType::QAsymmS8,
731  DataType::QAsymmU8,
732  DataType::QSymmS16
733  };
734 
735  supported &= CheckSupportRule(TypeAnyOf(boxEncodings, supportedInputTypes), reasonIfUnsupported,
736  "Reference DetectionPostProcess: input 0 is not a supported type.");
737 
738  supported &= CheckSupportRule(TypeAnyOf(scores, supportedInputTypes), reasonIfUnsupported,
739  "Reference DetectionPostProcess: input 1 is not a supported type.");
740 
741  return supported;
742 }

◆ IsDilatedDepthwiseConvolutionSupported()

bool IsDilatedDepthwiseConvolutionSupported ( const TensorInfo & input,
const TensorInfo & output,
const DepthwiseConvolution2dDescriptor & descriptor,
const TensorInfo & weights,
const Optional< TensorInfo > & biases,
Optional< std::string &> reasonIfUnsupported = EmptyOptional()
) const override virtual

Reimplemented from LayerSupportBase.

Definition at line 744 of file RefLayerSupport.cpp.

References RefLayerSupport::IsDepthwiseConvolutionSupported().

750 {
751  return IsDepthwiseConvolutionSupported(input, output, descriptor, weights, biases, reasonIfUnsupported);
752 }

◆ IsDivisionSupported()

bool IsDivisionSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string &> reasonIfUnsupported = EmptyOptional()
) const override virtual

Reimplemented from LayerSupportBase.

Definition at line 754 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

758 {
759  bool supported = true;
760 
761  std::array<DataType,7> supportedTypes = {
762  DataType::BFloat16,
763  DataType::Float16,
764  DataType::Float32,
765  DataType::QAsymmS8,
766  DataType::QAsymmU8,
767  DataType::QSymmS16,
768  DataType::Signed32
769  };
770 
771  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
772  "Reference division: input 0 is not a supported type.");
773 
774  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
775  "Reference division: input 1 is not a supported type.");
776 
777  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
778  "Reference division: output is not a supported type.");
779 
780  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
781  "Reference division: input 0 and Input 1 types are mismatched");
782 
783  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
784  "Reference division: input and output types are mismatched");
785 
786  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
787  "Reference division: shapes are not suitable for implicit broadcast.");
788 
789  return supported;
790 }

◆ IsElementwiseUnarySupported()

bool IsElementwiseUnarySupported ( const TensorInfo & input,
const TensorInfo & output,
const ElementwiseUnaryDescriptor & descriptor,
Optional< std::string &> reasonIfUnsupported = EmptyOptional()
) const override virtual

Reimplemented from LayerSupportBase.

Definition at line 792 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::LogicalNot, ElementwiseUnaryDescriptor::m_Operation, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsAbsSupported(), and RefLayerSupport::IsRsqrtSupported().

796 {
797  IgnoreUnused(descriptor);
798 
799  std::array<DataType, 7> supportedTypes =
800  {
801  DataType::BFloat16,
802  DataType::Float16,
803  DataType::Float32,
804  DataType::QAsymmS8,
805  DataType::QAsymmU8,
806  DataType::QSymmS16,
807  DataType::Signed32
808  };
809 
810  std::array<DataType, 1> logicalSupportedTypes =
811  {
812  DataType::Boolean
813  };
814 
815  bool supported = true;
816 
817  if (descriptor.m_Operation == UnaryOperation::LogicalNot)
818  {
819  supported &= CheckSupportRule(TypeAnyOf(input, logicalSupportedTypes), reasonIfUnsupported,
820  "Reference elementwise unary: input type not supported");
821 
822  supported &= CheckSupportRule(TypeAnyOf(output, logicalSupportedTypes), reasonIfUnsupported,
823  "Reference elementwise unary: output type not supported");
824  }
825  else
826  {
827  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
828  "Reference elementwise unary: input type not supported");
829 
830  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
831  "Reference elementwise unary: output type not supported");
832  }
833 
834  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
835  "Reference elementwise unary: input and output types not matching");
836 
837  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
838  "Reference elementwise unary: input and output shapes"
839  "have different number of total elements");
840 
841  return supported;
842 }

◆ IsEqualSupported()

bool IsEqualSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string &> reasonIfUnsupported = EmptyOptional()
) const override virtual

Reimplemented from LayerSupportBase.

Definition at line 844 of file RefLayerSupport.cpp.

References armnn::Equal, and RefLayerSupport::IsComparisonSupported().

848 {
849  return IsComparisonSupported(input0,
850  input1,
851  output,
852  ComparisonDescriptor(ComparisonOperation::Equal),
853  reasonIfUnsupported);
854 }

◆ IsFakeQuantizationSupported()

bool IsFakeQuantizationSupported ( const TensorInfo & input,
const FakeQuantizationDescriptor & descriptor,
Optional< std::string &> reasonIfUnsupported = EmptyOptional()
) const override virtual

Reimplemented from LayerSupportBase.

Definition at line 856 of file RefLayerSupport.cpp.

References armnn::CheckSupportRule(), armnn::Float32, and armnn::IgnoreUnused().

859 {
860  IgnoreUnused(descriptor);
861  bool supported = true;
862 
863  std::array<DataType,1> supportedTypes =
864  {
865  DataType::Float32
866  };
867 
868  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
869  "Reference fake quantization: input type not supported.");
870 
871  return supported;
872 }

◆ IsFillSupported()

bool IsFillSupported ( const TensorInfo & input,
const TensorInfo & output,
const FillDescriptor & descriptor,
Optional< std::string &> reasonIfUnsupported = EmptyOptional()
) const override virtual

Reimplemented from LayerSupportBase.

Definition at line 874 of file RefLayerSupport.cpp.

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), and armnn::Signed32.

878 {
879  IgnoreUnused(descriptor);
880  IgnoreUnused(output);
881 
882  bool supported = true;
883 
884  std::array<DataType,3> supportedTypes =
885  {
886  DataType::Float16,
887  DataType::Float32,
888  DataType::Signed32
889  };
890 
891  supported &= CheckSupportRule(TypeIs(input, DataType::Signed32), reasonIfUnsupported,
892  "Reference Fill: input type not supported.");
893 
894  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
895  "Reference Fill: output type not supported.");
896  return supported;
897 }

◆ IsFloorSupported()

bool IsFloorSupported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string &> reasonIfUnsupported = EmptyOptional()
) const override virtual

Reimplemented from LayerSupportBase.

Definition at line 899 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, and armnn::IgnoreUnused().

902 {
903  IgnoreUnused(output);
904  bool supported = true;
905 
906  std::array<DataType,3> supportedTypes =
907  {
908  DataType::BFloat16,
909  DataType::Float16,
910  DataType::Float32
911  };
912 
913  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
914  "Reference Floor: input type not supported.");
915 
916  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
917  "Reference Floor: output type not supported.");
918 
919  return supported;
920 }

◆ IsFullyConnectedSupported()

bool IsFullyConnectedSupported ( const TensorInfo & input,
const TensorInfo & output,
const TensorInfo & weights,
const TensorInfo & biases,
const FullyConnectedDescriptor & descriptor,
Optional< std::string &> reasonIfUnsupported = EmptyOptional()
) const override virtual

Reimplemented from LayerSupportBase.

Definition at line 922 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), FullyConnectedDescriptor::m_BiasEnabled, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

928 {
929  bool supported = true;
930 
931  // Define supported types.
932  std::array<DataType,6> supportedTypes =
933  {
934  DataType::BFloat16,
935  DataType::Float16,
936  DataType::Float32,
937  DataType::QAsymmS8,
938  DataType::QAsymmU8,
939  DataType::QSymmS16
940  };
941 
942  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
943  "Reference Fully Connected: input type not supported.");
944 
945  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
946  "Reference Fully Connected: output type not supported.");
947 
948  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
949  "Reference Fully Connected: weights type not supported.");
950 
951  // For FullyConnected, we allow to have BFloat16 input with Float32 output for optimization.
952  if (input.GetDataType() == DataType::BFloat16)
953  {
954  if (output.GetDataType() != DataType::BFloat16 && output.GetDataType() != DataType::Float32)
955  {
956  reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n";
957  supported = false;
958  }
959  }
960  else
961  {
962  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
963  "Reference Fully Connected: input and output types mismatched.");
964  }
965 
966  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
967  "Reference Fully Connected: weights is not a supported type.");
968 
969  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
970  "Reference Fully Connected: input and weights types mismatched.");
971 
972  if (descriptor.m_BiasEnabled)
973  {
974  // Defined supported types for bias
975  std::array<DataType, 5>
976  supportedBiasTypes =
977  {
978  DataType::BFloat16,
979  DataType::Float16,
980  DataType::Float32,
981  DataType::Signed32,
982  DataType::QAsymmS8
983  };
984 
985  supported &= CheckSupportRule(TypeAnyOf(biases, supportedBiasTypes), reasonIfUnsupported,
986  "Reference Fully Connected: bias type not supported.");
987 
988  supported &= CheckSupportRule(BiasAndWeightsTypesMatch(biases, weights), reasonIfUnsupported,
989  "Reference Fully Connected: bias and weight types mismatch.");
990 
991  supported &= CheckSupportRule(BiasAndWeightsTypesCompatible(weights, supportedBiasTypes), reasonIfUnsupported,
992  "Reference Fully Connected: bias type inferred from weights is incompatible.");
993 
994  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(biases, 1U), reasonIfUnsupported,
995  "Reference Fully Connected: bias must have 1 dimension.");
996 
997  }
998 
999  return supported;
1000 }
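A minimal usage sketch follows (not part of the generated reference; the standalone main(), tensor shapes and the assumption that the backend-internal RefLayerSupport.hpp is on the include path are all illustrative). It exercises the special case documented above, where a BFloat16 input may be paired with a Float32 output.

#include <RefLayerSupport.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <iostream>
#include <string>

int main()
{
    using namespace armnn;

    // BFloat16 activations and weights with a Float32 output: allowed by the
    // special case in the listing above. Bias is disabled to keep the sketch short.
    TensorInfo input  (TensorShape({1, 128}),  DataType::BFloat16);
    TensorInfo weights(TensorShape({10, 128}), DataType::BFloat16);
    TensorInfo biases (TensorShape({10}),      DataType::Float32);   // ignored, bias disabled
    TensorInfo output (TensorShape({1, 10}),   DataType::Float32);

    FullyConnectedDescriptor descriptor;
    descriptor.m_BiasEnabled = false;

    std::string reason;
    bool supported = RefLayerSupport().IsFullyConnectedSupported(
        input, output, weights, biases, descriptor, Optional<std::string&>(reason));

    if (supported) { std::cout << "FullyConnected is supported\n"; }
    else           { std::cout << "Not supported: " << reason << "\n"; }
}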

◆ IsGatherSupported()

bool IsGatherSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const GatherDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1002 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, GatherDescriptor::m_Axis, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

1007 {
1008  bool supported = true;
1009  std::array<DataType,7> supportedTypes =
1010  {
1011  DataType::BFloat16,
1012  DataType::Float16,
1013  DataType::Float32,
1014  DataType::QAsymmS8,
1015  DataType::QAsymmU8,
1016  DataType::QSymmS16,
1017  DataType::Signed32
1018  };
1019 
1020  if (descriptor.m_Axis != 0)
1021  {
1022  reasonIfUnsupported.value() += std::string("Reference Gather: axis not supported\n");
1023  supported &= false;
1024  }
1025  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1026  "Reference Gather: input type not supported");
1027 
1028  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1029  "Reference Gather: output type not supported");
1030 
1031  supported &= CheckSupportRule(TypeIs(input1, DataType::Signed32), reasonIfUnsupported,
1032  "Reference Gather: indices (input1) type not supported");
1033 
1034  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1035  "Reference Gather: input and output types not matching");
1036 
1037  return supported;
1038 }
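A short usage sketch (shapes are illustrative assumptions, as is the standalone main()): the data tensor (input0) and the output must share a supported type, the indices tensor (input1) must be Signed32, and m_Axis must be 0 for this backend in this release.

#include <RefLayerSupport.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <iostream>
#include <string>

int main()
{
    using namespace armnn;

    TensorInfo params (TensorShape({8, 4}), DataType::Float32);   // input0: data
    TensorInfo indices(TensorShape({3}),    DataType::Signed32);  // input1: must be Signed32
    TensorInfo output (TensorShape({3, 4}), DataType::Float32);   // gathered rows

    GatherDescriptor descriptor;
    descriptor.m_Axis = 0;   // any other axis is rejected by the check above

    std::string reason;
    bool supported = RefLayerSupport().IsGatherSupported(
        params, indices, output, descriptor, Optional<std::string&>(reason));

    if (supported) { std::cout << "Gather supported\n"; }
    else           { std::cout << "Not supported: " << reason << "\n"; }
}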

◆ IsGreaterSupported()

bool IsGreaterSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1040 of file RefLayerSupport.cpp.

References armnn::Greater, and RefLayerSupport::IsComparisonSupported().

1044 {
1045  return IsComparisonSupported(input0,
1046  input1,
1047  output,
1048  ComparisonDescriptor(ComparisonOperation::Greater),
1049  reasonIfUnsupported);
1050 }

◆ IsInputSupported()

bool IsInputSupported ( const TensorInfo input,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1052 of file RefLayerSupport.cpp.

1054 {
1055  return true;
1056 }

◆ IsInstanceNormalizationSupported()

bool IsInstanceNormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const InstanceNormalizationDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1058 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, and armnn::IgnoreUnused().

1062 {
1063  IgnoreUnused(descriptor);
1064  // Define supported types
1065  std::array<DataType, 3> supportedTypes =
1066  {
1067  DataType::BFloat16,
1068  DataType::Float16,
1069  DataType::Float32
1070  };
1071 
1072  bool supported = true;
1073 
1074  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1075  "Reference Instance Normalization: input type not supported.");
1076 
1077  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1078  "Reference Instance Normalization: output type not supported.");
1079 
1080  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1081  "Reference Instance Normalization: input and output types mismatched.");
1082 
1083  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1084  "Reference Instance Normalization: input and output shapes have different "
1085  "num total elements.");
1086 
1087  return supported;
1088 }

◆ IsL2NormalizationSupported()

bool IsL2NormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const L2NormalizationDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1090 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

1094 {
1095  IgnoreUnused(descriptor);
1096  // Define supported types
1097  std::array<DataType, 6> supportedTypes =
1098  {
1099  DataType::BFloat16,
1100  DataType::Float16,
1101  DataType::Float32,
1102  DataType::QAsymmS8,
1103  DataType::QAsymmU8,
1104  DataType::QSymmS16
1105  };
1106 
1107  bool supported = true;
1108 
1109  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1110  "Reference L2normalization: input type not supported.");
1111 
1112  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1113  "Reference L2normalization: output type not supported.");
1114 
1115  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1116  "Reference L2normalization: input and output types mismatched.");
1117 
1118  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1119  "Reference L2normalization: input and output shapes have different "
1120  "num total elements.");
1121 
1122  return supported;
1123 }

◆ IsLogicalBinarySupported()

bool IsLogicalBinarySupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const LogicalBinaryDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1125 of file RefLayerSupport.cpp.

References armnn::Boolean, armnn::CheckSupportRule(), and armnn::IgnoreUnused().

1130 {
1131  IgnoreUnused(descriptor);
1132 
1133  std::array<DataType, 1> supportedTypes =
1134  {
1135  DataType::Boolean
1136  };
1137 
1138  bool supported = true;
1139  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1140  "Reference LogicalBinary: input 0 type not supported");
1141  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1142  "Reference LogicalBinary: input 1 type not supported");
1143 
1144  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1145  "Reference LogicalBinary: input and output types do not match");
1146 
1147  return supported;
1148 }
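A brief sketch of a query against this check (the LogicalAnd operation, shapes and the standalone main() are illustrative assumptions): every operand must be a Boolean tensor, since Boolean is the only entry in the type list above.

#include <RefLayerSupport.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <iostream>
#include <string>

int main()
{
    using namespace armnn;

    // Only DataType::Boolean passes the single-entry type list above.
    TensorInfo lhs(TensorShape({1, 4}), DataType::Boolean);
    TensorInfo rhs(TensorShape({1, 4}), DataType::Boolean);
    TensorInfo out(TensorShape({1, 4}), DataType::Boolean);

    LogicalBinaryDescriptor descriptor(LogicalBinaryOperation::LogicalAnd);

    std::string reason;
    bool supported = RefLayerSupport().IsLogicalBinarySupported(
        lhs, rhs, out, descriptor, Optional<std::string&>(reason));

    if (supported) { std::cout << "LogicalBinary supported\n"; }
    else           { std::cout << "Not supported: " << reason << "\n"; }
}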

◆ IsLogicalUnarySupported()

bool IsLogicalUnarySupported ( const TensorInfo input,
const TensorInfo output,
const ElementwiseUnaryDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1150 of file RefLayerSupport.cpp.

References armnn::Boolean, armnn::CheckSupportRule(), and armnn::IgnoreUnused().

1154 {
1155  IgnoreUnused(descriptor);
1156 
1157  std::array<DataType, 1> supportedTypes =
1158  {
1159  DataType::Boolean
1160  };
1161 
1162  bool supported = true;
1163  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1164  "Reference LogicalUnary: input type not supported");
1165 
1166  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1167  "Reference LogicalUnary: input and output types do not match");
1168 
1169  return supported;
1170 }

◆ IsLogSoftmaxSupported()

bool IsLogSoftmaxSupported ( const TensorInfo input,
const TensorInfo output,
const LogSoftmaxDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1172 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, and armnn::IgnoreUnused().

1176 {
1177  IgnoreUnused(descriptor);
1178 
1179  std::array<DataType, 3> supportedTypes =
1180  {
1181  DataType::BFloat16,
1182  DataType::Float16,
1183  DataType::Float32
1184  };
1185 
1186  bool supported = true;
1187  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1188  "Reference LogSoftmax: input type not supported");
1189 
1190  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1191  "Reference LogSoftmax: output type not supported");
1192 
1193  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1194  "Reference LogSoftmax: input and output types do not match");
1195 
1196  return supported;
1197 }

◆ IsLstmSupported()

bool IsLstmSupported ( const TensorInfo input,
const TensorInfo outputStateIn,
const TensorInfo cellStateIn,
const TensorInfo scratchBuffer,
const TensorInfo outputStateOut,
const TensorInfo cellStateOut,
const TensorInfo output,
const LstmDescriptor descriptor,
const LstmInputParamsInfo paramsInfo,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1199 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float32, LstmInputParamsInfo::GetCellBias(), LstmInputParamsInfo::GetCellLayerNormWeights(), LstmInputParamsInfo::GetCellToForgetWeights(), LstmInputParamsInfo::GetCellToInputWeights(), LstmInputParamsInfo::GetCellToOutputWeights(), LstmInputParamsInfo::GetForgetGateBias(), LstmInputParamsInfo::GetForgetLayerNormWeights(), LstmInputParamsInfo::GetInputGateBias(), LstmInputParamsInfo::GetInputLayerNormWeights(), LstmInputParamsInfo::GetInputToCellWeights(), LstmInputParamsInfo::GetInputToForgetWeights(), LstmInputParamsInfo::GetInputToInputWeights(), LstmInputParamsInfo::GetInputToOutputWeights(), LstmInputParamsInfo::GetOutputGateBias(), LstmInputParamsInfo::GetOutputLayerNormWeights(), LstmInputParamsInfo::GetProjectionBias(), LstmInputParamsInfo::GetProjectionWeights(), LstmInputParamsInfo::GetRecurrentToCellWeights(), LstmInputParamsInfo::GetRecurrentToForgetWeights(), LstmInputParamsInfo::GetRecurrentToInputWeights(), LstmInputParamsInfo::GetRecurrentToOutputWeights(), armnn::IgnoreUnused(), LstmDescriptor::m_CifgEnabled, LstmDescriptor::m_LayerNormEnabled, LstmDescriptor::m_PeepholeEnabled, LstmInputParamsInfo::m_ProjectionBias, LstmDescriptor::m_ProjectionEnabled, and armnn::QSymmS16.

1209 {
1210  IgnoreUnused(descriptor);
1211  IgnoreUnused(paramsInfo);
1212 
1213  bool supported = true;
1214 
1215  std::array<DataType,3> supportedTypes = {
1216  DataType::BFloat16,
1217  DataType::Float32,
1218  DataType::QSymmS16
1219  };
1220 
1221  // check inputs and outputs
1222  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1223  "Reference Lstm: input is not a supported type.");
1224  supported &= CheckSupportRule(TypesAreEqual(input, outputStateIn), reasonIfUnsupported,
1225  "Reference Lstm: input and outputStateIn types are mismatched");
1226  supported &= CheckSupportRule(TypesAreEqual(input, cellStateIn), reasonIfUnsupported,
1227  "Reference Lstm: input and cellStateIn types are mismatched");
1228  supported &= CheckSupportRule(TypesAreEqual(input, scratchBuffer), reasonIfUnsupported,
1229  "Reference Lstm: input and scratchBuffer types are mismatched");
1230  supported &= CheckSupportRule(TypesAreEqual(input, outputStateOut), reasonIfUnsupported,
1231  "Reference Lstm: input and outputStateOut types are mismatched");
1232  supported &= CheckSupportRule(TypesAreEqual(input, cellStateOut), reasonIfUnsupported,
1233  "Reference Lstm: input and cellStateOut types are mismatched");
1234  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1235  "Reference Lstm: input and output types are mismatched");
1236  // check layer parameters
1237  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToForgetWeights()), reasonIfUnsupported,
1238  "Reference Lstm: input and InputToForgetWeights types are mismatched");
1239  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToCellWeights()), reasonIfUnsupported,
1240  "Reference Lstm: input and InputToCellWeights types are mismatched");
1241  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToOutputWeights()), reasonIfUnsupported,
1242  "Reference Lstm: input and InputToOutputWeights types are mismatched");
1243  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToForgetWeights()), reasonIfUnsupported,
1244  "Reference Lstm: input and RecurrentToForgetWeights types are mismatched");
1245  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToCellWeights()), reasonIfUnsupported,
1246  "Reference Lstm: input and RecurrentToCellWeights types are mismatched");
1247  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToOutputWeights()), reasonIfUnsupported,
1248  "Reference Lstm: input and RecurrentToOutputWeights types are mismatched");
1249  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetGateBias()), reasonIfUnsupported,
1250  "Reference Lstm: input and ForgetGateBias types are mismatched");
1251  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellBias()), reasonIfUnsupported,
1252  "Reference Lstm: input and CellBias types are mismatched");
1253  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputGateBias()), reasonIfUnsupported,
1254  "Reference Lstm: input and OutputGateBias types are mismatched");
1255  if (!descriptor.m_CifgEnabled)
1256  {
1257  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToInputWeights()), reasonIfUnsupported,
1258  "Reference Lstm: input and InputToInputWeights types are mismatched");
1259  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToInputWeights()),
1260  reasonIfUnsupported,
1261  "Reference Lstm: input and RecurrentToInputWeights types are mismatched");
1262  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputGateBias()), reasonIfUnsupported,
1263  "Reference Lstm: input and InputGateBias types are mismatched");
1264  if (descriptor.m_PeepholeEnabled)
1265  {
1266  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToInputWeights()),
1267  reasonIfUnsupported,
1268  "Reference Lstm: input and CellToInputWeights types are mismatched");
1269  }
1270  }
1271  if (descriptor.m_PeepholeEnabled)
1272  {
1273  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToForgetWeights()), reasonIfUnsupported,
1274  "Reference Lstm: input and CellToForgetWeights types are mismatched");
1275  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToOutputWeights()), reasonIfUnsupported,
1276  "Reference Lstm: input and CellToOutputWeights types are mismatched");
1277  }
1278  if (descriptor.m_ProjectionEnabled)
1279  {
1280  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionWeights()), reasonIfUnsupported,
1281  "Reference Lstm: input and mProjectionWeights types are mismatched");
1282  if (paramsInfo.m_ProjectionBias != nullptr)
1283  {
1284  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
1285  "Reference Lstm: input and ProjectionBias types are mismatched");
1286  }
1287  }
1288  if (descriptor.m_LayerNormEnabled)
1289  {
1290  if (!descriptor.m_CifgEnabled)
1291  {
1292  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputLayerNormWeights()),
1293  reasonIfUnsupported,
1294  "Reference Lstm: input and InputLayerNormWeights types are mismatched");
1295  }
1296  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetLayerNormWeights()),
1297  reasonIfUnsupported,
1298  "Reference Lstm: input and ForgetLayerNormWeights types are mismatched");
1299  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellLayerNormWeights()),
1300  reasonIfUnsupported,
1301  "Reference Lstm: input and CellLayerNormWeights types are mismatched");
1302  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputLayerNormWeights()),
1303  reasonIfUnsupported,
1304  "Reference Lstm: input and OutputLayerNormWeights types are mismatched");
1305  }
1306 
1307  return supported;
1308 }

◆ IsMaximumSupported()

bool IsMaximumSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1310 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

1314 {
1315  bool supported = true;
1316 
1317  std::array<DataType,7> supportedTypes = {
1318  DataType::BFloat16,
1319  DataType::Float32,
1320  DataType::Float16,
1321  DataType::QAsymmS8,
1322  DataType::QAsymmU8,
1323  DataType::QSymmS16,
1324  DataType::Signed32
1325  };
1326 
1327  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1328  "Reference maximum: input 0 is not a supported type.");
1329 
1330  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1331  "Reference maximum: input 1 is not a supported type.");
1332 
1333  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1334  "Reference maximum: output is not a supported type.");
1335 
1336  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1337  "Reference maximum: input 0 and Input 1 types are mismatched");
1338 
1339  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1340  "Reference maximum: input and output types are mismatched");
1341 
1342  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1343  "Reference maximum: shapes are not suitable for implicit broadcast.");
1344 
1345  return supported;
1346 }
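The last rule above accepts implicit broadcasting. A brief sketch (shapes and the standalone main() are illustrative assumptions) in which input1 broadcasts across the first dimension:

#include <RefLayerSupport.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <iostream>
#include <string>

int main()
{
    using namespace armnn;

    // {1, 3} broadcasts against {2, 3}; the output carries the broadcast shape {2, 3}.
    TensorInfo input0(TensorShape({2, 3}), DataType::Float32);
    TensorInfo input1(TensorShape({1, 3}), DataType::Float32);
    TensorInfo output(TensorShape({2, 3}), DataType::Float32);

    std::string reason;
    bool supported = RefLayerSupport().IsMaximumSupported(
        input0, input1, output, Optional<std::string&>(reason));

    if (supported) { std::cout << "Maximum supported\n"; }
    else           { std::cout << "Not supported: " << reason << "\n"; }
}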

◆ IsMeanSupported()

bool IsMeanSupported ( const TensorInfo input,
const TensorInfo output,
const MeanDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1348 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetNumDimensions(), MeanDescriptor::m_Axis, MeanDescriptor::m_KeepDims, armnn::numeric_cast(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

1352 {
1353  bool supported = true;
1354  std::string meanLayerStr = "Mean";
1355  std::string outputTensorStr = "output";
1356 
1357  std::array<DataType,6> supportedTypes =
1358  {
1359  DataType::BFloat16,
1360  DataType::Float16,
1361  DataType::Float32,
1362  DataType::QAsymmS8,
1363  DataType::QAsymmU8,
1364  DataType::QSymmS16
1365  };
1366 
1367  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1368  "Reference Mean: input type not supported.");
1369 
1370  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1371  "Reference Mean: input and output types are mismatched");
1372 
1373  if (descriptor.m_KeepDims)
1374  {
1375  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, input.GetNumDimensions()),
1376  reasonIfUnsupported,
1377  CreateIncorrectDimensionsErrorMsg(input.GetNumDimensions(),
1378  output.GetNumDimensions(),
1379  meanLayerStr, outputTensorStr).data());
1380  }
1381  else if (descriptor.m_Axis.empty())
1382  {
1383  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1384  reasonIfUnsupported,
1385  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1386  meanLayerStr, outputTensorStr).data());
1387  }
1388  else
1389  {
1390  auto outputDim = input.GetNumDimensions() - armnn::numeric_cast<unsigned int>(descriptor.m_Axis.size());
1391 
1392  if (outputDim > 0)
1393  {
1394  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, outputDim),
1395  reasonIfUnsupported,
1396  CreateIncorrectDimensionsErrorMsg(outputDim, output.GetNumDimensions(),
1397  meanLayerStr, outputTensorStr).data());
1398  }
1399  else
1400  {
1401  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1402  reasonIfUnsupported,
1403  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1404  meanLayerStr, outputTensorStr).data());
1405  }
1406  }
1407 
1408  return supported;
1409 }
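The branches above tie the expected output rank to m_KeepDims and m_Axis: with m_KeepDims the output keeps the input rank, with an empty axis list it collapses to rank 1, and otherwise the rank drops by the number of reduced axes. A sketch of the last case (shapes and the standalone main() are illustrative assumptions):

#include <RefLayerSupport.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <iostream>
#include <string>

int main()
{
    using namespace armnn;

    // Reducing axis 1 of a rank-3 tensor without keepDims: 3 - 1 = rank-2 output expected.
    TensorInfo input (TensorShape({2, 3, 4}), DataType::Float32);
    TensorInfo output(TensorShape({2, 4}),    DataType::Float32);

    MeanDescriptor descriptor;
    descriptor.m_Axis     = {1u};
    descriptor.m_KeepDims = false;

    std::string reason;
    bool supported = RefLayerSupport().IsMeanSupported(
        input, output, descriptor, Optional<std::string&>(reason));

    if (supported) { std::cout << "Mean supported\n"; }
    else           { std::cout << "Not supported: " << reason << "\n"; }
}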

◆ IsMemCopySupported()

bool IsMemCopySupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1419 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

1422 {
1423  bool supported = true;
1424 
1425  std::array<DataType,7> supportedTypes =
1426  {
1427  DataType::BFloat16,
1428  DataType::Float16,
1429  DataType::Float32,
1430  DataType::QAsymmS8,
1431  DataType::QAsymmU8,
1432  DataType::QSymmS16,
1433  DataType::Boolean
1434  };
1435 
1436  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1437  "Reference MemCopy: input type not supported");
1438 
1439  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1440  "Reference MemCopy: output type not supported");
1441 
1442  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1443  "Reference MemCopy: input and output types are mismatched");
1444 
1445  return supported;
1446 }

◆ IsMergerSupported()

bool IsMergerSupported ( const std::vector< const TensorInfo *>  inputs,
const TensorInfo output,
const MergerDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1411 of file RefLayerSupport.cpp.

References RefLayerSupport::IsConcatSupported().

1415 {
1416  return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
1417 }

◆ IsMinimumSupported()

bool IsMinimumSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1448 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

1452 {
1453  bool supported = true;
1454 
1455  std::array<DataType,7> supportedTypes = {
1456  DataType::BFloat16,
1457  DataType::Float32,
1458  DataType::Float16,
1459  DataType::QAsymmS8,
1460  DataType::QAsymmU8,
1461  DataType::QSymmS16,
1462  DataType::Signed32
1463  };
1464 
1465  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1466  "Reference minimum: input 0 is not a supported type.");
1467 
1468  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1469  "Reference minimum: input 1 is not a supported type.");
1470 
1471  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1472  "Reference minimum: output is not a supported type.");
1473 
1474  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1475  "Reference minimum: input 0 and Input 1 types are mismatched");
1476 
1477  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1478  "Reference minimum: input and output types are mismatched");
1479 
1480  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1481  "Reference minimum: shapes are not suitable for implicit broadcast.");
1482 
1483  return supported;
1484 }

◆ IsMultiplicationSupported()

bool IsMultiplicationSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1486 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

1490 {
1491  bool supported = true;
1492 
1493  std::array<DataType,7> supportedTypes = {
1494  DataType::BFloat16,
1495  DataType::Float32,
1496  DataType::Float16,
1497  DataType::QAsymmS8,
1498  DataType::QAsymmU8,
1499  DataType::QSymmS16,
1500  DataType::Signed32
1501  };
1502 
1503  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1504  "Reference multiplication: input 0 is not a supported type.");
1505 
1506  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1507  "Reference multiplication: input 1 is not a supported type.");
1508 
1509  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1510  "Reference multiplication: output is not a supported type.");
1511 
1512  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1513  "Reference multiplication: input 0 and Input 1 types are mismatched");
1514 
1515  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1516  "Reference multiplication: input and output types are mismatched");
1517 
1518  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1519  "Reference multiplication: shapes are not suitable for implicit broadcast.");
1520 
1521  return supported;
1522 }

◆ IsNormalizationSupported()

bool IsNormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const NormalizationDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1524 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

1528 {
1529  IgnoreUnused(descriptor);
1530 
1531  // Define supported types
1532  std::array<DataType, 6> supportedTypes =
1533  {
1534  DataType::BFloat16,
1535  DataType::Float16,
1536  DataType::Float32,
1537  DataType::QAsymmS8,
1538  DataType::QAsymmU8,
1539  DataType::QSymmS16
1540  };
1541 
1542  bool supported = true;
1543 
1544  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1545  "Reference normalization: input type not supported.");
1546 
1547  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1548  "Reference normalization: output type not supported.");
1549 
1550  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1551  "Reference normalization: input and output shapes have different "
1552  "num total elements.");
1553 
1554  return supported;
1555 }

◆ IsOutputSupported()

bool IsOutputSupported ( const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1557 of file RefLayerSupport.cpp.

1559 {
1560  return true;
1561 }

◆ IsPadSupported()

bool IsPadSupported ( const TensorInfo input,
const TensorInfo output,
const PadDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1563 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

1567 {
1568  IgnoreUnused(descriptor);
1569  bool supported = true;
1570 
1571  // Define supported output and inputs types.
1572  std::array<DataType,6> supportedTypes =
1573  {
1574  DataType::BFloat16,
1575  DataType::Float32,
1576  DataType::Float16,
1577  DataType::QAsymmS8,
1578  DataType::QAsymmU8,
1579  DataType::QSymmS16
1580  };
1581 
1582  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1583  "Reference pad: input is not a supported type.");
1584 
1585  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1586  "Reference pad: output is not a supported type.");
1587 
1588  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1589  "Reference pad: input and output types are mismatched.");
1590 
1591  return supported;
1592 }

◆ IsPermuteSupported()

bool IsPermuteSupported ( const TensorInfo input,
const TensorInfo output,
const PermuteDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1594 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

1598 {
1599  IgnoreUnused(descriptor);
1600  bool supported = true;
1601 
1602  // Define supported output and inputs types.
1603  std::array<DataType, 6> supportedTypes =
1604  {
1605  DataType::BFloat16,
1606  DataType::Float32,
1607  DataType::Float16,
1608  DataType::QAsymmS8,
1609  DataType::QAsymmU8,
1610  DataType::QSymmS16
1611  };
1612 
1613  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1614  "Reference permute: input is not a supported type.");
1615 
1616  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1617  "Reference permute: output is not a supported type.");
1618 
1619  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1620  "Reference permute: input and output types are mismatched.");
1621 
1622  return supported;
1623 }

◆ IsPooling2dSupported()

bool IsPooling2dSupported ( const TensorInfo input,
const TensorInfo output,
const Pooling2dDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1625 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

1629 {
1630  IgnoreUnused(descriptor);
1631  bool supported = true;
1632 
1633  // Define supported output and inputs types.
1634  std::array<DataType,6> supportedTypes =
1635  {
1636  DataType::BFloat16,
1637  DataType::Float32,
1638  DataType::Float16,
1639  DataType::QAsymmS8,
1640  DataType::QAsymmU8,
1641  DataType::QSymmS16
1642  };
1643 
1644  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1645  "Reference poolind2d: input is not a supported type.");
1646 
1647  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1648  "Reference poolind2d: output is not a supported type.");
1649 
1650  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1651  "Reference poolind2d: input and output types are mismatched.");
1652 
1653  return supported;
1654 }

◆ IsPreluSupported()

bool IsPreluSupported ( const TensorInfo input,
const TensorInfo alpha,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 2093 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

2097 {
2098  bool supported = true;
2099 
2100  std::array<DataType, 6> supportedTypes
2101  {
2102  DataType::BFloat16,
2103  DataType::Float16,
2104  DataType::Float32,
2105  DataType::QAsymmS8,
2106  DataType::QAsymmU8,
2107  DataType::QSymmS16
2108  };
2109 
2110  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2111  "PReLU: input is not a supported type.");
2112 
2113  supported &= CheckSupportRule(TypeAnyOf(alpha, supportedTypes), reasonIfUnsupported,
2114  "PReLU: alpha is not a supported type.");
2115 
2116  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2117  "PReLU: output is not a supported type.");
2118 
2119  supported &= CheckSupportRule(TypesAreEqual(input, alpha, output), reasonIfUnsupported,
2120  "PReLU: input, alpha and output types are mismatched");
2121 
2122  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input, alpha, output), reasonIfUnsupported,
2123  "PReLU: shapes are not suitable for implicit broadcast");
2124 
2125  return supported;
2126 }

◆ IsQLstmSupported()

bool IsQLstmSupported ( const TensorInfo input,
const TensorInfo previousOutputIn,
const TensorInfo previousCellStateIn,
const TensorInfo outputStateOut,
const TensorInfo cellStateOut,
const TensorInfo output,
const QLstmDescriptor descriptor,
const LstmInputParamsInfo paramsInfo,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1656 of file RefLayerSupport.cpp.

References armnn::IgnoreUnused().

1665 {
1666  IgnoreUnused(input);
1667  IgnoreUnused(previousOutputIn);
1668  IgnoreUnused(previousCellStateIn);
1669  IgnoreUnused(outputStateOut);
1670  IgnoreUnused(cellStateOut);
1671  IgnoreUnused(output);
1672  IgnoreUnused(descriptor);
1673  IgnoreUnused(paramsInfo);
1674 
1675  IgnoreUnused(reasonIfUnsupported);
1676 
1677  return true;
1678 }

◆ IsQuantizeSupported()

bool IsQuantizeSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1680 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

1683 {
1684  bool supported = true;
1685 
1686  // Define supported input types.
1687  std::array<DataType,7> supportedInputTypes = {
1688  DataType::BFloat16,
1689  DataType::Float32,
1690  DataType::Float16,
1691  DataType::QAsymmS8,
1692  DataType::QAsymmU8,
1693  DataType::QSymmS8,
1694  DataType::QSymmS16
1695  };
1696 
1697  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
1698  "Reference quantize: input type not supported.");
1699 
1700  // Define supported output types.
1701  std::array<DataType,4> supportedOutputTypes = {
1702  DataType::QAsymmS8,
1703  DataType::QAsymmU8,
1704  DataType::QSymmS8,
1705  DataType::QSymmS16
1706  };
1707  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
1708  "Reference quantize: output type not supported.");
1709 
1710  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1711  "Reference quantize: input and output shapes have different num total elements.");
1712 
1713  return supported;
1714 }
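A usage sketch (scale, zero point, shapes and the standalone main() are illustrative assumptions): a Float32 source quantized to QAsymmU8, one of the four accepted output types above.

#include <RefLayerSupport.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <iostream>
#include <string>

int main()
{
    using namespace armnn;

    TensorInfo input (TensorShape({1, 8}), DataType::Float32);
    // Quantization parameters (scale 0.05, zero point 128) are illustrative.
    TensorInfo output(TensorShape({1, 8}), DataType::QAsymmU8, 0.05f, 128);

    std::string reason;
    bool supported = RefLayerSupport().IsQuantizeSupported(
        input, output, Optional<std::string&>(reason));

    if (supported) { std::cout << "Quantize supported\n"; }
    else           { std::cout << "Not supported: " << reason << "\n"; }
}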

◆ IsRankSupported()

bool IsRankSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1716 of file RefLayerSupport.cpp.

References armnn::CheckSupportRule(), armnn::IgnoreUnused(), and armnn::Signed32.

1719 {
1720  IgnoreUnused(input);
1721  // Define supported output types.
1722  std::array<DataType,1> supportedOutputTypes =
1723  {
1724  DataType::Signed32
1725  };
1726 
1727  return CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
1728  "Reference rank: input type not supported.");
1729 }

◆ IsReshapeSupported()

bool IsReshapeSupported ( const TensorInfo input,
const TensorInfo output,
const ReshapeDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1731 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

1735 {
1736  IgnoreUnused(output);
1737  IgnoreUnused(descriptor);
1738  // Define supported output types.
1739  std::array<DataType,8> supportedOutputTypes =
1740  {
1741  DataType::BFloat16,
1742  DataType::Float32,
1743  DataType::Float16,
1744  DataType::Signed32,
1745  DataType::QAsymmS8,
1746  DataType::QAsymmU8,
1747  DataType::QSymmS16,
1748  DataType::Boolean
1749  };
1750 
1751  return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
1752  "Reference reshape: input type not supported.");
1753 }

◆ IsResizeBilinearSupported()

bool IsResizeBilinearSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1755 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

1758 {
1759  bool supported = true;
1760  std::array<DataType,6> supportedTypes =
1761  {
1762  DataType::BFloat16,
1763  DataType::Float16,
1764  DataType::Float32,
1765  DataType::QAsymmS8,
1766  DataType::QAsymmU8,
1767  DataType::QSymmS16
1768  };
1769 
1770  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1771  "Reference ResizeBilinear: input type not supported");
1772 
1773  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1774  "Reference ResizeBilinear: output type not supported");
1775 
1776  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1777  "Reference ResizeBilinear: input and output types not matching");
1778 
1779  return supported;
1780 }

◆ IsResizeSupported()

bool IsResizeSupported ( const TensorInfo input,
const TensorInfo output,
const ResizeDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1782 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

1786 {
1787  IgnoreUnused(descriptor);
1788  bool supported = true;
1789  std::array<DataType,6> supportedTypes =
1790  {
1791  DataType::BFloat16,
1792  DataType::Float16,
1793  DataType::Float32,
1794  DataType::QAsymmS8,
1795  DataType::QAsymmU8,
1796  DataType::QSymmS16
1797  };
1798 
1799  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1800  "Reference Resize: input type not supported");
1801 
1802  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1803  "Reference Resize: output type not supported");
1804 
1805  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1806  "Reference Resize: input and output types not matching");
1807 
1808  return supported;
1809 }

◆ IsRsqrtSupported()

bool IsRsqrtSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1811 of file RefLayerSupport.cpp.

References RefLayerSupport::IsElementwiseUnarySupported(), and armnn::Rsqrt.

1814 {
1815  return IsElementwiseUnarySupported(input,
1816  output,
1817  ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt),
1818  reasonIfUnsupported);
1819 }

◆ IsSliceSupported()

bool IsSliceSupported ( const TensorInfo input,
const TensorInfo output,
const SliceDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1821 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

1825 {
1826  IgnoreUnused(descriptor);
1827  bool supported = true;
1828 
1829  std::array<DataType, 5> supportedTypes =
1830  {
1831  DataType::BFloat16,
1832  DataType::Float32,
1833  DataType::QAsymmS8,
1834  DataType::QAsymmU8,
1835  DataType::QSymmS16
1836  };
1837 
1838  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1839  "Reference Slice: input type not supported");
1840 
1841  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1842  "Reference Slice: output type not supported");
1843 
1844  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1845  "Reference Slice: input and output types are mismatched");
1846 
1847  return supported;
1848 }

◆ IsSoftmaxSupported()

bool IsSoftmaxSupported ( const TensorInfo input,
const TensorInfo output,
const SoftmaxDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1850 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

1854 {
1855  IgnoreUnused(descriptor);
1856  bool supported = true;
1857  std::array<DataType,7> supportedTypes =
1858  {
1859  DataType::BFloat16,
1860  DataType::Float32,
1861  DataType::Float16,
1862  DataType::QSymmS8,
1863  DataType::QAsymmS8,
1864  DataType::QAsymmU8,
1865  DataType::QSymmS16
1866  };
1867 
1868  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1869  "Reference Softmax: output type not supported");
1870 
1871  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1872  "Reference Softmax: input type not supported");
1873 
1874  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1875  "Reference Softmax: input type not supported");
1876 
1877  return supported;
1878 }

◆ IsSpaceToBatchNdSupported()

bool IsSpaceToBatchNdSupported ( const TensorInfo input,
const TensorInfo output,
const SpaceToBatchNdDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1880 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

1884 {
1885  IgnoreUnused(descriptor);
1886  bool supported = true;
1887  std::array<DataType,6> supportedTypes =
1888  {
1889  DataType::BFloat16,
1890  DataType::Float16,
1891  DataType::Float32,
1892  DataType::QAsymmS8,
1893  DataType::QAsymmU8,
1894  DataType::QSymmS16
1895  };
1896 
1897  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1898  "Reference SpaceToBatchNd: input type not supported");
1899 
1900  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1901  "Reference SpaceToBatchNd: output type not supported");
1902 
1903  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1904  "Reference SpaceToBatchNd: input and output types are mismatched");
1905 
1906  return supported;
1907 }

◆ IsSpaceToDepthSupported()

bool IsSpaceToDepthSupported ( const TensorInfo input,
const TensorInfo output,
const SpaceToDepthDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1909 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

1913 {
1914 
1915  IgnoreUnused(descriptor);
1916  bool supported = true;
1917 
1918  std::array<DataType,6> supportedTypes =
1919  {
1920  DataType::BFloat16,
1921  DataType::Float16,
1922  DataType::Float32,
1923  DataType::QAsymmS8,
1924  DataType::QAsymmU8,
1925  DataType::QSymmS16
1926  };
1927 
1928  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1929  "Reference SpaceToDepth: input type not supported");
1930 
1931  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1932  "Reference SpaceToDepth: output type not supported");
1933 
1934  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1935  "Reference SpaceToDepth: input and output types are mismatched");
1936 
1937  return supported;
1938 }

◆ IsSplitterSupported() [1/2]

bool IsSplitterSupported ( const TensorInfo input,
const ViewsDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1940 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

1943 {
1944  IgnoreUnused(descriptor);
1945  bool supported = true;
1946  std::array<DataType,6> supportedTypes =
1947  {
1948  DataType::BFloat16,
1949  DataType::Float32,
1950  DataType::Float16,
1951  DataType::QAsymmS8,
1952  DataType::QAsymmU8,
1953  DataType::QSymmS16
1954  };
1955 
1956  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1957  "Reference splitter: input type not supported");
1958 
1959  return supported;
1960 }

◆ IsSplitterSupported() [2/2]

bool IsSplitterSupported ( const TensorInfo input,
const std::vector< std::reference_wrapper< TensorInfo >> &  outputs,
const ViewsDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1962 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

1966 {
1967  IgnoreUnused(descriptor);
1968  bool supported = true;
1969  std::array<DataType,6> supportedTypes =
1970  {
1971  DataType::BFloat16,
1972  DataType::Float32,
1973  DataType::Float16,
1974  DataType::QAsymmS8,
1975  DataType::QAsymmU8,
1976  DataType::QSymmS16
1977  };
1978 
1979  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1980  "Reference splitter: output type not supported");
1981  for (const TensorInfo& output : outputs)
1982  {
1983  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1984  "Reference splitter: input type not supported");
1985 
1986  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1987  "Reference splitter: input and output types mismatched.");
1988  }
1989 
1990  return supported;
1991 }

◆ IsStackSupported()

bool IsStackSupported ( const std::vector< const TensorInfo *> &  inputs,
const TensorInfo output,
const StackDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1993 of file RefLayerSupport.cpp.

References ARMNN_ASSERT, armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

1997 {
1998  IgnoreUnused(descriptor);
1999 
2000  bool supported = true;
2001  std::array<DataType,6> supportedTypes =
2002  {
2003  DataType::BFloat16,
2004  DataType::Float32,
2005  DataType::Float16,
2006  DataType::QAsymmS8,
2007  DataType::QAsymmU8,
2008  DataType::QSymmS16
2009  };
2010 
2011  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2012  "Reference stack: output type not supported");
2013  for (const TensorInfo* input : inputs)
2014  {
2015  ARMNN_ASSERT(input != nullptr);
2016  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
2017  "Reference stack: input type not supported");
2018 
2019  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
2020  "Reference stack: input and output types mismatched.");
2021  }
2022 
2023  return supported;
2024 }
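The inputs arrive as a vector of TensorInfo pointers, each of which must be non-null and match the output type. A sketch of such a query (axis, shapes, input count and the standalone main() are illustrative assumptions):

#include <RefLayerSupport.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <iostream>
#include <string>
#include <vector>

int main()
{
    using namespace armnn;

    // Stacking two {3, 4} tensors along a new leading axis gives a {2, 3, 4} output.
    TensorInfo input0(TensorShape({3, 4}),    DataType::Float32);
    TensorInfo input1(TensorShape({3, 4}),    DataType::Float32);
    TensorInfo output(TensorShape({2, 3, 4}), DataType::Float32);

    std::vector<const TensorInfo*> inputs = { &input0, &input1 };

    StackDescriptor descriptor(/*axis=*/0, /*numInputs=*/2, TensorShape({3, 4}));

    std::string reason;
    bool supported = RefLayerSupport().IsStackSupported(
        inputs, output, descriptor, Optional<std::string&>(reason));

    if (supported) { std::cout << "Stack supported\n"; }
    else           { std::cout << "Not supported: " << reason << "\n"; }
}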

◆ IsStridedSliceSupported()

bool IsStridedSliceSupported ( const TensorInfo input,
const TensorInfo output,
const StridedSliceDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 2026 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

2030 {
2031  IgnoreUnused(descriptor);
2032  bool supported = true;
2033 
2034  std::array<DataType,5> supportedTypes =
2035  {
2036  DataType::BFloat16,
2037  DataType::Float32,
2038  DataType::QAsymmS8,
2039  DataType::QAsymmU8,
2040  DataType::QSymmS16
2041  };
2042 
2043  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2044  "Reference StridedSlice: input type not supported");
2045 
2046  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2047  "Reference StridedSlice: output type not supported");
2048 
2049  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2050  "Reference StridedSlice: input and output types are mismatched");
2051 
2052  return supported;
2053 }

◆ IsSubtractionSupported()

bool IsSubtractionSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 2055 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

2059 {
2060  bool supported = true;
2061 
2062  std::array<DataType,7> supportedTypes = {
2063  DataType::BFloat16,
2064  DataType::Float32,
2065  DataType::Float16,
2066  DataType::QAsymmS8,
2067  DataType::QAsymmU8,
2068  DataType::QSymmS16,
2069  DataType::Signed32
2070  };
2071 
2072  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2073  "Reference subtraction: input 0 is not a supported type.");
2074 
2075  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2076  "Reference subtraction: input 1 is not a supported type.");
2077 
2078  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2079  "Reference subtraction: output is not a supported type.");
2080 
2081  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2082  "Reference subtraction: input 0 and Input 1 types are mismatched");
2083 
2084  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2085  "Reference subtraction: input and output types are mismatched");
2086 
2087  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2088  "Reference subtraction: shapes are not suitable for implicit broadcast.");
2089 
2090  return supported;
2091 }

◆ IsTransposeConvolution2dSupported()

bool IsTransposeConvolution2dSupported ( const TensorInfo input,
const TensorInfo output,
const TransposeConvolution2dDescriptor descriptor,
const TensorInfo weights,
const Optional< TensorInfo > &  biases,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 2128 of file RefLayerSupport.cpp.

References ARMNN_NO_DEPRECATE_WARN_BEGIN, ARMNN_NO_DEPRECATE_WARN_END, armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::QuantizedSymm8PerAxis, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

2134 {
2135  IgnoreUnused(descriptor);
2136  bool supported = true;
2137 
2138  std::array<DataType,7> supportedTypes =
2139  {
2140  DataType::BFloat16,
2141  DataType::Float32,
2142  DataType::Float16,
2143  DataType::QAsymmS8,
2144  DataType::QAsymmU8,
2145  DataType::QSymmS8,
2146  DataType::QSymmS16
2147  };
2148 
2149  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2150  "Reference TransposeConvolution2d: input is not a supported type.");
2151 
2152  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2153  "Reference TransposeConvolution2d: output is not a supported type.");
2154 
2155  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2156  "Reference TransposeConvolution2d: input and output types mismatched.");
2157 
2158 
2159  const DataType inputType = input.GetDataType();
2160  if (IsQuantized8BitType(inputType))
2161  {
2162  ARMNN_NO_DEPRECATE_WARN_BEGIN
2163  std::array<DataType, 4> supportedWeightTypes =
2164  {
2165  DataType::QAsymmS8,
2166  DataType::QAsymmU8,
2167  DataType::QSymmS8,
2168  DataType::QuantizedSymm8PerAxis //Deprecated
2169  };
2170  ARMNN_NO_DEPRECATE_WARN_END
2171 
2172  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
2173  "Reference TransposeConvolution2d: weights type not supported for "
2174  "quantized input.");
2175  }
2176  else
2177  {
2178  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
2179  "Reference TransposeConvolution2d: weights is not a supported type.");
2180 
2181  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
2182  "Reference TransposeConvolution2d: input and weights types mismatched.");
2183  }
2184 
2185  if (biases.has_value())
2186  {
2187  std::array<DataType,4> biasesSupportedTypes =
2188  {
2189  DataType::BFloat16,
2190  DataType::Float32,
2191  DataType::Float16,
2192  DataType::Signed32
2193  };
2194  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
2195  "Reference TransposeConvolution2d: biases is not a supported type.");
2196  }
2197 
2198  return supported;
2199 }
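A sketch of the float path (geometry, strides, the NCHW ordering and the standalone main() are illustrative assumptions; for a quantized input the weights would instead have to come from the 8-bit weight list above):

#include <RefLayerSupport.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <iostream>
#include <string>

int main()
{
    using namespace armnn;

    // Only data types are validated here, so the spatial dimensions are nominal.
    TensorInfo input  (TensorShape({1, 1, 4, 4}), DataType::Float32);
    TensorInfo output (TensorShape({1, 1, 8, 8}), DataType::Float32);
    TensorInfo weights(TensorShape({1, 1, 2, 2}), DataType::Float32);
    TensorInfo bias   (TensorShape({1}),          DataType::Float32);

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX     = 2;
    descriptor.m_StrideY     = 2;
    descriptor.m_BiasEnabled = true;

    std::string reason;
    bool supported = RefLayerSupport().IsTransposeConvolution2dSupported(
        input, output, descriptor, weights, Optional<TensorInfo>(bias),
        Optional<std::string&>(reason));

    if (supported) { std::cout << "TransposeConvolution2d supported\n"; }
    else           { std::cout << "Not supported: " << reason << "\n"; }
}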

◆ IsTransposeSupported()

bool IsTransposeSupported ( const TensorInfo input,
const TensorInfo output,
const TransposeDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 2201 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

2205 {
2206  IgnoreUnused(descriptor);
2207  bool supported = true;
2208 
2209  // Define supported output and inputs types.
2210  std::array<DataType, 6> supportedTypes =
2211  {
2212  DataType::BFloat16,
2213  DataType::Float32,
2214  DataType::Float16,
2215  DataType::QAsymmS8,
2216  DataType::QAsymmU8,
2217  DataType::QSymmS16
2218  };
2219 
2220  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2221  "Reference transpose: input is not a supported type.");
2222 
2223  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2224  "Reference transpose: output is not a supported type.");
2225 
2226  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2227  "Reference transpose: input and output types are mismatched.");
2228 
2229  return supported;
2230 }

The documentation for this class was generated from the following files: