ArmNN 21.05
RefLayerSupport Class Reference

#include <RefLayerSupport.hpp>

Inheritance diagram for RefLayerSupport:
RefLayerSupport inherits LayerSupportBase, which in turn inherits ILayerSupport.

Public Member Functions

bool IsAbsSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsActivationSupported (const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsAdditionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsArgMinMaxSupported (const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchToSpaceNdSupported (const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsCastSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsComparisonSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConcatSupported (const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const ConcatDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConstantSupported (const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertBf16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToBf16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToFp16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDebugSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthToSpaceSupported (const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDequantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDetectionPostProcessSupported (const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDilatedDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDivisionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsElementwiseUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsEqualSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFakeQuantizationSupported (const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFillSupported (const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFloorSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFullyConnectedSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsGreaterSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInputSupported (const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInstanceNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsL2NormalizationSupported (const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogicalBinarySupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsLogSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMaximumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMeanSupported (const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMergerSupported (const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const MergerDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMemCopySupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMinimumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMultiplicationSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsOutputSupported (const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPadSupported (const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPermuteSupported (const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPooling2dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQuantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQLstmSupported (const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsRankSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsReduceSupported (const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsReshapeSupported (const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsResizeBilinearSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsResizeSupported (const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsRsqrtSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSliceSupported (const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToBatchNdSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToDepthSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSplitterSupported (const TensorInfo &input, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSplitterSupported (const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStackSupported (const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStridedSliceSupported (const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSubtractionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPreluSupported (const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeSupported (const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
- Public Member Functions inherited from LayerSupportBase
bool IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogicalUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMemImportSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMergeSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPreCompiledSupported (const TensorInfo &input, const PreCompiledDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQuantizedLstmSupported (const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStandInSupported (const std::vector< const TensorInfo *> &inputs, const std::vector< const TensorInfo *> &outputs, const StandInDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSwitchSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output0, const TensorInfo &output1, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 

Additional Inherited Members

- Protected Member Functions inherited from ILayerSupport
 ILayerSupport ()
 
virtual ~ILayerSupport ()
 

Detailed Description

Definition at line 12 of file RefLayerSupport.hpp.
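In practice, every Is*Supported query follows the same pattern: build the TensorInfo objects (plus a descriptor where the layer needs one), then call the query either with EmptyOptional() or with a reference to a std::string that collects the reasons for rejection. The following is a minimal sketch under those assumptions; the shapes, data types and error handling are illustrative and are not taken from this page.

// Minimal sketch: querying reference-backend support for an Addition layer.
// The shapes, data types and error handling below are illustrative assumptions.
#include <RefLayerSupport.hpp>

#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include <iostream>
#include <string>

int main()
{
    using namespace armnn;

    RefLayerSupport layerSupport;

    const TensorInfo input0({ 2, 4 }, DataType::Float32);
    const TensorInfo input1({ 2, 4 }, DataType::Float32);
    const TensorInfo output({ 2, 4 }, DataType::Float32);

    // Collect the reasons for rejection, if any, in a caller-owned string.
    std::string reason;
    const bool supported =
        layerSupport.IsAdditionSupported(input0, input1, output, Optional<std::string&>(reason));

    if (!supported)
    {
        std::cout << "Addition rejected by the reference backend: " << reason << std::endl;
    }
    return supported ? 0 : 1;
}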

Member Function Documentation

◆ IsAbsSupported()

bool IsAbsSupported (const TensorInfo &input,
                     const TensorInfo &output,
                     Optional< std::string &> reasonIfUnsupported = EmptyOptional()
                     ) const  [override], [virtual]

Reimplemented from LayerSupportBase.

Definition at line 61 of file RefLayerSupport.cpp.

References armnn::Abs, and RefLayerSupport::IsElementwiseUnarySupported().

{
    return IsElementwiseUnarySupported(input,
                                       output,
                                       ElementwiseUnaryDescriptor(UnaryOperation::Abs),
                                       reasonIfUnsupported);
}

◆ IsActivationSupported()

bool IsActivationSupported (const TensorInfo &input,
                            const TensorInfo &output,
                            const ActivationDescriptor &descriptor,
                            Optional< std::string &> reasonIfUnsupported = EmptyOptional()
                            ) const  [override], [virtual]

Reimplemented from LayerSupportBase.

Definition at line 70 of file RefLayerSupport.cpp.

References armnn::Abs, armnn::BFloat16, armnn::BoundedReLu, armnn::CheckSupportRule(), armnn::Elu, armnn::Float16, armnn::Float32, armnn::HardSwish, armnn::LeakyReLu, armnn::Linear, ActivationDescriptor::m_Function, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::ReLu, armnn::Sigmoid, armnn::SoftReLu, armnn::Sqrt, armnn::Square, and armnn::TanH.

{
    bool supported = true;

    // Define supported types.
    std::array<DataType,6> supportedTypes = {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference activation: input type not supported.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference activation: output type not supported.");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference activation: input and output types mismatched.");

    supported &= CheckSupportRule(ShapesAreSameRank(input, output), reasonIfUnsupported,
                                  "Reference activation: input and output shapes are of different rank.");

    struct ActivationFunctionSupported : public Rule
    {
        ActivationFunctionSupported(const ActivationDescriptor& desc)
        {
            switch(desc.m_Function)
            {
                case ActivationFunction::Abs:
                case ActivationFunction::BoundedReLu:
                case ActivationFunction::Elu:
                case ActivationFunction::HardSwish:
                case ActivationFunction::LeakyReLu:
                case ActivationFunction::Linear:
                case ActivationFunction::ReLu:
                case ActivationFunction::Sigmoid:
                case ActivationFunction::SoftReLu:
                case ActivationFunction::Sqrt:
                case ActivationFunction::Square:
                case ActivationFunction::TanH:
                {
                    m_Res = true;
                    break;
                }
                default:
                {
                    m_Res = false;
                    break;
                }
            }
        }
    };

    // Function is supported
    supported &= CheckSupportRule(ActivationFunctionSupported(descriptor), reasonIfUnsupported,
                                  "Reference activation: function not supported.");

    return supported;
}
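A hedged illustration of the rules above: an activation whose input and output data types differ fails the TypesAreEqual rule, and the reason string accumulates one message per failed rule. The helper function name and the tensor configuration are made up for the example.

// Illustrative only: a deliberately mismatched Activation query against the
// reference backend. Headers as in the sketch under Detailed Description.
#include <RefLayerSupport.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>

#include <cassert>
#include <string>

void CheckMismatchedActivation()
{
    using namespace armnn;

    RefLayerSupport layerSupport;

    const TensorInfo input ({ 1, 8 }, DataType::Float32);
    const TensorInfo output({ 1, 8 }, DataType::Float16); // mismatched on purpose

    ActivationDescriptor descriptor;
    descriptor.m_Function = ActivationFunction::ReLu;

    std::string reason;
    const bool supported = layerSupport.IsActivationSupported(input, output, descriptor,
                                                              Optional<std::string&>(reason));

    // The TypesAreEqual rule fails, so the query returns false and appends
    // "Reference activation: input and output types mismatched." to 'reason'.
    assert(!supported && !reason.empty());
}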

◆ IsAdditionSupported()

bool IsAdditionSupported (const TensorInfo &input0,
                          const TensorInfo &input1,
                          const TensorInfo &output,
                          Optional< std::string &> reasonIfUnsupported = EmptyOptional()
                          ) const  [override], [virtual]

Reimplemented from LayerSupportBase.

Definition at line 138 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by BOOST_AUTO_TEST_CASE().

{
    bool supported = true;

    std::array<DataType,7> supportedTypes = {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16,
        DataType::Signed32
    };

    supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
                                  "Reference addition: input 0 is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
                                  "Reference addition: input 1 is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference addition: output is not a supported type.");

    supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
                                  "Reference addition: input 0 and Input 1 types are mismatched");

    supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
                                  "Reference addition: input and output types are mismatched");

    supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
                                  "Reference addition: shapes are not suitable for implicit broadcast.");

    return supported;
}

◆ IsArgMinMaxSupported()

bool IsArgMinMaxSupported (const TensorInfo &input,
                           const TensorInfo &output,
                           const ArgMinMaxDescriptor &descriptor,
                           Optional< std::string &> reasonIfUnsupported = EmptyOptional()
                           ) const  [override], [virtual]

Reimplemented from LayerSupportBase.

Definition at line 176 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::Signed32, and armnn::Signed64.

{
    IgnoreUnused(descriptor);

    std::array<DataType, 8> supportedInputTypes =
    {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16,
        DataType::Signed32,
        DataType::Signed64
    };

    std::array<DataType,2> supportedOutputTypes = {
        DataType::Signed32,
        DataType::Signed64
    };

    bool supported = true;

    supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
                                  "Reference ArgMinMax: input is not a supported type.");
    supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
                                  "Reference ArgMinMax: output type not supported");

    return supported;
}

◆ IsBatchNormalizationSupported()

bool IsBatchNormalizationSupported (const TensorInfo &input,
                                    const TensorInfo &output,
                                    const TensorInfo &mean,
                                    const TensorInfo &var,
                                    const TensorInfo &beta,
                                    const TensorInfo &gamma,
                                    const BatchNormalizationDescriptor &descriptor,
                                    Optional< std::string &> reasonIfUnsupported = EmptyOptional()
                                    ) const  [override], [virtual]

Reimplemented from LayerSupportBase.

Definition at line 209 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

{
    IgnoreUnused(descriptor);

    std::array<DataType, 6> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    bool supported = true;

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference batch normalization: input is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference batch normalization: output is not a supported type.");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference batch normalization: input and output types are mismatched");

    supported &= CheckSupportRule(TypeAnyOf(mean, supportedTypes), reasonIfUnsupported,
                                  "Reference batch normalization: mean is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(variance, supportedTypes), reasonIfUnsupported,
                                  "Reference batch normalization: variance is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(beta, supportedTypes), reasonIfUnsupported,
                                  "Reference batch normalization: beta is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(gamma, supportedTypes), reasonIfUnsupported,
                                  "Reference batch normalization: gamma is not a supported type.");

    return supported;
}

◆ IsBatchToSpaceNdSupported()

bool IsBatchToSpaceNdSupported (const TensorInfo &input,
                                const TensorInfo &output,
                                const BatchToSpaceNdDescriptor &descriptor,
                                Optional< std::string &> reasonIfUnsupported = EmptyOptional()
                                ) const  [override], [virtual]

Reimplemented from LayerSupportBase.

Definition at line 256 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetNumDimensions(), armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

{
    IgnoreUnused(descriptor);

    bool supported = true;

    std::string batchToSpaceNdLayerStr = "batchToSpaceNd";
    std::string inputTensorStr = "input";
    std::string outputTensorStr = "output";

    // Define supported types.
    std::array<DataType,6> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference BatchToSpaceNd: input type not supported.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference BatchToSpaceNd: output type not supported.");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference BatchToSpaceNd: input and output types mismatched.");

    supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 4),
                                  reasonIfUnsupported,
                                  CreateIncorrectDimensionsErrorMsg(4,
                                                                    output.GetNumDimensions(),
                                                                    batchToSpaceNdLayerStr,
                                                                    outputTensorStr).data());

    supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(input, 4),
                                  reasonIfUnsupported,
                                  CreateIncorrectDimensionsErrorMsg(4,
                                                                    input.GetNumDimensions(),
                                                                    batchToSpaceNdLayerStr,
                                                                    inputTensorStr).data());

    return supported;
}

◆ IsCastSupported()

bool IsCastSupported (const TensorInfo &input,
                      const TensorInfo &output,
                      Optional< std::string &> reasonIfUnsupported = EmptyOptional()
                      ) const  [override], [virtual]

Reimplemented from LayerSupportBase.

Definition at line 306 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.

{
    std::array<DataType, 9> supportedInputTypes =
    {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS8,
        DataType::QSymmS16,
        DataType::Signed32
    };

    bool supported = true;
    supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
                                  "Reference cast: input is not a supported type");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedInputTypes), reasonIfUnsupported,
                                  "Reference cast: output is not a supported type");

    supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
                                  "Reference cast: input and output shapes have different number of total elements");

    return supported;
}

◆ IsComparisonSupported()

bool IsComparisonSupported (const TensorInfo &input0,
                            const TensorInfo &input1,
                            const TensorInfo &output,
                            const ComparisonDescriptor &descriptor,
                            Optional< std::string &> reasonIfUnsupported = EmptyOptional()
                            ) const  [override], [virtual]

Reimplemented from LayerSupportBase.

Definition at line 336 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsEqualSupported(), and RefLayerSupport::IsGreaterSupported().

{
    IgnoreUnused(descriptor);
    std::array<DataType, 8> supportedInputTypes =
    {
        DataType::Boolean,
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16,
        DataType::Signed32
    };

    bool supported = true;
    supported &= CheckSupportRule(TypeAnyOf(input0, supportedInputTypes), reasonIfUnsupported,
                                  "Reference comparison: input 0 is not a supported type");

    supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
                                  "Reference comparison: input 0 and Input 1 types are mismatched");

    supported &= CheckSupportRule(TypeIs(output, DataType::Boolean), reasonIfUnsupported,
                                  "Reference comparison: output is not of type Boolean");

    return supported;
}

◆ IsConcatSupported()

bool IsConcatSupported (const std::vector< const TensorInfo *> inputs,
                        const TensorInfo &output,
                        const ConcatDescriptor &descriptor,
                        Optional< std::string &> reasonIfUnsupported = EmptyOptional()
                        ) const  [override], [virtual]

Reimplemented from LayerSupportBase.

Definition at line 368 of file RefLayerSupport.cpp.

References ARMNN_ASSERT, armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsMergerSupported().

{
    IgnoreUnused(descriptor);

    bool supported = true;
    std::array<DataType,6> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference concatenation: output type not supported");
    for (const TensorInfo* input : inputs)
    {
        ARMNN_ASSERT(input != nullptr);
        supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
                                      "Reference concatenation: input type not supported");

        supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
                                      "Reference concatenation: input and output types mismatched.");
    }

    return supported;
}

◆ IsConstantSupported()

bool IsConstantSupported (const TensorInfo &output,
                          Optional< std::string &> reasonIfUnsupported = EmptyOptional()
                          ) const  [override], [virtual]

Reimplemented from LayerSupportBase.

Definition at line 401 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.

{
    std::array<DataType,8> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS8,
        DataType::QSymmS16,
        DataType::Signed32
    };

    return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                            "Reference constant: output is not a supported type.");
}

◆ IsConvertBf16ToFp32Supported()

bool IsConvertBf16ToFp32Supported (const TensorInfo &input,
                                   const TensorInfo &output,
                                   Optional< std::string &> reasonIfUnsupported = EmptyOptional()
                                   ) const  [override], [virtual]

Reimplemented from LayerSupportBase.

Definition at line 420 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), and armnn::Float32.

{
    bool supported = true;

    supported &= CheckSupportRule(TypeIs(input, DataType::BFloat16), reasonIfUnsupported,
                                  "Reference for ConvertBf16ToFp32 layer: input type not supported");

    supported &= CheckSupportRule(TypeIs(output, DataType::Float32), reasonIfUnsupported,
                                  "Reference for ConvertBf16ToFp32 layer: output type not supported");

    return supported;
}

◆ IsConvertFp16ToFp32Supported()

bool IsConvertFp16ToFp32Supported (const TensorInfo &input,
                                   const TensorInfo &output,
                                   Optional< std::string &> reasonIfUnsupported = EmptyOptional()
                                   ) const  [override], [virtual]

Reimplemented from LayerSupportBase.

Definition at line 435 of file RefLayerSupport.cpp.

References TensorInfo::GetDataType(), and armnn::IsSupportedForDataTypeGeneric().

{
    return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                          input.GetDataType(),
                                          &TrueFunc<>,
                                          &FalseInputFuncF32<>,
                                          &FalseFuncU8<>,
                                          &FalseFuncI32<>,
                                          &FalseFuncU8<>) &&
            IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                          output.GetDataType(),
                                          &FalseOutputFuncF16<>,
                                          &TrueFunc<>,
                                          &FalseFuncU8<>,
                                          &FalseFuncI32<>,
                                          &FalseFuncU8<>));
}

◆ IsConvertFp32ToBf16Supported()

bool IsConvertFp32ToBf16Supported (const TensorInfo &input,
                                   const TensorInfo &output,
                                   Optional< std::string &> reasonIfUnsupported = EmptyOptional()
                                   ) const  [override], [virtual]

Reimplemented from LayerSupportBase.

Definition at line 455 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), and armnn::Float32.

{
    bool supported = true;

    supported &= CheckSupportRule(TypeIs(input, DataType::Float32), reasonIfUnsupported,
                                  "Reference for ConvertFp32ToBf16 layer: input type not supported");

    supported &= CheckSupportRule(TypeIs(output, DataType::BFloat16), reasonIfUnsupported,
                                  "Reference for ConvertFp32ToBf16 layer: output type not supported");

    return supported;
}

◆ IsConvertFp32ToFp16Supported()

bool IsConvertFp32ToFp16Supported (const TensorInfo &input,
                                   const TensorInfo &output,
                                   Optional< std::string &> reasonIfUnsupported = EmptyOptional()
                                   ) const  [override], [virtual]

Reimplemented from LayerSupportBase.

Definition at line 470 of file RefLayerSupport.cpp.

References TensorInfo::GetDataType(), and armnn::IsSupportedForDataTypeGeneric().

{
    return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                          input.GetDataType(),
                                          &FalseInputFuncF16<>,
                                          &TrueFunc<>,
                                          &FalseFuncU8<>,
                                          &FalseFuncI32<>,
                                          &FalseFuncU8<>) &&
            IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                          output.GetDataType(),
                                          &TrueFunc<>,
                                          &FalseOutputFuncF32<>,
                                          &FalseFuncU8<>,
                                          &FalseFuncI32<>,
                                          &FalseFuncU8<>));
}

◆ IsConvolution2dSupported()

bool IsConvolution2dSupported (const TensorInfo &input,
                               const TensorInfo &output,
                               const Convolution2dDescriptor &descriptor,
                               const TensorInfo &weights,
                               const Optional< TensorInfo > &biases,
                               Optional< std::string &> reasonIfUnsupported = EmptyOptional()
                               ) const  [override], [virtual]

Reimplemented from LayerSupportBase.

Definition at line 490 of file RefLayerSupport.cpp.

References ARMNN_NO_DEPRECATE_WARN_BEGIN, ARMNN_NO_DEPRECATE_WARN_END, armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::QuantizedSymm8PerAxis, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

{
    bool supported = true;

    // Define supported types.
    std::array<DataType,7> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS8,
        DataType::QSymmS16
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference Convolution2d: input is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference Convolution2d: output is not a supported type.");

    // For Convolution2d, we allow to have BFloat16 input with Float32 output for optimization.
    if (input.GetDataType() == DataType::BFloat16)
    {
        if (output.GetDataType() != DataType::BFloat16 && output.GetDataType() != DataType::Float32)
        {
            reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n";
            supported = false;
        }
    }
    else
    {
        supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                      "Reference Convolution2d: input and output types mismatched.");
    }

    const DataType inputType = input.GetDataType();
    if (IsQuantized8BitType(inputType))
    {
        ARMNN_NO_DEPRECATE_WARN_BEGIN
        std::array<DataType, 4> supportedWeightTypes =
        {
            DataType::QAsymmS8,
            DataType::QAsymmU8,
            DataType::QSymmS8,
            DataType::QuantizedSymm8PerAxis // deprecated
        };
        ARMNN_NO_DEPRECATE_WARN_END

        supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
                                      "Reference Convolution2d: weights type not supported for quantized input.");
    }
    else
    {
        supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
                                      "Reference Convolution2d: weights is not a supported type.");

        supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
                                      "Reference Convolution2d: input and weights types mismatched.");
    }

    if (biases.has_value())
    {
        std::array<DataType,4> biasesSupportedTypes =
        {
            DataType::BFloat16,
            DataType::Float16,
            DataType::Float32,
            DataType::Signed32
        };

        supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
                                      "Reference Convolution2d: biases is not a supported type.");
    }
    IgnoreUnused(descriptor);

    return supported;
}
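A hedged sketch of the quantized path above: with a QAsymmU8 input, the weights must also be an 8-bit quantized type, and the bias, if present, must be one of the bias types listed above (Signed32 is the usual choice for quantized data). The helper function name, shapes and quantization parameters below are example values, not taken from this page.

// Illustrative only: querying Convolution2d support for a quantized configuration.
#include <RefLayerSupport.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>

#include <string>

bool IsQuantizedConv2dSupportedOnRef()
{
    using namespace armnn;

    RefLayerSupport layerSupport;

    // NHWC 1x16x16x3 input, 8 filters of 3x3x3; quantization parameters are example values.
    const TensorInfo input  ({ 1, 16, 16, 3 }, DataType::QAsymmU8, 0.5f,   10);
    const TensorInfo output ({ 1, 16, 16, 8 }, DataType::QAsymmU8, 0.5f,   10);
    const TensorInfo weights({ 8, 3, 3, 3 },   DataType::QAsymmU8, 0.25f,  0);
    const TensorInfo biases ({ 8 },            DataType::Signed32, 0.125f, 0);

    Convolution2dDescriptor descriptor;
    descriptor.m_StrideX     = 1;
    descriptor.m_StrideY     = 1;
    descriptor.m_PadLeft     = 1;
    descriptor.m_PadRight    = 1;
    descriptor.m_PadTop      = 1;
    descriptor.m_PadBottom   = 1;
    descriptor.m_BiasEnabled = true;
    descriptor.m_DataLayout  = DataLayout::NHWC;

    std::string reason;
    return layerSupport.IsConvolution2dSupported(input, output, descriptor, weights,
                                                 Optional<TensorInfo>(biases),
                                                 Optional<std::string&>(reason));
}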

◆ IsDebugSupported()

bool IsDebugSupported (const TensorInfo &input,
                       const TensorInfo &output,
                       Optional< std::string &> reasonIfUnsupported = EmptyOptional()
                       ) const  [override], [virtual]

Reimplemented from LayerSupportBase.

Definition at line 575 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.

{
    bool supported = true;

    std::array<DataType, 8> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS8,
        DataType::QSymmS16,
        DataType::Signed32
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference for Debug layer: input type not supported");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference for Debug layer: output type not supported");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference for Debug layer: input and output types are mismatched");

    return supported;
}

◆ IsDepthToSpaceSupported()

bool IsDepthToSpaceSupported (const TensorInfo &input,
                              const TensorInfo &output,
                              const DepthToSpaceDescriptor &descriptor,
                              Optional< std::string &> reasonIfUnsupported = EmptyOptional()
                              ) const  [override], [virtual]

Reimplemented from LayerSupportBase.

Definition at line 605 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

{
    IgnoreUnused(descriptor);
    bool supported = true;

    std::array<DataType,6> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference DepthToSpace: input type not supported");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference DepthToSpace: output type not supported");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference DepthToSpace: input and output types are mismatched");

    return supported;
}

◆ IsDepthwiseConvolutionSupported()

bool IsDepthwiseConvolutionSupported (const TensorInfo &input,
                                      const TensorInfo &output,
                                      const DepthwiseConvolution2dDescriptor &descriptor,
                                      const TensorInfo &weights,
                                      const Optional< TensorInfo > &biases,
                                      Optional< std::string &> reasonIfUnsupported = EmptyOptional()
                                      ) const  [override], [virtual]

Reimplemented from LayerSupportBase.

Definition at line 635 of file RefLayerSupport.cpp.

References ARMNN_NO_DEPRECATE_WARN_BEGIN, ARMNN_NO_DEPRECATE_WARN_END, armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::QuantizedSymm8PerAxis, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

Referenced by RefLayerSupport::IsDilatedDepthwiseConvolutionSupported().

{
    IgnoreUnused(descriptor);
    bool supported = true;

    // Define supported types.
    std::array<DataType,7> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS8,
        DataType::QSymmS16
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference DepthwiseConvolution2d: input is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference DepthwiseConvolution2d: output is not a supported type.");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference DepthwiseConvolution2d: input and output types mismatched.");

    const DataType inputType = input.GetDataType();
    if (IsQuantized8BitType(inputType))
    {
        ARMNN_NO_DEPRECATE_WARN_BEGIN
        std::array<DataType, 4> supportedWeightTypes =
        {
            DataType::QAsymmS8,
            DataType::QAsymmU8,
            DataType::QSymmS8,
            DataType::QuantizedSymm8PerAxis // deprecated
        };
        ARMNN_NO_DEPRECATE_WARN_END

        supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
                                      "Reference DepthwiseConvolution2d: weights type not supported for "
                                      "quantized input.");
    }
    else
    {
        supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
                                      "Reference DepthwiseConvolution2d: weights is not a supported type.");

        supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
                                      "Reference DepthwiseConvolution2d: input and weights types mismatched.");
    }

    if (biases.has_value())
    {
        std::array<DataType,4> biasesSupportedTypes =
        {
            DataType::BFloat16,
            DataType::Float16,
            DataType::Float32,
            DataType::Signed32
        };
        supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
                                      "Reference DepthwiseConvolution2d: biases is not a supported type.");
    }

    return supported;
}

◆ IsDequantizeSupported()

bool IsDequantizeSupported (const TensorInfo &input,
                            const TensorInfo &output,
                            Optional< std::string &> reasonIfUnsupported = EmptyOptional()
                            ) const  [override], [virtual]

Reimplemented from LayerSupportBase.

Definition at line 709 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

{
    bool supported = true;

    std::array<DataType,4> supportedInputTypes = {
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS8,
        DataType::QSymmS16
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
                                  "Reference for Dequantize layer: input type not supported.");

    supported &= CheckSupportRule(TypeNotPerAxisQuantized(input), reasonIfUnsupported,
                                  "Reference for Dequantize layer: per-axis quantized input not support .");

    supported &= CheckSupportRule(TypeNotPerAxisQuantized(input), reasonIfUnsupported,
                                  "Reference dequantize: per-axis quantized input not support .");

    std::array<DataType,3> supportedOutputTypes = {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32
    };

    supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
                                  "Reference for Dequantize layer: output type not supported.");

    supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
                                  "Reference for Dequantize layer: input/output shapes have different num total "
                                  "elements.");

    return supported;
}

◆ IsDetectionPostProcessSupported()

bool IsDetectionPostProcessSupported (const TensorInfo &boxEncodings,
                                      const TensorInfo &scores,
                                      const TensorInfo &anchors,
                                      const TensorInfo &detectionBoxes,
                                      const TensorInfo &detectionClasses,
                                      const TensorInfo &detectionScores,
                                      const TensorInfo &numDetections,
                                      const DetectionPostProcessDescriptor &descriptor,
                                      Optional< std::string &> reasonIfUnsupported = EmptyOptional()
                                      ) const  [override], [virtual]

Reimplemented from LayerSupportBase.

Definition at line 747 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

{
    IgnoreUnused(anchors, detectionBoxes, detectionClasses, detectionScores, numDetections, descriptor);

    bool supported = true;

    std::array<DataType,6> supportedInputTypes =
    {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    supported &= CheckSupportRule(TypeAnyOf(boxEncodings, supportedInputTypes), reasonIfUnsupported,
                                  "Reference DetectionPostProcess: input 0 is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(scores, supportedInputTypes), reasonIfUnsupported,
                                  "Reference DetectionPostProcess: input 1 is not a supported type.");

    return supported;
}

◆ IsDilatedDepthwiseConvolutionSupported()

bool IsDilatedDepthwiseConvolutionSupported (const TensorInfo &input,
                                             const TensorInfo &output,
                                             const DepthwiseConvolution2dDescriptor &descriptor,
                                             const TensorInfo &weights,
                                             const Optional< TensorInfo > &biases,
                                             Optional< std::string &> reasonIfUnsupported = EmptyOptional()
                                             ) const  [override], [virtual]

Reimplemented from LayerSupportBase.

Definition at line 780 of file RefLayerSupport.cpp.

References RefLayerSupport::IsDepthwiseConvolutionSupported().

{
    return IsDepthwiseConvolutionSupported(input, output, descriptor, weights, biases, reasonIfUnsupported);
}

◆ IsDivisionSupported()

bool IsDivisionSupported (const TensorInfo &input0,
                          const TensorInfo &input1,
                          const TensorInfo &output,
                          Optional< std::string &> reasonIfUnsupported = EmptyOptional()
                          ) const  [override], [virtual]

Reimplemented from LayerSupportBase.

Definition at line 790 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

{
    bool supported = true;

    std::array<DataType,7> supportedTypes = {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16,
        DataType::Signed32
    };

    supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
                                  "Reference division: input 0 is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
                                  "Reference division: input 1 is not a supported type.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference division: output is not a supported type.");

    supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
                                  "Reference division: input 0 and Input 1 types are mismatched");

    supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
                                  "Reference division: input and output types are mismatched");

    supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
                                  "Reference division: shapes are not suitable for implicit broadcast.");

    return supported;
}

◆ IsElementwiseUnarySupported()

bool IsElementwiseUnarySupported (const TensorInfo &input,
                                  const TensorInfo &output,
                                  const ElementwiseUnaryDescriptor &descriptor,
                                  Optional< std::string &> reasonIfUnsupported = EmptyOptional()
                                  ) const  [override], [virtual]

Reimplemented from LayerSupportBase.

Definition at line 828 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::LogicalNot, ElementwiseUnaryDescriptor::m_Operation, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsAbsSupported(), and RefLayerSupport::IsRsqrtSupported().

{
    IgnoreUnused(descriptor);

    std::array<DataType, 7> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16,
        DataType::Signed32
    };

    std::array<DataType, 1> logicalSupportedTypes =
    {
        DataType::Boolean
    };

    bool supported = true;

    if (descriptor.m_Operation == UnaryOperation::LogicalNot)
    {
        supported &= CheckSupportRule(TypeAnyOf(input, logicalSupportedTypes), reasonIfUnsupported,
                                      "Reference elementwise unary: input type not supported");

        supported &= CheckSupportRule(TypeAnyOf(output, logicalSupportedTypes), reasonIfUnsupported,
                                      "Reference elementwise unary: output type not supported");
    }
    else
    {
        supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                      "Reference elementwise unary: input type not supported");

        supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                      "Reference elementwise unary: output type not supported");
    }

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference elementwise unary: input and output types not matching");

    supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
                                  "Reference elementwise unary: input and output shapes"
                                  "have different number of total elements");

    return supported;
}
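A short hedged illustration of the LogicalNot branch above: both tensors must be Boolean, otherwise the type rules reject the configuration. The helper function name and the shape are invented for the example.

// Illustrative only: LogicalNot requires Boolean input and output tensors.
#include <RefLayerSupport.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>

#include <string>

bool IsLogicalNotSupportedOnRef()
{
    using namespace armnn;

    RefLayerSupport layerSupport;

    const TensorInfo input ({ 4 }, DataType::Boolean);
    const TensorInfo output({ 4 }, DataType::Boolean);

    ElementwiseUnaryDescriptor descriptor(UnaryOperation::LogicalNot);

    std::string reason;
    return layerSupport.IsElementwiseUnarySupported(input, output, descriptor,
                                                    Optional<std::string&>(reason));
}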

◆ IsEqualSupported()

bool IsEqualSupported (const TensorInfo &input0,
                       const TensorInfo &input1,
                       const TensorInfo &output,
                       Optional< std::string &> reasonIfUnsupported = EmptyOptional()
                       ) const  [override], [virtual]

Reimplemented from LayerSupportBase.

Definition at line 880 of file RefLayerSupport.cpp.

References armnn::Equal, and RefLayerSupport::IsComparisonSupported().

{
    return IsComparisonSupported(input0,
                                 input1,
                                 output,
                                 ComparisonDescriptor(ComparisonOperation::Equal),
                                 reasonIfUnsupported);
}

◆ IsFakeQuantizationSupported()

bool IsFakeQuantizationSupported (const TensorInfo &input,
                                  const FakeQuantizationDescriptor &descriptor,
                                  Optional< std::string &> reasonIfUnsupported = EmptyOptional()
                                  ) const  [override], [virtual]

Reimplemented from LayerSupportBase.

Definition at line 892 of file RefLayerSupport.cpp.

References armnn::CheckSupportRule(), armnn::Float32, and armnn::IgnoreUnused().

{
    IgnoreUnused(descriptor);
    bool supported = true;

    std::array<DataType,1> supportedTypes =
    {
        DataType::Float32
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference fake quantization: input type not supported.");

    return supported;
}

◆ IsFillSupported()

bool IsFillSupported (const TensorInfo &input,
                      const TensorInfo &output,
                      const FillDescriptor &descriptor,
                      Optional< std::string &> reasonIfUnsupported = EmptyOptional()
                      ) const  [override], [virtual]

Reimplemented from LayerSupportBase.

Definition at line 910 of file RefLayerSupport.cpp.

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), and armnn::Signed32.

{
    IgnoreUnused(descriptor);
    IgnoreUnused(output);

    bool supported = true;

    std::array<DataType,3> supportedTypes =
    {
        DataType::Float16,
        DataType::Float32,
        DataType::Signed32
    };

    supported &= CheckSupportRule(TypeIs(input, DataType::Signed32), reasonIfUnsupported,
                                  "Reference Fill: input type not supported.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference Fill: output type not supported.");
    return supported;
}

◆ IsFloorSupported()

bool IsFloorSupported (const TensorInfo &input,
                       const TensorInfo &output,
                       Optional< std::string &> reasonIfUnsupported = EmptyOptional()
                       ) const  [override], [virtual]

Reimplemented from LayerSupportBase.

Definition at line 935 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, and armnn::IgnoreUnused().

{
    IgnoreUnused(output);
    bool supported = true;

    std::array<DataType,3> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32
    };

    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference Floor: input type not supported.");

    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                  "Reference Floor: output type not supported.");

    return supported;
}

◆ IsFullyConnectedSupported()

bool IsFullyConnectedSupported (const TensorInfo &input,
                                const TensorInfo &output,
                                const TensorInfo &weights,
                                const TensorInfo &biases,
                                const FullyConnectedDescriptor &descriptor,
                                Optional< std::string &> reasonIfUnsupported = EmptyOptional()
                                ) const  [override], [virtual]

Reimplemented from LayerSupportBase.

Definition at line 958 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), FullyConnectedDescriptor::m_BiasEnabled, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

964 {
965  bool supported = true;
966 
967  // Define supported types.
968  std::array<DataType,6> supportedTypes =
969  {
976  };
977 
978  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
979  "Reference Fully Connected: input type not supported.");
980 
981  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
982  "Reference Fully Connected: output type not supported.");
983 
984  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
985  "Reference Fully Connected: weights type not supported.");
986 
987  // For FullyConnected, we allow to have BFloat16 input with Float32 output for optimization.
988  if (input.GetDataType() == DataType::BFloat16)
989  {
990  if (output.GetDataType() != DataType::BFloat16 && output.GetDataType() != DataType::Float32)
991  {
992  reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n";
993  supported = false;
994  }
995  }
996  else
997  {
998  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
999  "Reference Fully Connected: input and output types mismatched.");
1000  }
1001 
1002  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1003  "Reference Fully Connected: weights is not a supported type.");
1004 
1005  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1006  "Reference Fully Connected: input and weights types mismatched.");
1007 
1008  if (descriptor.m_BiasEnabled)
1009  {
1010  // Defined supported types for bias
1011  std::array<DataType, 5>
1012  supportedBiasTypes =
1013  {
1014  DataType::BFloat16,
1015  DataType::Float32,
1016  DataType::Float16,
1017  DataType::Signed32,
1018  DataType::QAsymmS8
1019  };
1020 
1021  supported &= CheckSupportRule(TypeAnyOf(biases, supportedBiasTypes), reasonIfUnsupported,
1022  "Reference Fully Connected: bias type not supported.");
1023 
1024  supported &= CheckSupportRule(BiasAndWeightsTypesMatch(biases, weights), reasonIfUnsupported,
1025  "Reference Fully Connected: bias and weight types mismatch.");
1026 
1027  supported &= CheckSupportRule(BiasAndWeightsTypesCompatible(weights, supportedBiasTypes), reasonIfUnsupported,
1028  "Reference Fully Connected: bias type inferred from weights is incompatible.");
1029 
1030  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(biases, 1U), reasonIfUnsupported,
1031  "Reference Fully Connected: bias must have 1 dimension.");
1032 
1033  }
1034 
1035  return supported;
1036 }

◆ IsGatherSupported()

bool IsGatherSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const GatherDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1038 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, GatherDescriptor::m_Axis, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

1043 {
1044  bool supported = true;
1045  std::array<DataType,7> supportedTypes =
1046  {
1047  DataType::BFloat16,
1048  DataType::Float32,
1049  DataType::Float16,
1050  DataType::QAsymmS8,
1051  DataType::QAsymmU8,
1052  DataType::QSymmS16,
1053  DataType::Signed32
1054  };
1055 
1056  if (descriptor.m_Axis != 0)
1057  {
1058  reasonIfUnsupported.value() += std::string("Reference Gather: axis not supported\n");
1059  supported &= false;
1060  }
1061  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1062  "Reference Gather: input type not supported");
1063 
1064  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1065  "Reference Gather: output type not supported");
1066 
1067  supported &= CheckSupportRule(TypeIs(input1, DataType::Signed32), reasonIfUnsupported,
1068  "Reference Gather: indices (input1) type not supported");
1069 
1070  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1071  "Reference Gather: input and output types not matching");
1072 
1073  return supported;
1074 }
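A short sketch (with assumed shapes) exercising the two constraints that most often trip this check: the indices tensor (input1) must be Signed32, and the descriptor axis must be 0.

    #include <RefLayerSupport.hpp>
    #include <armnn/Descriptors.hpp>
    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>
    #include <iostream>

    int main()
    {
        using namespace armnn;

        TensorInfo params({ 5, 4 }, DataType::Float32);  // input0: data to gather from
        TensorInfo indices({ 3 },   DataType::Signed32); // input1: indices must be Signed32
        TensorInfo output({ 3, 4 }, DataType::Float32);  // same type as input0

        GatherDescriptor descriptor;                     // m_Axis defaults to 0; any other axis is rejected here

        RefLayerSupport layerSupport;
        std::string reason;
        Optional<std::string&> reasonIfUnsupported(reason);

        bool supported = layerSupport.IsGatherSupported(params, indices, output, descriptor, reasonIfUnsupported);
        std::cout << (supported ? "Gather supported" : reason) << std::endl;
        return 0;
    }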

◆ IsGreaterSupported()

bool IsGreaterSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1076 of file RefLayerSupport.cpp.

References armnn::Greater, and RefLayerSupport::IsComparisonSupported().

1080 {
1081  return IsComparisonSupported(input0,
1082  input1,
1083  output,
1084  ComparisonDescriptor(ComparisonOperation::Greater),
1085  reasonIfUnsupported);
1086 }

◆ IsInputSupported()

bool IsInputSupported ( const TensorInfo input,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1088 of file RefLayerSupport.cpp.

1090 {
1091  return true;
1092 }

◆ IsInstanceNormalizationSupported()

bool IsInstanceNormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const InstanceNormalizationDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1094 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, and armnn::IgnoreUnused().

1098 {
1099  IgnoreUnused(descriptor);
1100  // Define supported types
1101  std::array<DataType, 3> supportedTypes =
1102  {
1103  DataType::BFloat16,
1104  DataType::Float32,
1105  DataType::Float16
1106  };
1107 
1108  bool supported = true;
1109 
1110  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1111  "Reference Instance Normalization: input type not supported.");
1112 
1113  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1114  "Reference Instance Normalization: output type not supported.");
1115 
1116  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1117  "Reference Instance Normalization: input and output types mismatched.");
1118 
1119  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1120  "Reference Instance Normalization: input and output shapes have different "
1121  "num total elements.");
1122 
1123  return supported;
1124 }

◆ IsL2NormalizationSupported()

bool IsL2NormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const L2NormalizationDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1126 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

1130 {
1131  IgnoreUnused(descriptor);
1132  // Define supported types
1133  std::array<DataType, 6> supportedTypes =
1134  {
1135  DataType::BFloat16,
1136  DataType::Float32,
1137  DataType::Float16,
1138  DataType::QAsymmS8,
1139  DataType::QAsymmU8,
1140  DataType::QSymmS16
1141  };
1142 
1143  bool supported = true;
1144 
1145  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1146  "Reference L2normalization: input type not supported.");
1147 
1148  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1149  "Reference L2normalization: output type not supported.");
1150 
1151  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1152  "Reference L2normalization: input and output types mismatched.");
1153 
1154  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1155  "Reference L2normalization: input and output shapes have different "
1156  "num total elements.");
1157 
1158  return supported;
1159 }

◆ IsLogicalBinarySupported()

bool IsLogicalBinarySupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const LogicalBinaryDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1161 of file RefLayerSupport.cpp.

References armnn::Boolean, armnn::CheckSupportRule(), and armnn::IgnoreUnused().

1166 {
1167  IgnoreUnused(descriptor);
1168 
1169  std::array<DataType, 1> supportedTypes =
1170  {
1171  DataType::Boolean
1172  };
1173 
1174  bool supported = true;
1175  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1176  "Reference LogicalBinary: input 0 type not supported");
1177  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1178  "Reference LogicalBinary: input 1 type not supported");
1179 
1180  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1181  "Reference LogicalBinary: input and output types do not match");
1182 
1183  return supported;
1184 }

◆ IsLogSoftmaxSupported()

bool IsLogSoftmaxSupported ( const TensorInfo input,
const TensorInfo output,
const LogSoftmaxDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1186 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, and armnn::IgnoreUnused().

1190 {
1191  IgnoreUnused(descriptor);
1192 
1193  std::array<DataType, 3> supportedTypes =
1194  {
1195  DataType::BFloat16,
1196  DataType::Float32,
1197  DataType::Float16
1198  };
1199 
1200  bool supported = true;
1201  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1202  "Reference LogSoftmax: input type not supported");
1203 
1204  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1205  "Reference LogSoftmax: output type not supported");
1206 
1207  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1208  "Reference LogSoftmax: input and output types do not match");
1209 
1210  return supported;
1211 }

◆ IsLstmSupported()

bool IsLstmSupported ( const TensorInfo input,
const TensorInfo outputStateIn,
const TensorInfo cellStateIn,
const TensorInfo scratchBuffer,
const TensorInfo outputStateOut,
const TensorInfo cellStateOut,
const TensorInfo output,
const LstmDescriptor descriptor,
const LstmInputParamsInfo paramsInfo,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1213 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float32, LstmInputParamsInfo::GetCellBias(), LstmInputParamsInfo::GetCellLayerNormWeights(), LstmInputParamsInfo::GetCellToForgetWeights(), LstmInputParamsInfo::GetCellToInputWeights(), LstmInputParamsInfo::GetCellToOutputWeights(), LstmInputParamsInfo::GetForgetGateBias(), LstmInputParamsInfo::GetForgetLayerNormWeights(), LstmInputParamsInfo::GetInputGateBias(), LstmInputParamsInfo::GetInputLayerNormWeights(), LstmInputParamsInfo::GetInputToCellWeights(), LstmInputParamsInfo::GetInputToForgetWeights(), LstmInputParamsInfo::GetInputToInputWeights(), LstmInputParamsInfo::GetInputToOutputWeights(), LstmInputParamsInfo::GetOutputGateBias(), LstmInputParamsInfo::GetOutputLayerNormWeights(), LstmInputParamsInfo::GetProjectionBias(), LstmInputParamsInfo::GetProjectionWeights(), LstmInputParamsInfo::GetRecurrentToCellWeights(), LstmInputParamsInfo::GetRecurrentToForgetWeights(), LstmInputParamsInfo::GetRecurrentToInputWeights(), LstmInputParamsInfo::GetRecurrentToOutputWeights(), armnn::IgnoreUnused(), LstmDescriptor::m_CifgEnabled, LstmDescriptor::m_LayerNormEnabled, LstmDescriptor::m_PeepholeEnabled, LstmInputParamsInfo::m_ProjectionBias, LstmDescriptor::m_ProjectionEnabled, and armnn::QSymmS16.

1223 {
1224  IgnoreUnused(descriptor);
1225  IgnoreUnused(paramsInfo);
1226 
1227  bool supported = true;
1228 
1229  std::array<DataType,3> supportedTypes = {
1230  DataType::BFloat16,
1231  DataType::Float32,
1232  DataType::QSymmS16
1233  };
1234 
1235  // check inputs and outputs
1236  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1237  "Reference Lstm: input is not a supported type.");
1238  supported &= CheckSupportRule(TypesAreEqual(input, outputStateIn), reasonIfUnsupported,
1239  "Reference Lstm: input and outputStateIn types are mismatched");
1240  supported &= CheckSupportRule(TypesAreEqual(input, cellStateIn), reasonIfUnsupported,
1241  "Reference Lstm: input and cellStateIn types are mismatched");
1242  supported &= CheckSupportRule(TypesAreEqual(input, scratchBuffer), reasonIfUnsupported,
1243  "Reference Lstm: input and scratchBuffer types are mismatched");
1244  supported &= CheckSupportRule(TypesAreEqual(input, outputStateOut), reasonIfUnsupported,
1245  "Reference Lstm: input and outputStateOut types are mismatched");
1246  supported &= CheckSupportRule(TypesAreEqual(input, cellStateOut), reasonIfUnsupported,
1247  "Reference Lstm: input and cellStateOut types are mismatched");
1248  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1249  "Reference Lstm: input and output types are mismatched");
1250  // check layer parameters
1251  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToForgetWeights()), reasonIfUnsupported,
1252  "Reference Lstm: input and InputToForgetWeights types are mismatched");
1253  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToCellWeights()), reasonIfUnsupported,
1254  "Reference Lstm: input and InputToCellWeights types are mismatched");
1255  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToOutputWeights()), reasonIfUnsupported,
1256  "Reference Lstm: input and InputToOutputWeights types are mismatched");
1257  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToForgetWeights()), reasonIfUnsupported,
1258  "Reference Lstm: input and RecurrentToForgetWeights types are mismatched");
1259  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToCellWeights()), reasonIfUnsupported,
1260  "Reference Lstm: input and RecurrentToCellWeights types are mismatched");
1261  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToOutputWeights()), reasonIfUnsupported,
1262  "Reference Lstm: input and RecurrentToOutputWeights types are mismatched");
1263  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetGateBias()), reasonIfUnsupported,
1264  "Reference Lstm: input and ForgetGateBias types are mismatched");
1265  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellBias()), reasonIfUnsupported,
1266  "Reference Lstm: input and CellBias types are mismatched");
1267  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputGateBias()), reasonIfUnsupported,
1268  "Reference Lstm: input and OutputGateBias types are mismatched");
1269  if (!descriptor.m_CifgEnabled)
1270  {
1271  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToInputWeights()), reasonIfUnsupported,
1272  "Reference Lstm: input and InputToInputWeights types are mismatched");
1273  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToInputWeights()),
1274  reasonIfUnsupported,
1275  "Reference Lstm: input and RecurrentToInputWeights types are mismatched");
1276  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputGateBias()), reasonIfUnsupported,
1277  "Reference Lstm: input and InputGateBias types are mismatched");
1278  if (descriptor.m_PeepholeEnabled)
1279  {
1280  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToInputWeights()),
1281  reasonIfUnsupported,
1282  "Reference Lstm: input and CellToInputWeights types are mismatched");
1283  }
1284  }
1285  if (descriptor.m_PeepholeEnabled)
1286  {
1287  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToForgetWeights()), reasonIfUnsupported,
1288  "Reference Lstm: input and CellToForgetWeights types are mismatched");
1289  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToOutputWeights()), reasonIfUnsupported,
1290  "Reference Lstm: input and CellToOutputWeights types are mismatched");
1291  }
1292  if (descriptor.m_ProjectionEnabled)
1293  {
1294  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionWeights()), reasonIfUnsupported,
1295  "Reference Lstm: input and mProjectionWeights types are mismatched");
1296  if (paramsInfo.m_ProjectionBias != nullptr)
1297  {
1298  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
1299  "Reference Lstm: input and ProjectionBias types are mismatched");
1300  }
1301  }
1302  if (descriptor.m_LayerNormEnabled)
1303  {
1304  if (!descriptor.m_CifgEnabled)
1305  {
1306  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputLayerNormWeights()),
1307  reasonIfUnsupported,
1308  "Reference Lstm: input and InputLayerNormWeights types are mismatched");
1309  }
1310  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetLayerNormWeights()),
1311  reasonIfUnsupported,
1312  "Reference Lstm: input and ForgetLayerNormWeights types are mismatched");
1313  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellLayerNormWeights()),
1314  reasonIfUnsupported,
1315  "Reference Lstm: input and CellLayerNormWeights types are mismatched");
1316  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputLayerNormWeights()),
1317  reasonIfUnsupported,
1318  "Reference Lstm: input and OutputLayerNormWeights types are mismatched");
1319  }
1320 
1321  return supported;
1322 }

◆ IsMaximumSupported()

bool IsMaximumSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1324 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

1328 {
1329  bool supported = true;
1330 
1331  std::array<DataType,7> supportedTypes = {
1332  DataType::BFloat16,
1333  DataType::Float32,
1334  DataType::Float16,
1335  DataType::QAsymmS8,
1336  DataType::QAsymmU8,
1337  DataType::QSymmS16,
1338  DataType::Signed32
1339  };
1340 
1341  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1342  "Reference maximum: input 0 is not a supported type.");
1343 
1344  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1345  "Reference maximum: input 1 is not a supported type.");
1346 
1347  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1348  "Reference maximum: output is not a supported type.");
1349 
1350  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1351  "Reference maximum: input 0 and Input 1 types are mismatched");
1352 
1353  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1354  "Reference maximum: input and output types are mismatched");
1355 
1356  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1357  "Reference maximum: shapes are not suitable for implicit broadcast.");
1358 
1359  return supported;
1360 }
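The last rule above requires the two input shapes to be implicitly broadcastable to the output shape. A small sketch with assumed shapes:

    #include <RefLayerSupport.hpp>
    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>
    #include <iostream>

    int main()
    {
        using namespace armnn;

        TensorInfo input0({ 1, 2, 3, 4 }, DataType::Float32);
        TensorInfo input1({ 1, 1, 1, 4 }, DataType::Float32); // broadcasts along the middle dimensions
        TensorInfo output({ 1, 2, 3, 4 }, DataType::Float32); // element-wise maximum of the broadcast pair

        RefLayerSupport layerSupport;
        std::string reason;
        Optional<std::string&> reasonIfUnsupported(reason);

        bool supported = layerSupport.IsMaximumSupported(input0, input1, output, reasonIfUnsupported);
        std::cout << (supported ? "Maximum supported" : reason) << std::endl;
        return 0;
    }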

◆ IsMeanSupported()

bool IsMeanSupported ( const TensorInfo input,
const TensorInfo output,
const MeanDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1362 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetNumDimensions(), MeanDescriptor::m_Axis, MeanDescriptor::m_KeepDims, armnn::numeric_cast(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

1366 {
1367  bool supported = true;
1368  std::string meanLayerStr = "Mean";
1369  std::string outputTensorStr = "output";
1370 
1371  std::array<DataType,6> supportedTypes =
1372  {
1373  DataType::BFloat16,
1374  DataType::Float32,
1375  DataType::Float16,
1376  DataType::QAsymmS8,
1377  DataType::QAsymmU8,
1378  DataType::QSymmS16
1379  };
1380 
1381  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1382  "Reference Mean: input type not supported.");
1383 
1384  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1385  "Reference Mean: input and output types are mismatched");
1386 
1387  if (descriptor.m_KeepDims)
1388  {
1389  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, input.GetNumDimensions()),
1390  reasonIfUnsupported,
1391  CreateIncorrectDimensionsErrorMsg(input.GetNumDimensions(),
1392  output.GetNumDimensions(),
1393  meanLayerStr, outputTensorStr).data());
1394  }
1395  else if (descriptor.m_Axis.empty())
1396  {
1397  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1398  reasonIfUnsupported,
1399  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1400  meanLayerStr, outputTensorStr).data());
1401  }
1402  else
1403  {
1404  auto outputDim = input.GetNumDimensions() - armnn::numeric_cast<unsigned int>(descriptor.m_Axis.size());
1405 
1406  if (outputDim > 0)
1407  {
1408  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, outputDim),
1409  reasonIfUnsupported,
1410  CreateIncorrectDimensionsErrorMsg(outputDim, output.GetNumDimensions(),
1411  meanLayerStr, outputTensorStr).data());
1412  }
1413  else
1414  {
1415  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1416  reasonIfUnsupported,
1417  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1418  meanLayerStr, outputTensorStr).data());
1419  }
1420  }
1421 
1422  return supported;
1423 }
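The dimension checks above are the subtle part: with m_KeepDims the output rank must match the input rank, otherwise it must equal the input rank minus the number of reduced axes (or 1 when every axis is reduced). A sketch with assumed shapes, reducing two of four axes:

    #include <RefLayerSupport.hpp>
    #include <armnn/Descriptors.hpp>
    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>
    #include <iostream>

    int main()
    {
        using namespace armnn;

        TensorInfo input({ 1, 3, 2, 4 }, DataType::Float32);
        TensorInfo output({ 1, 4 },      DataType::Float32); // 4 dims - 2 reduced axes = rank 2

        MeanDescriptor descriptor;
        descriptor.m_Axis     = { 1, 2 };  // reduce the two middle dimensions
        descriptor.m_KeepDims = false;

        RefLayerSupport layerSupport;
        std::string reason;
        Optional<std::string&> reasonIfUnsupported(reason);

        bool supported = layerSupport.IsMeanSupported(input, output, descriptor, reasonIfUnsupported);
        std::cout << (supported ? "Mean supported" : reason) << std::endl;
        return 0;
    }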

◆ IsMemCopySupported()

bool IsMemCopySupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1433 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

1436 {
1437  bool supported = true;
1438 
1439  std::array<DataType,7> supportedTypes =
1440  {
1441  DataType::BFloat16,
1442  DataType::Float32,
1443  DataType::Float16,
1444  DataType::QAsymmS8,
1445  DataType::QAsymmU8,
1446  DataType::QSymmS16,
1447  DataType::Boolean
1448  };
1449 
1450  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1451  "Reference MemCopy: input type not supported");
1452 
1453  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1454  "Reference MemCopy: output type not supported");
1455 
1456  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1457  "Reference MemCopy: input and output types are mismatched");
1458 
1459  return supported;
1460 }

◆ IsMergerSupported()

bool IsMergerSupported ( const std::vector< const TensorInfo *>  inputs,
const TensorInfo output,
const MergerDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1425 of file RefLayerSupport.cpp.

References RefLayerSupport::IsConcatSupported().

1429 {
1430  return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
1431 }

◆ IsMinimumSupported()

bool IsMinimumSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1462 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

1466 {
1467  bool supported = true;
1468 
1469  std::array<DataType,7> supportedTypes = {
1470  DataType::BFloat16,
1471  DataType::Float32,
1472  DataType::Float16,
1473  DataType::QAsymmS8,
1474  DataType::QAsymmU8,
1475  DataType::QSymmS16,
1476  DataType::Signed32
1477  };
1478 
1479  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1480  "Reference minimum: input 0 is not a supported type.");
1481 
1482  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1483  "Reference minimum: input 1 is not a supported type.");
1484 
1485  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1486  "Reference minimum: output is not a supported type.");
1487 
1488  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1489  "Reference minimum: input 0 and Input 1 types are mismatched");
1490 
1491  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1492  "Reference minimum: input and output types are mismatched");
1493 
1494  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1495  "Reference minimum: shapes are not suitable for implicit broadcast.");
1496 
1497  return supported;
1498 }

◆ IsMultiplicationSupported()

bool IsMultiplicationSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1500 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

1504 {
1505  bool supported = true;
1506 
1507  std::array<DataType,7> supportedTypes = {
1508  DataType::BFloat16,
1509  DataType::Float32,
1510  DataType::Float16,
1511  DataType::QAsymmS8,
1512  DataType::QAsymmU8,
1513  DataType::QSymmS16,
1514  DataType::Signed32
1515  };
1516 
1517  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1518  "Reference multiplication: input 0 is not a supported type.");
1519 
1520  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1521  "Reference multiplication: input 1 is not a supported type.");
1522 
1523  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1524  "Reference multiplication: output is not a supported type.");
1525 
1526  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1527  "Reference multiplication: input 0 and Input 1 types are mismatched");
1528 
1529  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1530  "Reference multiplication: input and output types are mismatched");
1531 
1532  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1533  "Reference multiplication: shapes are not suitable for implicit broadcast.");
1534 
1535  return supported;
1536 }

◆ IsNormalizationSupported()

bool IsNormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const NormalizationDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1538 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

1542 {
1543  IgnoreUnused(descriptor);
1544 
1545  // Define supported types
1546  std::array<DataType, 6> supportedTypes =
1547  {
1548  DataType::BFloat16,
1549  DataType::Float32,
1550  DataType::Float16,
1551  DataType::QAsymmS8,
1552  DataType::QAsymmU8,
1553  DataType::QSymmS16
1554  };
1555 
1556  bool supported = true;
1557 
1558  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1559  "Reference normalization: input type not supported.");
1560 
1561  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1562  "Reference normalization: output type not supported.");
1563 
1564  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1565  "Reference normalization: input and output shapes have different "
1566  "num total elements.");
1567 
1568  return supported;
1569 }

◆ IsOutputSupported()

bool IsOutputSupported ( const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1571 of file RefLayerSupport.cpp.

1573 {
1574  return true;
1575 }

◆ IsPadSupported()

bool IsPadSupported ( const TensorInfo input,
const TensorInfo output,
const PadDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1577 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

1581 {
1582  IgnoreUnused(descriptor);
1583  bool supported = true;
1584 
1585  // Define supported output and inputs types.
1586  std::array<DataType,6> supportedTypes =
1587  {
1588  DataType::BFloat16,
1589  DataType::Float32,
1590  DataType::Float16,
1591  DataType::QAsymmS8,
1592  DataType::QAsymmU8,
1593  DataType::QSymmS16
1594  };
1595 
1596  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1597  "Reference pad: input is not a supported type.");
1598 
1599  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1600  "Reference pad: output is not a supported type.");
1601 
1602  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1603  "Reference pad: input and output types are mismatched.");
1604 
1605  return supported;
1606 }

◆ IsPermuteSupported()

bool IsPermuteSupported ( const TensorInfo input,
const TensorInfo output,
const PermuteDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1608 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

1612 {
1613  IgnoreUnused(descriptor);
1614  bool supported = true;
1615 
1616  // Define supported output and inputs types.
1617  std::array<DataType, 6> supportedTypes =
1618  {
1619  DataType::BFloat16,
1620  DataType::Float32,
1621  DataType::Float16,
1622  DataType::QAsymmS8,
1623  DataType::QAsymmU8,
1624  DataType::QSymmS16
1625  };
1626 
1627  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1628  "Reference permute: input is not a supported type.");
1629 
1630  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1631  "Reference permute: output is not a supported type.");
1632 
1633  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1634  "Reference permute: input and output types are mismatched.");
1635 
1636  return supported;
1637 }

◆ IsPooling2dSupported()

bool IsPooling2dSupported ( const TensorInfo input,
const TensorInfo output,
const Pooling2dDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1639 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

1643 {
1644  IgnoreUnused(descriptor);
1645  bool supported = true;
1646 
1647  // Define supported output and inputs types.
1648  std::array<DataType,6> supportedTypes =
1649  {
1650  DataType::BFloat16,
1651  DataType::Float32,
1652  DataType::Float16,
1653  DataType::QAsymmS8,
1654  DataType::QAsymmU8,
1655  DataType::QSymmS16
1656  };
1657 
1658  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1659  "Reference poolind2d: input is not a supported type.");
1660 
1661  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1662  "Reference poolind2d: output is not a supported type.");
1663 
1664  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1665  "Reference poolind2d: input and output types are mismatched.");
1666 
1667  return supported;
1668 }

◆ IsPreluSupported()

bool IsPreluSupported ( const TensorInfo input,
const TensorInfo alpha,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 2137 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

2141 {
2142  bool supported = true;
2143 
2144  std::array<DataType, 6> supportedTypes
2145  {
2146  DataType::BFloat16,
2147  DataType::Float32,
2148  DataType::Float16,
2149  DataType::QAsymmS8,
2150  DataType::QAsymmU8,
2151  DataType::QSymmS16
2152  };
2153 
2154  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2155  "PReLU: input is not a supported type.");
2156 
2157  supported &= CheckSupportRule(TypeAnyOf(alpha, supportedTypes), reasonIfUnsupported,
2158  "PReLU: alpha is not a supported type.");
2159 
2160  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2161  "PReLU: output is not a supported type.");
2162 
2163  supported &= CheckSupportRule(TypesAreEqual(input, alpha, output), reasonIfUnsupported,
2164  "PReLU: input, alpha and output types are mismatched");
2165 
2166  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input, alpha, output), reasonIfUnsupported,
2167  "PReLU: shapes are not suitable for implicit broadcast");
2168 
2169  return supported;
2170 }

◆ IsQLstmSupported()

bool IsQLstmSupported ( const TensorInfo input,
const TensorInfo previousOutputIn,
const TensorInfo previousCellStateIn,
const TensorInfo outputStateOut,
const TensorInfo cellStateOut,
const TensorInfo output,
const QLstmDescriptor descriptor,
const LstmInputParamsInfo paramsInfo,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1670 of file RefLayerSupport.cpp.

References armnn::IgnoreUnused().

1679 {
1680  IgnoreUnused(input);
1681  IgnoreUnused(previousOutputIn);
1682  IgnoreUnused(previousCellStateIn);
1683  IgnoreUnused(outputStateOut);
1684  IgnoreUnused(cellStateOut);
1685  IgnoreUnused(output);
1686  IgnoreUnused(descriptor);
1687  IgnoreUnused(paramsInfo);
1688 
1689  IgnoreUnused(reasonIfUnsupported);
1690 
1691  return true;
1692 }

◆ IsQuantizeSupported()

bool IsQuantizeSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1694 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

1697 {
1698  bool supported = true;
1699 
1700  // Define supported input types.
1701  std::array<DataType,7> supportedInputTypes = {
1702  DataType::BFloat16,
1703  DataType::Float32,
1704  DataType::Float16,
1705  DataType::QAsymmS8,
1706  DataType::QAsymmU8,
1707  DataType::QSymmS8,
1708  DataType::QSymmS16
1709  };
1710 
1711  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
1712  "Reference quantize: input type not supported.");
1713 
1714  // Define supported output types.
1715  std::array<DataType,4> supportedOutputTypes = {
1716  DataType::QAsymmS8,
1717  DataType::QAsymmU8,
1718  DataType::QSymmS8,
1719  DataType::QSymmS16
1720  };
1721  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
1722  "Reference quantize: output type not supported.");
1723 
1724  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1725  "Reference quantize: input and output shapes have different num total elements.");
1726 
1727  return supported;
1728 }
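A sketch (with an assumed quantization scale and zero point) of a Float32 to QAsymmU8 quantize query: the input must be one of the types above, the output one of the four quantized output types, and both shapes must hold the same number of elements.

    #include <RefLayerSupport.hpp>
    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>
    #include <iostream>

    int main()
    {
        using namespace armnn;

        TensorInfo input({ 1, 4 },  DataType::Float32);
        TensorInfo output({ 1, 4 }, DataType::QAsymmU8, 0.05f, 128); // quantization scale and zero point

        RefLayerSupport layerSupport;
        std::string reason;
        Optional<std::string&> reasonIfUnsupported(reason);

        bool supported = layerSupport.IsQuantizeSupported(input, output, reasonIfUnsupported);
        std::cout << (supported ? "Quantize supported" : reason) << std::endl;
        return 0;
    }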

◆ IsRankSupported()

bool IsRankSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1730 of file RefLayerSupport.cpp.

References armnn::CheckSupportRule(), armnn::IgnoreUnused(), and armnn::Signed32.

1733 {
1734  IgnoreUnused(input);
1735  // Define supported output types.
1736  std::array<DataType,1> supportedOutputTypes =
1737  {
1738  DataType::Signed32
1739  };
1740 
1741  return CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
1742  "Reference rank: input type not supported.");
1743 }
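Only the output type is inspected: Rank always produces a Signed32 value and the input is ignored. In this sketch a one-element Signed32 tensor stands in for the scalar output; that shape is an assumption for illustration only.

    #include <RefLayerSupport.hpp>
    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>
    #include <iostream>

    int main()
    {
        using namespace armnn;

        TensorInfo input({ 2, 3, 4 }, DataType::Float32);
        TensorInfo output({ 1 },      DataType::Signed32); // rank is reported as a Signed32 value

        RefLayerSupport layerSupport;
        std::string reason;
        Optional<std::string&> reasonIfUnsupported(reason);

        bool supported = layerSupport.IsRankSupported(input, output, reasonIfUnsupported);
        std::cout << (supported ? "Rank supported" : reason) << std::endl;
        return 0;
    }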

◆ IsReduceSupported()

bool IsReduceSupported ( const TensorInfo input,
const TensorInfo output,
const ReduceDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1745 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

1749 {
1750  IgnoreUnused(descriptor);
1751  bool supported = true;
1752  std::array<DataType,7> supportedTypes =
1753  {
1754  DataType::BFloat16,
1755  DataType::Float32,
1756  DataType::Float16,
1757  DataType::QAsymmS8,
1758  DataType::QAsymmU8,
1759  DataType::QSymmS16,
1760  DataType::Signed32
1761  };
1762 
1763  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1764  "Reference Reduce: input type not supported");
1765 
1766  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1767  "Reference Reduce: output type not supported");
1768 
1769  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1770  "Reference Reduce: input and output types not matching");
1771 
1772  return supported;
1773 }

◆ IsReshapeSupported()

bool IsReshapeSupported ( const TensorInfo input,
const TensorInfo output,
const ReshapeDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1775 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

1779 {
1780  IgnoreUnused(output);
1781  IgnoreUnused(descriptor);
1782  // Define supported output types.
1783  std::array<DataType,8> supportedOutputTypes =
1784  {
1785  DataType::BFloat16,
1786  DataType::Float32,
1787  DataType::Float16,
1788  DataType::Signed32,
1789  DataType::QAsymmS8,
1790  DataType::QAsymmU8,
1791  DataType::QSymmS16,
1792  DataType::Boolean
1793  };
1794 
1795  return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
1796  "Reference reshape: input type not supported.");
1797 }

◆ IsResizeBilinearSupported()

bool IsResizeBilinearSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1799 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

1802 {
1803  bool supported = true;
1804  std::array<DataType,6> supportedTypes =
1805  {
1806  DataType::BFloat16,
1807  DataType::Float32,
1808  DataType::Float16,
1809  DataType::QAsymmS8,
1810  DataType::QAsymmU8,
1811  DataType::QSymmS16
1812  };
1813 
1814  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1815  "Reference ResizeBilinear: input type not supported");
1816 
1817  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1818  "Reference ResizeBilinear: output type not supported");
1819 
1820  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1821  "Reference ResizeBilinear: input and output types not matching");
1822 
1823  return supported;
1824 }

◆ IsResizeSupported()

bool IsResizeSupported ( const TensorInfo input,
const TensorInfo output,
const ResizeDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1826 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

1830 {
1831  IgnoreUnused(descriptor);
1832  bool supported = true;
1833  std::array<DataType,6> supportedTypes =
1834  {
1835  DataType::BFloat16,
1836  DataType::Float32,
1837  DataType::Float16,
1838  DataType::QAsymmS8,
1839  DataType::QAsymmU8,
1840  DataType::QSymmS16
1841  };
1842 
1843  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1844  "Reference Resize: input type not supported");
1845 
1846  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1847  "Reference Resize: output type not supported");
1848 
1849  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1850  "Reference Resize: input and output types not matching");
1851 
1852  return supported;
1853 }

◆ IsRsqrtSupported()

bool IsRsqrtSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1855 of file RefLayerSupport.cpp.

References RefLayerSupport::IsElementwiseUnarySupported(), and armnn::Rsqrt.

1858 {
1859  return IsElementwiseUnarySupported(input,
1860  output,
1861  ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt),
1862  reasonIfUnsupported);
1863 }

◆ IsSliceSupported()

bool IsSliceSupported ( const TensorInfo input,
const TensorInfo output,
const SliceDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1865 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

1869 {
1870  IgnoreUnused(descriptor);
1871  bool supported = true;
1872 
1873  std::array<DataType, 5> supportedTypes =
1874  {
1875  DataType::BFloat16,
1876  DataType::Float32,
1877  DataType::QAsymmS8,
1878  DataType::QAsymmU8,
1879  DataType::QSymmS16
1880  };
1881 
1882  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1883  "Reference Slice: input type not supported");
1884 
1885  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1886  "Reference Slice: output type not supported");
1887 
1888  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1889  "Reference Slice: input and output types are mismatched");
1890 
1891  return supported;
1892 }

◆ IsSoftmaxSupported()

bool IsSoftmaxSupported ( const TensorInfo input,
const TensorInfo output,
const SoftmaxDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1894 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

1898 {
1899  IgnoreUnused(descriptor);
1900  bool supported = true;
1901  std::array<DataType,7> supportedTypes =
1902  {
1903  DataType::BFloat16,
1904  DataType::Float32,
1905  DataType::Float16,
1906  DataType::QSymmS8,
1907  DataType::QAsymmS8,
1908  DataType::QAsymmU8,
1909  DataType::QSymmS16
1910  };
1911 
1912  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1913  "Reference Softmax: output type not supported");
1914 
1915  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1916  "Reference Softmax: input type not supported");
1917 
1918  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1919  "Reference Softmax: input type not supported");
1920 
1921  return supported;
1922 }

◆ IsSpaceToBatchNdSupported()

bool IsSpaceToBatchNdSupported ( const TensorInfo input,
const TensorInfo output,
const SpaceToBatchNdDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1924 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

1928 {
1929  IgnoreUnused(descriptor);
1930  bool supported = true;
1931  std::array<DataType,6> supportedTypes =
1932  {
1933  DataType::BFloat16,
1934  DataType::Float32,
1935  DataType::Float16,
1936  DataType::QAsymmS8,
1937  DataType::QAsymmU8,
1938  DataType::QSymmS16
1939  };
1940 
1941  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1942  "Reference SpaceToBatchNd: input type not supported");
1943 
1944  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1945  "Reference SpaceToBatchNd: output type not supported");
1946 
1947  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1948  "Reference SpaceToBatchNd: input and output types are mismatched");
1949 
1950  return supported;
1951 }

◆ IsSpaceToDepthSupported()

bool IsSpaceToDepthSupported ( const TensorInfo input,
const TensorInfo output,
const SpaceToDepthDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1953 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

1957 {
1958 
1959  IgnoreUnused(descriptor);
1960  bool supported = true;
1961 
1962  std::array<DataType,6> supportedTypes =
1963  {
1964  DataType::BFloat16,
1965  DataType::Float32,
1966  DataType::Float16,
1967  DataType::QAsymmS8,
1968  DataType::QAsymmU8,
1969  DataType::QSymmS16
1970  };
1971 
1972  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1973  "Reference SpaceToDepth: input type not supported");
1974 
1975  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1976  "Reference SpaceToDepth: output type not supported");
1977 
1978  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1979  "Reference SpaceToDepth: input and output types are mismatched");
1980 
1981  return supported;
1982 }

◆ IsSplitterSupported() [1/2]

bool IsSplitterSupported ( const TensorInfo input,
const ViewsDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1984 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

1987 {
1988  IgnoreUnused(descriptor);
1989  bool supported = true;
1990  std::array<DataType,6> supportedTypes =
1991  {
1992  DataType::BFloat16,
1993  DataType::Float32,
1994  DataType::Float16,
1995  DataType::QAsymmS8,
1996  DataType::QAsymmU8,
1997  DataType::QSymmS16
1998  };
1999 
2000  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2001  "Reference splitter: input type not supported");
2002 
2003  return supported;
2004 }

◆ IsSplitterSupported() [2/2]

bool IsSplitterSupported ( const TensorInfo input,
const std::vector< std::reference_wrapper< TensorInfo >> &  outputs,
const ViewsDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 2006 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

2010 {
2011  IgnoreUnused(descriptor);
2012  bool supported = true;
2013  std::array<DataType,6> supportedTypes =
2014  {
2015  DataType::BFloat16,
2016  DataType::Float32,
2017  DataType::Float16,
2018  DataType::QAsymmS8,
2019  DataType::QAsymmU8,
2020  DataType::QSymmS16
2021  };
2022 
2023  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2024  "Reference splitter: output type not supported");
2025  for (const TensorInfo& output : outputs)
2026  {
2027  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2028  "Reference splitter: input type not supported");
2029 
2030  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2031  "Reference splitter: input and output types mismatched.");
2032  }
2033 
2034  return supported;
2035 }

◆ IsStackSupported()

bool IsStackSupported ( const std::vector< const TensorInfo *> &  inputs,
const TensorInfo output,
const StackDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 2037 of file RefLayerSupport.cpp.

References ARMNN_ASSERT, armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

2041 {
2042  IgnoreUnused(descriptor);
2043 
2044  bool supported = true;
2045  std::array<DataType,6> supportedTypes =
2046  {
2047  DataType::BFloat16,
2048  DataType::Float32,
2049  DataType::Float16,
2050  DataType::QAsymmS8,
2051  DataType::QAsymmU8,
2052  DataType::QSymmS16
2053  };
2054 
2055  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2056  "Reference stack: output type not supported");
2057  for (const TensorInfo* input : inputs)
2058  {
2059  ARMNN_ASSERT(input != nullptr);
2060  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
2061  "Reference stack: input type not supported");
2062 
2063  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
2064  "Reference stack: input and output types mismatched.");
2065  }
2066 
2067  return supported;
2068 }

◆ IsStridedSliceSupported()

bool IsStridedSliceSupported ( const TensorInfo input,
const TensorInfo output,
const StridedSliceDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 2070 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

2074 {
2075  IgnoreUnused(descriptor);
2076  bool supported = true;
2077 
2078  std::array<DataType,5> supportedTypes =
2079  {
2080  DataType::BFloat16,
2081  DataType::Float32,
2082  DataType::QAsymmS8,
2083  DataType::QAsymmU8,
2084  DataType::QSymmS16
2085  };
2086 
2087  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2088  "Reference StridedSlice: input type not supported");
2089 
2090  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2091  "Reference StridedSlice: output type not supported");
2092 
2093  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2094  "Reference StridedSlice: input and output types are mismatched");
2095 
2096  return supported;
2097 }

◆ IsSubtractionSupported()

bool IsSubtractionSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 2099 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

2103 {
2104  bool supported = true;
2105 
2106  std::array<DataType,7> supportedTypes = {
2107  DataType::BFloat16,
2108  DataType::Float32,
2109  DataType::Float16,
2110  DataType::QAsymmS8,
2111  DataType::QAsymmU8,
2112  DataType::QSymmS16,
2113  DataType::Signed32
2114  };
2115 
2116  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2117  "Reference subtraction: input 0 is not a supported type.");
2118 
2119  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2120  "Reference subtraction: input 1 is not a supported type.");
2121 
2122  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2123  "Reference subtraction: output is not a supported type.");
2124 
2125  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2126  "Reference subtraction: input 0 and Input 1 types are mismatched");
2127 
2128  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2129  "Reference subtraction: input and output types are mismatched");
2130 
2131  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2132  "Reference subtraction: shapes are not suitable for implicit broadcast.");
2133 
2134  return supported;
2135 }

◆ IsTransposeConvolution2dSupported()

bool IsTransposeConvolution2dSupported ( const TensorInfo input,
const TensorInfo output,
const TransposeConvolution2dDescriptor descriptor,
const TensorInfo weights,
const Optional< TensorInfo > &  biases,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 2172 of file RefLayerSupport.cpp.

References ARMNN_NO_DEPRECATE_WARN_BEGIN, ARMNN_NO_DEPRECATE_WARN_END, armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::QuantizedSymm8PerAxis, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

2178 {
2179  IgnoreUnused(descriptor);
2180  bool supported = true;
2181 
2182  std::array<DataType,7> supportedTypes =
2183  {
2184  DataType::BFloat16,
2185  DataType::Float32,
2186  DataType::Float16,
2187  DataType::QAsymmS8,
2188  DataType::QAsymmU8,
2189  DataType::QSymmS16,
2190  DataType::Signed32
2191  };
2192 
2193  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2194  "Reference TransposeConvolution2d: input is not a supported type.");
2195 
2196  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2197  "Reference TransposeConvolution2d: output is not a supported type.");
2198 
2199  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2200  "Reference TransposeConvolution2d: input and output types mismatched.");
2201 
2202 
2203  const DataType inputType = input.GetDataType();
2204  if (IsQuantized8BitType(inputType))
2205  {
2206  ARMNN_NO_DEPRECATE_WARN_BEGIN
2207  std::array<DataType, 4> supportedWeightTypes =
2208  {
2209  DataType::QAsymmS8,
2210  DataType::QAsymmU8,
2211  DataType::QSymmS8,
2212  DataType::QuantizedSymm8PerAxis //Deprecated
2213  };
2214  ARMNN_NO_DEPRECATE_WARN_END
2215 
2216  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
2217  "Reference TransposeConvolution2d: weights type not supported for "
2218  "quantized input.");
2219  }
2220  else
2221  {
2222  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
2223  "Reference TransposeConvolution2d: weights is not a supported type.");
2224 
2225  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
2226  "Reference TransposeConvolution2d: input and weights types mismatched.");
2227  }
2228 
2229  if (biases.has_value())
2230  {
2231  std::array<DataType,4> biasesSupportedTypes =
2232  {
2233  DataType::BFloat16,
2234  DataType::Float32,
2235  DataType::Float16,
2236  DataType::Signed32
2237  };
2238  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
2239  "Reference TransposeConvolution2d: biases is not a supported type.");
2240  }
2241 
2242  return supported;
2243 }
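A sketch of the floating-point path: with a Float32 input the weights must simply match the input type, and the optional biases, when present, must be one of the four bias types. All shapes and strides below are assumptions; the check validates types, not geometry.

    #include <RefLayerSupport.hpp>
    #include <armnn/Descriptors.hpp>
    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>
    #include <iostream>

    int main()
    {
        using namespace armnn;

        TensorInfo input({ 1, 3, 3, 1 },   DataType::Float32); // NHWC
        TensorInfo weights({ 1, 3, 3, 1 }, DataType::Float32);
        TensorInfo biasInfo({ 1 },         DataType::Float32);
        TensorInfo output({ 1, 7, 7, 1 },  DataType::Float32);

        TransposeConvolution2dDescriptor descriptor;
        descriptor.m_StrideX     = 2;
        descriptor.m_StrideY     = 2;
        descriptor.m_BiasEnabled = true;
        descriptor.m_DataLayout  = DataLayout::NHWC;

        Optional<TensorInfo> biases(biasInfo);

        RefLayerSupport layerSupport;
        std::string reason;
        Optional<std::string&> reasonIfUnsupported(reason);

        bool supported = layerSupport.IsTransposeConvolution2dSupported(input, output, descriptor,
                                                                        weights, biases, reasonIfUnsupported);
        std::cout << (supported ? "TransposeConvolution2d supported" : reason) << std::endl;
        return 0;
    }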

◆ IsTransposeSupported()

bool IsTransposeSupported ( const TensorInfo input,
const TensorInfo output,
const TransposeDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 2245 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

2249 {
2250  IgnoreUnused(descriptor);
2251  bool supported = true;
2252 
2253  // Define supported output and inputs types.
2254  std::array<DataType, 6> supportedTypes =
2255  {
2256  DataType::BFloat16,
2257  DataType::Float32,
2258  DataType::Float16,
2259  DataType::QAsymmS8,
2260  DataType::QAsymmU8,
2261  DataType::QSymmS16
2262  };
2263 
2264  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2265  "Reference transpose: input is not a supported type.");
2266 
2267  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2268  "Reference transpose: output is not a supported type.");
2269 
2270  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2271  "Reference transpose: input and output types are mismatched.");
2272 
2273  return supported;
2274 }

The documentation for this class was generated from the following files: