ArmNN 23.05
LayerSupportHandle Class Reference

#include <BackendHelper.hpp>

Public Member Functions

 LayerSupportHandle (std::shared_ptr< ILayerSupport > layerSupport)
 
 LayerSupportHandle (std::shared_ptr< ILayerSupport > layerSupport, const BackendId &backendId)
 
bool IsBackendRegistered () const
 
bool IsActivationSupported (const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsAdditionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsArgMinMaxSupported (const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsBatchMatMulSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsBatchNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsBatchToSpaceNdSupported (const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsCastSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsChannelShuffleSupported (const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsComparisonSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsConcatSupported (const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsConstantSupported (const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsConvertFp16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsConvertFp32ToFp16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsConvolution3dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsDebugSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsDepthToSpaceSupported (const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsDequantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsDetectionPostProcessSupported (const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsDilatedDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsDivisionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsElementwiseBinarySupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ElementwiseBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsElementwiseUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsFakeQuantizationSupported (const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsFillSupported (const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsFloorSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsFullyConnectedSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsGatherNdSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsInputSupported (const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsInstanceNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsL2NormalizationSupported (const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsLogicalBinarySupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsLogicalUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsLogSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsMaximumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsMeanSupported (const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsMemCopySupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsMemImportSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsMergeSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsMinimumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsMultiplicationSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsOutputSupported (const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsPadSupported (const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsPermuteSupported (const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsPooling2dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsPooling3dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsPreCompiledSupported (const TensorInfo &input, const PreCompiledDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsPreluSupported (const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsQuantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsQLstmSupported (const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsQuantizedLstmSupported (const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsRankSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsReduceSupported (const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsReshapeSupported (const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsResizeSupported (const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsShapeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsSliceSupported (const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsSpaceToBatchNdSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsSpaceToDepthSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsSplitterSupported (const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsStackSupported (const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsStandInSupported (const std::vector< const TensorInfo * > &inputs, const std::vector< const TensorInfo * > &outputs, const StandInDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsStridedSliceSupported (const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsSubtractionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsSwitchSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output0, const TensorInfo &output1, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsTransposeConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsTransposeSupported (const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsUnidirectionalSequenceLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 

Detailed Description

Definition at line 29 of file BackendHelper.hpp.
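
A LayerSupportHandle is normally obtained from the backend registry rather than constructed directly. The following sketch is illustrative only: it assumes the free function armnn::GetILayerSupportByBackendId() declared in this header, a registered CpuRef backend, and arbitrary tensor shapes.

// Sketch: obtain a handle for a backend and run a simple support query.
#include <armnn/BackendHelper.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <iostream>
#include <string>

int main()
{
    armnn::BackendId backend("CpuRef");
    armnn::LayerSupportHandle handle = armnn::GetILayerSupportByBackendId(backend);

    if (!handle.IsBackendRegistered())
    {
        std::cerr << "Backend " << backend.Get() << " is not registered.\n";
        return 1;
    }

    // Ask whether a Float32 -> Float16 Cast of a 1x2x2x3 tensor is supported.
    armnn::TensorInfo input({1, 2, 2, 3}, armnn::DataType::Float32);
    armnn::TensorInfo output({1, 2, 2, 3}, armnn::DataType::Float16);

    std::string reason;
    bool supported = handle.IsCastSupported(input, output, armnn::Optional<std::string&>(reason));
    std::cout << "Cast supported: " << supported << " " << reason << "\n";
    return 0;
}

Every IsXSupported() query follows the same pattern: pass the candidate TensorInfo objects and, where applicable, the layer descriptor, and optionally bind a std::string through Optional<std::string&> to receive the reason when the query returns false.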

Constructor & Destructor Documentation

◆ LayerSupportHandle() [1/2]

LayerSupportHandle ( std::shared_ptr< ILayerSupport >  layerSupport )
inline explicit

Definition at line 32 of file BackendHelper.hpp.

33  : m_LayerSupport(std::move(layerSupport)), m_BackendId(Compute::Undefined) {};

References armnn::Undefined.

◆ LayerSupportHandle() [2/2]

LayerSupportHandle ( std::shared_ptr< ILayerSupport >  layerSupport,
const BackendId &  backendId 
)
inline explicit

Definition at line 35 of file BackendHelper.hpp.

36  : m_LayerSupport(std::move(layerSupport)), m_BackendId(backendId) {};

Member Function Documentation

◆ IsActivationSupported()

bool IsActivationSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ActivationDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 144 of file BackendHelper.cpp.

148 {
149  TensorInfos infos{input, output};
150 
151  return m_LayerSupport->IsLayerSupported(LayerType::Activation,
152  infos,
153  descriptor,
154  EmptyOptional(),
155  EmptyOptional(),
156  reasonIfUnsupported);
157 }

References armnn::Activation.
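
For illustration, a typical activation query builds the descriptor first and then passes the input and output infos. This is a sketch under assumed shapes; the helper name CheckReluSupport and the 1x2x2x3 shape are hypothetical.

// Sketch: query ReLU support through an existing LayerSupportHandle.
#include <armnn/BackendHelper.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <string>

bool CheckReluSupport(armnn::LayerSupportHandle& handle, std::string& reason)
{
    armnn::TensorInfo input({1, 2, 2, 3}, armnn::DataType::Float32); // arbitrary shape
    armnn::TensorInfo output(input);                                 // activation keeps the shape

    armnn::ActivationDescriptor descriptor;
    descriptor.m_Function = armnn::ActivationFunction::ReLu;

    return handle.IsActivationSupported(input, output, descriptor,
                                        armnn::Optional<std::string&>(reason));
}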

◆ IsAdditionSupported()

bool IsAdditionSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 159 of file BackendHelper.cpp.

163 {
164  TensorInfos infos{input0, input1, output};
165 
166  return m_LayerSupport->IsLayerSupported(LayerType::Addition,
167  infos,
168  BaseDescriptor(),
169  EmptyOptional(),
170  EmptyOptional(),
171  reasonIfUnsupported);
172 }

References armnn::Addition.

◆ IsArgMinMaxSupported()

bool IsArgMinMaxSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ArgMinMaxDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 174 of file BackendHelper.cpp.

178 {
179  TensorInfos infos{input, output};
180 
181  return m_LayerSupport->IsLayerSupported(LayerType::ArgMinMax,
182  infos,
183  descriptor,
184  EmptyOptional(),
185  EmptyOptional(),
186  reasonIfUnsupported);
187 }

References armnn::ArgMinMax.

◆ IsBackendRegistered()

bool IsBackendRegistered ( ) const

Definition at line 132 of file BackendHelper.cpp.

133 {
134  if (m_LayerSupport)
135  {
136  return true;
137  }
138 
139  return false;
140 }

◆ IsBatchMatMulSupported()

bool IsBatchMatMulSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
const BatchMatMulDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 189 of file BackendHelper.cpp.

194 {
195  TensorInfos infos{input0, input1, output};
196 
197  return m_LayerSupport->IsLayerSupported(LayerType::BatchMatMul,
198  infos,
199  descriptor,
200  EmptyOptional(),
201  EmptyOptional(),
202  reasonIfUnsupported);
203 }

References armnn::BatchMatMul.

◆ IsBatchNormalizationSupported()

bool IsBatchNormalizationSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const TensorInfo &  mean,
const TensorInfo &  var,
const TensorInfo &  beta,
const TensorInfo &  gamma,
const BatchNormalizationDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 205 of file BackendHelper.cpp.

213 {
214  TensorInfos infos{input, output, mean, var, beta, gamma};
215 
216  return m_LayerSupport->IsLayerSupported(LayerType::BatchNormalization,
217  infos,
218  descriptor,
219  EmptyOptional(),
220  EmptyOptional(),
221  reasonIfUnsupported);
222 }

References armnn::BatchNormalization.

◆ IsBatchToSpaceNdSupported()

bool IsBatchToSpaceNdSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const BatchToSpaceNdDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 224 of file BackendHelper.cpp.

228 {
229  TensorInfos infos{input, output};
230 
231  return m_LayerSupport->IsLayerSupported(LayerType::BatchToSpaceNd,
232  infos,
233  descriptor,
234  EmptyOptional(),
235  EmptyOptional(),
236  reasonIfUnsupported);
237 }

References armnn::BatchToSpaceNd.

◆ IsCastSupported()

bool IsCastSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 239 of file BackendHelper.cpp.

242 {
243  TensorInfos infos{input, output};
244 
245  return m_LayerSupport->IsLayerSupported(LayerType::Cast,
246  infos,
247  BaseDescriptor(),
248  EmptyOptional(),
249  EmptyOptional(),
250  reasonIfUnsupported);
251 }

References armnn::Cast.

◆ IsChannelShuffleSupported()

bool IsChannelShuffleSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ChannelShuffleDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 253 of file BackendHelper.cpp.

257 {
258  TensorInfos infos{input, output};
259 
260  return m_LayerSupport->IsLayerSupported(LayerType::ChannelShuffle,
261  infos,
262  descriptor,
263  EmptyOptional(),
264  EmptyOptional(),
265  reasonIfUnsupported);
266 }

References armnn::ChannelShuffle.

◆ IsComparisonSupported()

bool IsComparisonSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
const ComparisonDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 268 of file BackendHelper.cpp.

273 {
274  TensorInfos infos{input0, input1, output};
275 
276  return m_LayerSupport->IsLayerSupported(LayerType::Comparison,
277  infos,
278  descriptor,
279  EmptyOptional(),
280  EmptyOptional(),
281  reasonIfUnsupported);
282 }

References armnn::Comparison.

◆ IsConcatSupported()

bool IsConcatSupported ( const std::vector< const TensorInfo * >  inputs,
const TensorInfo &  output,
const OriginsDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 284 of file BackendHelper.cpp.

288 {
289  TensorInfos infos;
290  for (const TensorInfo* inputInfo : inputs)
291  {
292  infos.push_back(*inputInfo);
293  }
294  infos.push_back(output);
295 
296  return m_LayerSupport->IsLayerSupported(LayerType::Concat,
297  infos,
298  descriptor,
299  EmptyOptional(),
300  EmptyOptional(),
301  reasonIfUnsupported);
302 }

References armnn::Concat.
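
For illustration, the inputs are passed as raw pointers to the caller's TensorInfo objects and the OriginsDescriptor can be produced with the CreateDescriptorForConcatenation() helper. This is a sketch: the helper name CheckConcatSupport, the shapes and the concatenation axis are assumptions.

// Sketch: ask whether concatenating two tensors along dimension 1 is supported.
#include <armnn/BackendHelper.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <array>
#include <vector>

bool CheckConcatSupport(armnn::LayerSupportHandle& handle)
{
    armnn::TensorInfo in0({1, 2, 4, 4}, armnn::DataType::Float32);
    armnn::TensorInfo in1({1, 3, 4, 4}, armnn::DataType::Float32);
    armnn::TensorInfo out({1, 5, 4, 4}, armnn::DataType::Float32);

    std::array<armnn::TensorShape, 2> shapes = { in0.GetShape(), in1.GetShape() };
    armnn::OriginsDescriptor descriptor =
        armnn::CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), 1); // concat on axis 1

    std::vector<const armnn::TensorInfo*> inputs = { &in0, &in1 };
    return handle.IsConcatSupported(inputs, out, descriptor);
}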

◆ IsConstantSupported()

bool IsConstantSupported ( const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 304 of file BackendHelper.cpp.

306 {
307  TensorInfos infos{output};
308 
309  return m_LayerSupport->IsLayerSupported(LayerType::Constant,
310  infos,
311  BaseDescriptor(),
312  EmptyOptional(),
313  EmptyOptional(),
314  reasonIfUnsupported);
315 }

References armnn::Constant.

◆ IsConvertFp16ToFp32Supported()

bool IsConvertFp16ToFp32Supported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 317 of file BackendHelper.cpp.

320 {
321  TensorInfos infos{input, output};
322 
323  return m_LayerSupport->IsLayerSupported(LayerType::ConvertFp16ToFp32,
324  infos,
325  BaseDescriptor(),
326  EmptyOptional(),
327  EmptyOptional(),
328  reasonIfUnsupported);
329 }

References armnn::ConvertFp16ToFp32.

◆ IsConvertFp32ToFp16Supported()

bool IsConvertFp32ToFp16Supported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 331 of file BackendHelper.cpp.

334 {
335  TensorInfos infos{input, output};
336 
337  return m_LayerSupport->IsLayerSupported(LayerType::ConvertFp32ToFp16,
338  infos,
339  BaseDescriptor(),
340  EmptyOptional(),
341  EmptyOptional(),
342  reasonIfUnsupported);
343 }

References armnn::ConvertFp32ToFp16.

◆ IsConvolution2dSupported()

bool IsConvolution2dSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const Convolution2dDescriptor &  descriptor,
const TensorInfo &  weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 345 of file BackendHelper.cpp.

351 {
352  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
353  TensorInfos infos{input, output, weights, biasesVal};
354 
355  Optional<const BackendOptions::BackendOption> capability ;
356  if (!m_BackendId.IsUndefined())
357  {
358  capability = GetCapability("NonConstWeights", m_BackendId);
359  if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
360  {
361  if (!weights.IsConstant())
362  {
363  if (reasonIfUnsupported.has_value())
364  {
365  reasonIfUnsupported.value() =
366  "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
367  "Convolution2d weights are set as dynamic (non constant). ";
368  }
369  return false;
370  }
371  if (descriptor.m_BiasEnabled && !biasesVal.IsConstant())
372  {
373  if (reasonIfUnsupported.has_value())
374  {
375  reasonIfUnsupported.value() =
376  "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
377  "Convolution2d biases are set as dynamic (non constant). ";
378  }
379  return false;
380  }
381 
382  // At the first stage we will only print a warning. this is to give
383  // backend developers a chance to adopt and read weights from input slots.
384  ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
385  "If you are a backend developer please find more information in our "
386  "doxygen documentation on github https://github.com/ARM-software/armnn "
387  "under the keyword 'ConstTensorsAsInputs'.";
388  }
389  }
390 
391  return m_LayerSupport->IsLayerSupported(LayerType::Convolution2d,
392  infos,
393  descriptor,
394  EmptyOptional(),
395  EmptyOptional(),
396  reasonIfUnsupported);
397 }

References ARMNN_LOG, armnn::Convolution2d, armnn::GetCapability(), OptionalBase::has_value(), TensorInfo::IsConstant(), BackendId::IsUndefined(), Convolution2dDescriptor::m_BiasEnabled, OptionalReferenceSwitch< std::is_reference< T >::value, T >::value(), OptionalReferenceSwitch< IsReference, T >::value(), and armnn::warning.
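
The NonConstWeights handling above means that, on backends without that capability, the query fails unless the weights (and enabled biases) are flagged as constant on their TensorInfo. Below is a sketch of a query that satisfies this; the NHWC shapes and the helper name CheckConv2dSupport are assumptions.

// Sketch: query Convolution2d support and capture the reason on failure.
#include <armnn/BackendHelper.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <string>

bool CheckConv2dSupport(armnn::LayerSupportHandle& handle, std::string& reason)
{
    armnn::TensorInfo input({1, 8, 8, 3}, armnn::DataType::Float32);    // NHWC
    armnn::TensorInfo output({1, 8, 8, 16}, armnn::DataType::Float32);
    armnn::TensorInfo weights({16, 3, 3, 3}, armnn::DataType::Float32); // OHWI
    armnn::TensorInfo biases({16}, armnn::DataType::Float32);
    weights.SetConstant(true); // avoid the NonConstWeights rejection described above
    biases.SetConstant(true);

    armnn::Convolution2dDescriptor descriptor;
    descriptor.m_StrideX = 1;
    descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = descriptor.m_PadRight = descriptor.m_PadTop = descriptor.m_PadBottom = 1;
    descriptor.m_BiasEnabled = true;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;

    return handle.IsConvolution2dSupported(input, output, descriptor, weights,
                                           armnn::Optional<armnn::TensorInfo>(biases),
                                           armnn::Optional<std::string&>(reason));
}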

◆ IsConvolution3dSupported()

bool IsConvolution3dSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const Convolution3dDescriptor &  descriptor,
const TensorInfo &  weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 399 of file BackendHelper.cpp.

405 {
406  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
407  TensorInfos infos{input, output, weights, biasesVal};
408 
409  return m_LayerSupport->IsLayerSupported(LayerType::Convolution3d,
410  infos,
411  descriptor,
412  EmptyOptional(),
413  EmptyOptional(),
414  reasonIfUnsupported);
415 }

References armnn::Convolution3d, OptionalBase::has_value(), and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

◆ IsDebugSupported()

bool IsDebugSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 417 of file BackendHelper.cpp.

420 {
421  TensorInfos infos{input, output};
422 
423  return m_LayerSupport->IsLayerSupported(LayerType::Debug,
424  infos,
425  BaseDescriptor(),
426  EmptyOptional(),
427  EmptyOptional(),
428  reasonIfUnsupported);
429 }

References armnn::Debug.

◆ IsDepthToSpaceSupported()

bool IsDepthToSpaceSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const DepthToSpaceDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 431 of file BackendHelper.cpp.

435 {
436  TensorInfos infos{input, output};
437 
438  return m_LayerSupport->IsLayerSupported(LayerType::DepthToSpace,
439  infos,
440  descriptor,
441  EmptyOptional(),
442  EmptyOptional(),
443  reasonIfUnsupported);
444 }

References armnn::DepthToSpace.

◆ IsDepthwiseConvolutionSupported()

bool IsDepthwiseConvolutionSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const DepthwiseConvolution2dDescriptor &  descriptor,
const TensorInfo &  weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 446 of file BackendHelper.cpp.

453 {
454  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
455  TensorInfos infos{input, output, weights, biasesVal};
456 
457  Optional<const BackendOptions::BackendOption> capability ;
458  if (!m_BackendId.IsUndefined())
459  {
460  capability = GetCapability("NonConstWeights", m_BackendId);
461  if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
462  {
463  if (!weights.IsConstant())
464  {
465  if (reasonIfUnsupported.has_value())
466  {
467  reasonIfUnsupported.value() =
468  "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
469  "DepthwiseConvolution2d weights are set as dynamic (non constant). ";
470  }
471  return false;
472  }
473  if (descriptor.m_BiasEnabled && !biasesVal.IsConstant())
474  {
475  if (reasonIfUnsupported.has_value())
476  {
477  reasonIfUnsupported.value() =
478  "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
479  "DepthwiseConvolution2d biases are set as dynamic (non constant). ";
480  }
481  return false;
482  }
483  // At the first stage we will only print a warning. this is to give
484  // backend developers a chance to adopt and read weights from input slots.
485  ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
486  "If you are a backend developer please find more information in our "
487  "doxygen documentation on github https://github.com/ARM-software/armnn "
488  "under the keyword 'ConstTensorsAsInputs'.";
489  }
490  }
491 
492  return m_LayerSupport->IsLayerSupported(LayerType::DepthwiseConvolution2d,
493  infos,
494  descriptor,
495  EmptyOptional(),
496  EmptyOptional(),
497  reasonIfUnsupported);
498 }

References ARMNN_LOG, armnn::DepthwiseConvolution2d, armnn::GetCapability(), OptionalBase::has_value(), TensorInfo::IsConstant(), BackendId::IsUndefined(), DepthwiseConvolution2dDescriptor::m_BiasEnabled, OptionalReferenceSwitch< std::is_reference< T >::value, T >::value(), OptionalReferenceSwitch< IsReference, T >::value(), and armnn::warning.

◆ IsDequantizeSupported()

bool IsDequantizeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 500 of file BackendHelper.cpp.

503 {
504  TensorInfos infos{input, output};
505 
506  return m_LayerSupport->IsLayerSupported(LayerType::Dequantize,
507  infos,
508  BaseDescriptor(),
509  EmptyOptional(),
510  EmptyOptional(),
511  reasonIfUnsupported);
512 }

References armnn::Dequantize.

◆ IsDetectionPostProcessSupported()

bool IsDetectionPostProcessSupported ( const TensorInfo &  boxEncodings,
const TensorInfo &  scores,
const TensorInfo &  anchors,
const TensorInfo &  detectionBoxes,
const TensorInfo &  detectionClasses,
const TensorInfo &  detectionScores,
const TensorInfo &  numDetections,
const DetectionPostProcessDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 514 of file BackendHelper.cpp.

523 {
524  TensorInfos infos{boxEncodings, scores, anchors, detectionBoxes, detectionClasses, detectionScores, numDetections};
525 
526  return m_LayerSupport->IsLayerSupported(LayerType::DetectionPostProcess,
527  infos,
528  descriptor,
529  EmptyOptional(),
530  EmptyOptional(),
531  reasonIfUnsupported);
532 }

References armnn::DetectionPostProcess.

◆ IsDilatedDepthwiseConvolutionSupported()

bool IsDilatedDepthwiseConvolutionSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const DepthwiseConvolution2dDescriptor &  descriptor,
const TensorInfo &  weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 534 of file BackendHelper.cpp.

541 {
542  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
543  TensorInfos infos{input, output, weights, biasesVal};
544 
545  Optional<const BackendOptions::BackendOption> capability ;
546  if (!m_BackendId.IsUndefined())
547  {
548  capability = GetCapability("NonConstWeights", m_BackendId);
549  if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
550  {
551  if (!weights.IsConstant())
552  {
553  if (reasonIfUnsupported.has_value())
554  {
555  reasonIfUnsupported.value() =
556  "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
557  "DilatedDepthwiseConvolution2d weights are set as dynamic (non constant). ";
558  }
559  return false;
560  }
561  if (descriptor.m_BiasEnabled && !biasesVal.IsConstant())
562  {
563  if (reasonIfUnsupported.has_value())
564  {
565  reasonIfUnsupported.value() =
566  "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
567  "DilatedDepthwiseConvolution2d biases are set as dynamic (non constant). ";
568  }
569  return false;
570  }
571  // At the first stage we will only print a warning. this is to give
572  // backend developers a chance to adopt and read weights from input slots.
573  ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
574  "If you are a backend developer please find more information in our "
575  "doxygen documentation on github https://github.com/ARM-software/armnn "
576  "under the keyword 'ConstTensorsAsInputs'.";
577  }
578  }
579 
580  return m_LayerSupport->IsLayerSupported(LayerType::DepthwiseConvolution2d,
581  infos,
582  descriptor,
583  EmptyOptional(),
584  EmptyOptional(),
585  reasonIfUnsupported);
586 }

References ARMNN_LOG, armnn::DepthwiseConvolution2d, armnn::GetCapability(), OptionalBase::has_value(), TensorInfo::IsConstant(), BackendId::IsUndefined(), DepthwiseConvolution2dDescriptor::m_BiasEnabled, OptionalReferenceSwitch< std::is_reference< T >::value, T >::value(), OptionalReferenceSwitch< IsReference, T >::value(), and armnn::warning.

◆ IsDivisionSupported()

bool IsDivisionSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 588 of file BackendHelper.cpp.

592 {
593  TensorInfos infos{input0, input1, output};
594 
595  return m_LayerSupport->IsLayerSupported(LayerType::Division,
596  infos,
597  BaseDescriptor(),
598  EmptyOptional(),
599  EmptyOptional(),
600  reasonIfUnsupported);
601 }

References armnn::Division.

◆ IsElementwiseBinarySupported()

bool IsElementwiseBinarySupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
const ElementwiseBinaryDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 603 of file BackendHelper.cpp.

608 {
609  TensorInfos infos{input0, input1, output};
610 
611  return m_LayerSupport->IsLayerSupported(LayerType::ElementwiseBinary,
612  infos,
613  descriptor,
614  EmptyOptional(),
615  EmptyOptional(),
616  reasonIfUnsupported);
617 }

References armnn::ElementwiseBinary.

◆ IsElementwiseUnarySupported()

bool IsElementwiseUnarySupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ElementwiseUnaryDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 619 of file BackendHelper.cpp.

623 {
624  TensorInfos infos{input, output};
625 
626  return m_LayerSupport->IsLayerSupported(LayerType::ElementwiseUnary,
627  infos,
628  descriptor,
629  EmptyOptional(),
630  EmptyOptional(),
631  reasonIfUnsupported);
632 }

References armnn::ElementwiseUnary.

◆ IsFakeQuantizationSupported()

bool IsFakeQuantizationSupported ( const TensorInfo &  input,
const FakeQuantizationDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 634 of file BackendHelper.cpp.

637 {
638  TensorInfos infos{input};
639 
640  return m_LayerSupport->IsLayerSupported(LayerType::FakeQuantization,
641  infos,
642  descriptor,
643  EmptyOptional(),
644  EmptyOptional(),
645  reasonIfUnsupported);
646 }

References armnn::FakeQuantization.

◆ IsFillSupported()

bool IsFillSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const FillDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 648 of file BackendHelper.cpp.

652 {
653  TensorInfos infos{input, output};
654 
655  return m_LayerSupport->IsLayerSupported(LayerType::Fill,
656  infos,
657  descriptor,
658  EmptyOptional(),
659  EmptyOptional(),
660  reasonIfUnsupported);
661 }

References armnn::Fill.

◆ IsFloorSupported()

bool IsFloorSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 663 of file BackendHelper.cpp.

666 {
667  TensorInfos infos{input, output};
668 
669  return m_LayerSupport->IsLayerSupported(LayerType::Floor,
670  infos,
671  BaseDescriptor(),
672  EmptyOptional(),
673  EmptyOptional(),
674  reasonIfUnsupported);
675 }

References armnn::Floor.

◆ IsFullyConnectedSupported()

bool IsFullyConnectedSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const TensorInfo &  weights,
const TensorInfo &  biases,
const FullyConnectedDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 677 of file BackendHelper.cpp.

683 {
684  TensorInfos infos{input, output, weights, biases};
685 
686  Optional<const BackendOptions::BackendOption> capability;
687  if (!m_BackendId.IsUndefined())
688  {
689  capability = GetCapability("NonConstWeights", m_BackendId);
690  if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
691  {
692  if (!descriptor.m_ConstantWeights)
693  {
694  if (reasonIfUnsupported.has_value())
695  {
696  reasonIfUnsupported.value() =
697  "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
698  "FullyConnected descriptor indicates that weights are dynamic (non constant). ";
699  }
700  return false;
701  }
702  if (!weights.IsConstant())
703  {
704  if (reasonIfUnsupported.has_value())
705  {
706  reasonIfUnsupported.value() =
707  "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
708  "FullyConnected weights are set as dynamic (non constant). ";
709  }
710 
711  return false;
712  }
713  if (descriptor.m_BiasEnabled && !biases.IsConstant())
714  {
715  if (reasonIfUnsupported.has_value())
716  {
717  reasonIfUnsupported.value() =
718  "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
719  "FullyConnected biases are set as dynamic (non constant). ";
720  }
721  return false;
722  }
723 
724  // At the first stage we will only print a warning. this is to give
725  // backend developers a chance to adopt and read weights from input slots.
726  ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
727  "If you are a backend developer please find more information in our "
728  "doxygen documentation on github https://github.com/ARM-software/armnn "
729  "under the keyword 'ConstTensorsAsInputs'.";
730  }
731  }
732 
733  return m_LayerSupport->IsLayerSupported(LayerType::FullyConnected,
734  infos,
735  descriptor,
736  EmptyOptional(),
737  EmptyOptional(),
738  reasonIfUnsupported);
739 }

References ARMNN_LOG, armnn::FullyConnected, armnn::GetCapability(), OptionalBase::has_value(), TensorInfo::IsConstant(), BackendId::IsUndefined(), FullyConnectedDescriptor::m_BiasEnabled, FullyConnectedDescriptor::m_ConstantWeights, OptionalReferenceSwitch< IsReference, T >::value(), OptionalReferenceSwitch< std::is_reference< T >::value, T >::value(), and armnn::warning.

◆ IsGatherNdSupported()

bool IsGatherNdSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 757 of file BackendHelper.cpp.

761 {
762  TensorInfos infos{input0, input1, output};
763 
764  return m_LayerSupport->IsLayerSupported(LayerType::GatherNd,
765  infos,
766  BaseDescriptor(),
767  EmptyOptional(),
768  EmptyOptional(),
769  reasonIfUnsupported);
770 }

References armnn::GatherNd.

◆ IsGatherSupported()

bool IsGatherSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
const GatherDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 741 of file BackendHelper.cpp.

746 {
747  TensorInfos infos{input0, input1, output};
748 
749  return m_LayerSupport->IsLayerSupported(LayerType::Gather,
750  infos,
751  descriptor,
752  EmptyOptional(),
753  EmptyOptional(),
754  reasonIfUnsupported);
755 }

References armnn::Gather.

◆ IsInputSupported()

bool IsInputSupported ( const TensorInfo &  input,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 772 of file BackendHelper.cpp.

774 {
775  TensorInfos infos{input};
776 
777  return m_LayerSupport->IsLayerSupported(LayerType::Input,
778  infos,
779  BaseDescriptor(),
780  EmptyOptional(),
781  EmptyOptional(),
782  reasonIfUnsupported);
783 }

References armnn::Input.

◆ IsInstanceNormalizationSupported()

bool IsInstanceNormalizationSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const InstanceNormalizationDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 785 of file BackendHelper.cpp.

790 {
791  TensorInfos infos{input, output};
792 
793  return m_LayerSupport->IsLayerSupported(LayerType::InstanceNormalization,
794  infos,
795  descriptor,
796  EmptyOptional(),
797  EmptyOptional(),
798  reasonIfUnsupported);
799 }

References armnn::InstanceNormalization.

◆ IsL2NormalizationSupported()

bool IsL2NormalizationSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const L2NormalizationDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 801 of file BackendHelper.cpp.

805 {
806  TensorInfos infos{input, output};
807 
808  return m_LayerSupport->IsLayerSupported(LayerType::L2Normalization,
809  infos,
810  descriptor,
811  EmptyOptional(),
812  EmptyOptional(),
813  reasonIfUnsupported);
814 }

References armnn::L2Normalization.

◆ IsLogicalBinarySupported()

bool IsLogicalBinarySupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
const LogicalBinaryDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 816 of file BackendHelper.cpp.

821 {
822  TensorInfos infos{input0, input1, output};
823 
824  return m_LayerSupport->IsLayerSupported(LayerType::LogicalBinary,
825  infos,
826  descriptor,
827  EmptyOptional(),
828  EmptyOptional(),
829  reasonIfUnsupported);
830 }

References armnn::LogicalBinary.

◆ IsLogicalUnarySupported()

bool IsLogicalUnarySupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ElementwiseUnaryDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 832 of file BackendHelper.cpp.

836 {
837  TensorInfos infos{input, output};
838 
839  return m_LayerSupport->IsLayerSupported(LayerType::ElementwiseUnary,
840  infos,
841  descriptor,
842  EmptyOptional(),
843  EmptyOptional(),
844  reasonIfUnsupported);
845 }

References armnn::ElementwiseUnary.

◆ IsLogSoftmaxSupported()

bool IsLogSoftmaxSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const LogSoftmaxDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 847 of file BackendHelper.cpp.

851 {
852  TensorInfos infos{input, output};
853 
854  return m_LayerSupport->IsLayerSupported(LayerType::LogSoftmax,
855  infos,
856  descriptor,
857  EmptyOptional(),
858  EmptyOptional(),
859  reasonIfUnsupported);
860 }

References armnn::LogSoftmax.

◆ IsLstmSupported()

bool IsLstmSupported ( const TensorInfo &  input,
const TensorInfo &  outputStateIn,
const TensorInfo &  cellStateIn,
const TensorInfo &  scratchBuffer,
const TensorInfo &  outputStateOut,
const TensorInfo &  cellStateOut,
const TensorInfo &  output,
const LstmDescriptor &  descriptor,
const LstmInputParamsInfo &  paramsInfo,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 862 of file BackendHelper.cpp.

872 {
873  TensorInfos infos{input, outputStateIn, cellStateIn, scratchBuffer, outputStateOut, cellStateOut, output};
874 
875  return m_LayerSupport->IsLayerSupported(LayerType::Lstm,
876  infos,
877  descriptor,
878  paramsInfo,
879  EmptyOptional(),
880  reasonIfUnsupported);
881 }

References armnn::Lstm.

◆ IsMaximumSupported()

bool IsMaximumSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 883 of file BackendHelper.cpp.

887 {
888  TensorInfos infos{input0, input1, output};
889 
890  return m_LayerSupport->IsLayerSupported(LayerType::Maximum,
891  infos,
892  BaseDescriptor(),
893  EmptyOptional(),
894  EmptyOptional(),
895  reasonIfUnsupported);
896 }

References armnn::Maximum.

◆ IsMeanSupported()

bool IsMeanSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const MeanDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 898 of file BackendHelper.cpp.

902 {
903  TensorInfos infos{input, output};
904 
905  return m_LayerSupport->IsLayerSupported(LayerType::Mean,
906  infos,
907  descriptor,
908  EmptyOptional(),
909  EmptyOptional(),
910  reasonIfUnsupported);
911 }

References armnn::Mean.

◆ IsMemCopySupported()

bool IsMemCopySupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 913 of file BackendHelper.cpp.

916 {
917  TensorInfos infos{input, output};
918 
919  return m_LayerSupport->IsLayerSupported(LayerType::MemCopy,
920  infos,
921  BaseDescriptor(),
922  EmptyOptional(),
923  EmptyOptional(),
924  reasonIfUnsupported);
925 }

References armnn::MemCopy.

◆ IsMemImportSupported()

bool IsMemImportSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 927 of file BackendHelper.cpp.

930 {
931  TensorInfos infos{input, output};
932 
933  return m_LayerSupport->IsLayerSupported(LayerType::MemImport,
934  infos,
935  BaseDescriptor(),
936  EmptyOptional(),
937  EmptyOptional(),
938  reasonIfUnsupported);
939 }

References armnn::MemImport.

◆ IsMergeSupported()

bool IsMergeSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 941 of file BackendHelper.cpp.

945 {
946  TensorInfos infos{input0, input1, output};
947 
948  return m_LayerSupport->IsLayerSupported(LayerType::Merge,
949  infos,
950  BaseDescriptor(),
951  EmptyOptional(),
952  EmptyOptional(),
953  reasonIfUnsupported);
954 }

References armnn::Merge.

◆ IsMinimumSupported()

bool IsMinimumSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 956 of file BackendHelper.cpp.

960 {
961  TensorInfos infos{input0, input1, output};
962 
963  return m_LayerSupport->IsLayerSupported(LayerType::Minimum,
964  infos,
965  BaseDescriptor(),
966  EmptyOptional(),
967  EmptyOptional(),
968  reasonIfUnsupported);
969 }

References armnn::Minimum.

◆ IsMultiplicationSupported()

bool IsMultiplicationSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 971 of file BackendHelper.cpp.

975 {
976  TensorInfos infos{input0, input1, output};
977 
978  return m_LayerSupport->IsLayerSupported(LayerType::Multiplication,
979  infos,
980  BaseDescriptor(),
981  EmptyOptional(),
982  EmptyOptional(),
983  reasonIfUnsupported);
984 }

References armnn::Multiplication.

◆ IsNormalizationSupported()

bool IsNormalizationSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const NormalizationDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 986 of file BackendHelper.cpp.

990 {
991  TensorInfos infos{input, output};
992 
993  return m_LayerSupport->IsLayerSupported(LayerType::Normalization,
994  infos,
995  descriptor,
996  EmptyOptional(),
997  EmptyOptional(),
998  reasonIfUnsupported);
999 }

References armnn::Normalization.

◆ IsOutputSupported()

bool IsOutputSupported ( const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1001 of file BackendHelper.cpp.

1003 {
1004  TensorInfos infos{output};
1005 
1006  return m_LayerSupport->IsLayerSupported(LayerType::Output,
1007  infos,
1008  BaseDescriptor(),
1009  EmptyOptional(),
1010  EmptyOptional(),
1011  reasonIfUnsupported);
1012 }

References armnn::Output.

◆ IsPadSupported()

bool IsPadSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const PadDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1014 of file BackendHelper.cpp.

1018 {
1019  TensorInfos infos{input, output};
1020 
1021  return m_LayerSupport->IsLayerSupported(LayerType::Pad,
1022  infos,
1023  descriptor,
1024  EmptyOptional(),
1025  EmptyOptional(),
1026  reasonIfUnsupported);
1027 }

References armnn::Pad.

◆ IsPermuteSupported()

bool IsPermuteSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const PermuteDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1029 of file BackendHelper.cpp.

1033 {
1034  TensorInfos infos{input, output};
1035 
1036  return m_LayerSupport->IsLayerSupported(LayerType::Permute,
1037  infos,
1038  descriptor,
1039  EmptyOptional(),
1040  EmptyOptional(),
1041  reasonIfUnsupported);
1042 }

References armnn::Permute.

◆ IsPooling2dSupported()

bool IsPooling2dSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const Pooling2dDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1044 of file BackendHelper.cpp.

1048 {
1049  TensorInfos infos{input, output};
1050 
1051  return m_LayerSupport->IsLayerSupported(LayerType::Pooling2d,
1052  infos,
1053  descriptor,
1054  EmptyOptional(),
1055  EmptyOptional(),
1056  reasonIfUnsupported);
1057 }

References armnn::Pooling2d.

◆ IsPooling3dSupported()

bool IsPooling3dSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const Pooling3dDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1059 of file BackendHelper.cpp.

1063 {
1064  TensorInfos infos{input, output};
1065 
1066  return m_LayerSupport->IsLayerSupported(LayerType::Pooling3d,
1067  infos,
1068  descriptor,
1069  EmptyOptional(),
1070  EmptyOptional(),
1071  reasonIfUnsupported);
1072 }

References armnn::Pooling3d.

◆ IsPreCompiledSupported()

bool IsPreCompiledSupported ( const TensorInfo &  input,
const PreCompiledDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1074 of file BackendHelper.cpp.

1077 {
1078  TensorInfos infos{input};
1079 
1080  return m_LayerSupport->IsLayerSupported(LayerType::PreCompiled,
1081  infos,
1082  descriptor,
1083  EmptyOptional(),
1084  EmptyOptional(),
1085  reasonIfUnsupported);
1086 }

References armnn::PreCompiled.

◆ IsPreluSupported()

bool IsPreluSupported ( const TensorInfo &  input,
const TensorInfo &  alpha,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1088 of file BackendHelper.cpp.

1092 {
1093  TensorInfos infos{input, alpha, output};
1094 
1095  return m_LayerSupport->IsLayerSupported(LayerType::Prelu,
1096  infos,
1097  BaseDescriptor(),
1098  EmptyOptional(),
1099  EmptyOptional(),
1100  reasonIfUnsupported);
1101 }

References armnn::Prelu.

◆ IsQLstmSupported()

bool IsQLstmSupported ( const TensorInfo &  input,
const TensorInfo &  previousOutputIn,
const TensorInfo &  previousCellStateIn,
const TensorInfo &  outputStateOut,
const TensorInfo &  cellStateOut,
const TensorInfo &  output,
const QLstmDescriptor &  descriptor,
const LstmInputParamsInfo &  paramsInfo,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1117 of file BackendHelper.cpp.

1126 {
1127  TensorInfos infos{input, previousOutputIn, previousCellStateIn, outputStateOut, cellStateOut, output};
1128 
1129  return m_LayerSupport->IsLayerSupported(LayerType::QLstm,
1130  infos,
1131  descriptor,
1132  paramsInfo,
1133  EmptyOptional(),
1134  reasonIfUnsupported);
1135 }

References armnn::QLstm.

◆ IsQuantizedLstmSupported()

bool IsQuantizedLstmSupported ( const TensorInfo &  input,
const TensorInfo &  previousCellStateIn,
const TensorInfo &  previousOutputIn,
const TensorInfo &  cellStateOut,
const TensorInfo &  output,
const QuantizedLstmInputParamsInfo &  paramsInfo,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1137 of file BackendHelper.cpp.

1144 {
1145  TensorInfos infos{input, previousCellStateIn, previousOutputIn, cellStateOut, output};
1146 
1147  return m_LayerSupport->IsLayerSupported(LayerType::QuantizedLstm,
1148  infos,
1149  BaseDescriptor(),
1150  EmptyOptional(),
1151  paramsInfo,
1152  reasonIfUnsupported);
1153 }

References armnn::QuantizedLstm.

◆ IsQuantizeSupported()

bool IsQuantizeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1103 of file BackendHelper.cpp.

1106 {
1107  TensorInfos infos{input, output};
1108 
1109  return m_LayerSupport->IsLayerSupported(LayerType::Quantize,
1110  infos,
1111  BaseDescriptor(),
1112  EmptyOptional(),
1113  EmptyOptional(),
1114  reasonIfUnsupported);
1115 }

References armnn::Quantize.

◆ IsRankSupported()

bool IsRankSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1155 of file BackendHelper.cpp.

1158 {
1159  TensorInfos infos{input, output};
1160 
1161  return m_LayerSupport->IsLayerSupported(LayerType::Rank,
1162  infos,
1163  BaseDescriptor(),
1164  EmptyOptional(),
1165  EmptyOptional(),
1166  reasonIfUnsupported);
1167 }

References armnn::Rank.

◆ IsReduceSupported()

bool IsReduceSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ReduceDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1169 of file BackendHelper.cpp.

1173 {
1174  TensorInfos infos{input, output};
1175 
1176  return m_LayerSupport->IsLayerSupported(LayerType::Reduce,
1177  infos,
1178  descriptor,
1179  EmptyOptional(),
1180  EmptyOptional(),
1181  reasonIfUnsupported);
1182 }

References armnn::Reduce.

◆ IsReshapeSupported()

bool IsReshapeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ReshapeDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1184 of file BackendHelper.cpp.

1188 {
1189  TensorInfos infos{input, output};
1190 
1191  return m_LayerSupport->IsLayerSupported(LayerType::Reshape,
1192  infos,
1193  descriptor,
1194  EmptyOptional(),
1195  EmptyOptional(),
1196  reasonIfUnsupported);
1197 }

References armnn::Reshape.

◆ IsResizeSupported()

bool IsResizeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ResizeDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1199 of file BackendHelper.cpp.

1203 {
1204  TensorInfos infos{input, output};
1205 
1206  return m_LayerSupport->IsLayerSupported(LayerType::Resize,
1207  infos,
1208  descriptor,
1209  EmptyOptional(),
1210  EmptyOptional(),
1211  reasonIfUnsupported);
1212 }

References armnn::Resize.

◆ IsShapeSupported()

bool IsShapeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1214 of file BackendHelper.cpp.

1217 {
1218  TensorInfos infos{input, output};
1219 
1220  return m_LayerSupport->IsLayerSupported(LayerType::Shape,
1221  infos,
1222  BaseDescriptor(),
1223  EmptyOptional(),
1224  EmptyOptional(),
1225  reasonIfUnsupported);
1226 }

References armnn::Shape.

◆ IsSliceSupported()

bool IsSliceSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const SliceDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1228 of file BackendHelper.cpp.

1232 {
1233  TensorInfos infos{input, output};
1234 
1235  return m_LayerSupport->IsLayerSupported(LayerType::Slice,
1236  infos,
1237  descriptor,
1238  EmptyOptional(),
1239  EmptyOptional(),
1240  reasonIfUnsupported);
1241 }

References armnn::Slice.

◆ IsSoftmaxSupported()

bool IsSoftmaxSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const SoftmaxDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1243 of file BackendHelper.cpp.

1247 {
1248  TensorInfos infos{input, output};
1249 
1250  return m_LayerSupport->IsLayerSupported(LayerType::Softmax,
1251  infos,
1252  descriptor,
1253  EmptyOptional(),
1254  EmptyOptional(),
1255  reasonIfUnsupported);
1256 }

References armnn::Softmax.

◆ IsSpaceToBatchNdSupported()

bool IsSpaceToBatchNdSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const SpaceToBatchNdDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1258 of file BackendHelper.cpp.

{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::SpaceToBatchNd,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

References armnn::SpaceToBatchNd.

◆ IsSpaceToDepthSupported()

bool IsSpaceToDepthSupported ( const TensorInfo &  input,
                               const TensorInfo &  output,
                               const SpaceToDepthDescriptor &  descriptor,
                               Optional< std::string & >  reasonIfUnsupported = EmptyOptional()
                             )

Definition at line 1273 of file BackendHelper.cpp.

{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::SpaceToDepth,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

References armnn::SpaceToDepth.

◆ IsSplitterSupported()

bool IsSplitterSupported ( const TensorInfo &  input,
                           const std::vector< std::reference_wrapper< TensorInfo >> &  outputs,
                           const ViewsDescriptor &  descriptor,
                           Optional< std::string & >  reasonIfUnsupported = EmptyOptional()
                         )

Definition at line 1288 of file BackendHelper.cpp.

{
    TensorInfos infos{input};
    for (TensorInfo outInfo : outputs)
    {
        infos.push_back(outInfo);
    }

    return m_LayerSupport->IsLayerSupported(LayerType::Splitter,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

References armnn::Splitter.
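
Because the outputs are passed as std::reference_wrapper<TensorInfo>, the caller keeps ownership of the output TensorInfo objects and only hands references to the handle, which copies them into the infos vector shown above. A minimal sketch, where the two-way split of a [4, 2] tensor, the view origins and sizes, and the helper name are all illustrative assumptions:

#include <armnn/BackendHelper.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <functional>
#include <vector>

// Hypothetical helper: split a [4, 2] tensor into two [2, 2] views along dimension 0.
bool CheckSplitter(const armnn::BackendId& backendId)
{
    armnn::TensorInfo input(armnn::TensorShape({4, 2}), armnn::DataType::Float32);
    armnn::TensorInfo out0(armnn::TensorShape({2, 2}),  armnn::DataType::Float32);
    armnn::TensorInfo out1(armnn::TensorShape({2, 2}),  armnn::DataType::Float32);
    std::vector<std::reference_wrapper<armnn::TensorInfo>> outputs{out0, out1};

    armnn::ViewsDescriptor descriptor(2, 2);     // two views, two dimensions each
    descriptor.SetViewOriginCoord(0, 0, 0);      // view 0 starts at row 0
    descriptor.SetViewOriginCoord(1, 0, 2);      // view 1 starts at row 2
    descriptor.SetViewSize(0, 0, 2);
    descriptor.SetViewSize(0, 1, 2);
    descriptor.SetViewSize(1, 0, 2);
    descriptor.SetViewSize(1, 1, 2);

    armnn::LayerSupportHandle handle = armnn::GetILayerSupportByBackendId(backendId);
    return handle.IsSplitterSupported(input, outputs, descriptor);
}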

◆ IsStackSupported()

bool IsStackSupported ( const std::vector< const TensorInfo * > &  inputs,
                        const TensorInfo &  output,
                        const StackDescriptor &  descriptor,
                        Optional< std::string & >  reasonIfUnsupported = EmptyOptional()
                      )

Definition at line 1307 of file BackendHelper.cpp.

{
    TensorInfos infos;
    for (const TensorInfo* inputInfo : inputs)
    {
        infos.push_back(*inputInfo);
    }
    infos.push_back(output);

    return m_LayerSupport->IsLayerSupported(LayerType::Stack,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

References armnn::Stack.
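
Here the inputs arrive as raw const TensorInfo pointers, so the vector only references TensorInfo objects owned by the caller; they are dereferenced into the infos vector before the query, as shown above. A minimal sketch, where the two [3, 4] inputs, the stack axis and the helper name are assumptions made for illustration:

#include <armnn/BackendHelper.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <vector>

// Hypothetical helper: stack two [3, 4] tensors along a new leading axis.
bool CheckStack(const armnn::BackendId& backendId)
{
    armnn::TensorInfo in0(armnn::TensorShape({3, 4}),       armnn::DataType::Float32);
    armnn::TensorInfo in1(armnn::TensorShape({3, 4}),       armnn::DataType::Float32);
    armnn::TensorInfo output(armnn::TensorShape({2, 3, 4}), armnn::DataType::Float32);
    std::vector<const armnn::TensorInfo*> inputs{&in0, &in1};

    armnn::StackDescriptor descriptor;
    descriptor.m_Axis       = 0;
    descriptor.m_NumInputs  = 2;
    descriptor.m_InputShape = armnn::TensorShape({3, 4});

    armnn::LayerSupportHandle handle = armnn::GetILayerSupportByBackendId(backendId);
    return handle.IsStackSupported(inputs, output, descriptor);
}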

◆ IsStandInSupported()

bool IsStandInSupported ( const std::vector< const TensorInfo * > &  inputs,
                          const std::vector< const TensorInfo * > &  outputs,
                          const StandInDescriptor &  descriptor,
                          Optional< std::string & >  reasonIfUnsupported = EmptyOptional()
                        )

Definition at line 1327 of file BackendHelper.cpp.

{
    TensorInfos infos;
    for (const TensorInfo* inputInfo : inputs)
    {
        infos.push_back(*inputInfo);
    }
    for (const TensorInfo* outputInfo : outputs)
    {
        infos.push_back(*outputInfo);
    }

    return m_LayerSupport->IsLayerSupported(LayerType::StandIn,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

References armnn::StandIn.

◆ IsStridedSliceSupported()

bool IsStridedSliceSupported ( const TensorInfo &  input,
                               const TensorInfo &  output,
                               const StridedSliceDescriptor &  descriptor,
                               Optional< std::string & >  reasonIfUnsupported = EmptyOptional()
                             )

Definition at line 1351 of file BackendHelper.cpp.

{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::StridedSlice,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

References armnn::StridedSlice.

◆ IsSubtractionSupported()

bool IsSubtractionSupported ( const TensorInfo &  input0,
                              const TensorInfo &  input1,
                              const TensorInfo &  output,
                              Optional< std::string & >  reasonIfUnsupported = EmptyOptional()
                            )

Definition at line 1366 of file BackendHelper.cpp.

{
    TensorInfos infos{input0, input1, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Subtraction,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

References armnn::Subtraction.

◆ IsSwitchSupported()

bool IsSwitchSupported ( const TensorInfo &  input0,
                         const TensorInfo &  input1,
                         const TensorInfo &  output0,
                         const TensorInfo &  output1,
                         Optional< std::string & >  reasonIfUnsupported = EmptyOptional()
                       )

Definition at line 1381 of file BackendHelper.cpp.

{
    TensorInfos infos{input0, input1, output0, output1};

    return m_LayerSupport->IsLayerSupported(LayerType::Switch,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

References armnn::Switch.

◆ IsTransposeConvolution2dSupported()

bool IsTransposeConvolution2dSupported ( const TensorInfo &  input,
                                         const TensorInfo &  output,
                                         const TransposeConvolution2dDescriptor &  descriptor,
                                         const TensorInfo &  weights,
                                         const Optional< TensorInfo > &  biases,
                                         Optional< std::string & >  reasonIfUnsupported = EmptyOptional()
                                       )

Definition at line 1397 of file BackendHelper.cpp.

{
    TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
    TensorInfos infos{input, output, weights, biasesVal};

    return m_LayerSupport->IsLayerSupported(LayerType::TransposeConvolution2d,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

References OptionalBase::has_value(), armnn::TransposeConvolution2d, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().
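
The weights are a plain TensorInfo while the biases are optional; as the body above shows, an empty Optional is replaced with a default-constructed TensorInfo before the query, so passing EmptyOptional() describes a layer without biases. A short sketch with NHWC shapes, a 2x2 kernel at stride 2, an assumed [outCh, kH, kW, inCh] weight layout, and a hypothetical helper name, all chosen for illustration:

#include <armnn/BackendHelper.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>

// Hypothetical helper: 1x8x8x16 NHWC input upsampled to 1x16x16x8 with 2x2 kernels, stride 2.
bool CheckTransposeConv2d(const armnn::BackendId& backendId)
{
    armnn::TensorInfo input(armnn::TensorShape({1, 8, 8, 16}),   armnn::DataType::Float32);
    armnn::TensorInfo output(armnn::TensorShape({1, 16, 16, 8}), armnn::DataType::Float32);
    armnn::TensorInfo weights(armnn::TensorShape({8, 2, 2, 16}), armnn::DataType::Float32);
    armnn::TensorInfo biases(armnn::TensorShape({8}),            armnn::DataType::Float32);

    armnn::TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX     = 2;
    descriptor.m_StrideY     = 2;
    descriptor.m_BiasEnabled = true;
    descriptor.m_DataLayout  = armnn::DataLayout::NHWC;

    armnn::LayerSupportHandle handle = armnn::GetILayerSupportByBackendId(backendId);
    return handle.IsTransposeConvolution2dSupported(input, output, descriptor, weights,
                                                    armnn::Optional<armnn::TensorInfo>(biases));
}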

◆ IsTransposeSupported()

bool IsTransposeSupported ( const TensorInfo &  input,
                            const TensorInfo &  output,
                            const TransposeDescriptor &  descriptor,
                            Optional< std::string & >  reasonIfUnsupported = EmptyOptional()
                          )

Definition at line 1416 of file BackendHelper.cpp.

{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Transpose,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

References armnn::Transpose.
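
TransposeDescriptor carries the permutation as an armnn::PermutationVector. A brief sketch, where the NCHW-to-NHWC permutation, the 4D shape and the helper name are illustrative assumptions:

#include <armnn/BackendHelper.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>

// Hypothetical helper: permute a [1, 3, 32, 32] (NCHW) tensor to [1, 32, 32, 3] (NHWC).
bool CheckTranspose(const armnn::BackendId& backendId)
{
    armnn::TensorInfo input(armnn::TensorShape({1, 3, 32, 32}),  armnn::DataType::Float32);
    armnn::TensorInfo output(armnn::TensorShape({1, 32, 32, 3}), armnn::DataType::Float32);

    armnn::TransposeDescriptor descriptor;
    descriptor.m_DimMappings = armnn::PermutationVector({0, 2, 3, 1});

    armnn::LayerSupportHandle handle = armnn::GetILayerSupportByBackendId(backendId);
    return handle.IsTransposeSupported(input, output, descriptor);
}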

◆ IsUnidirectionalSequenceLstmSupported()

bool IsUnidirectionalSequenceLstmSupported ( const TensorInfo &  input,
                                             const TensorInfo &  outputStateIn,
                                             const TensorInfo &  cellStateIn,
                                             const TensorInfo &  outputStateOut,
                                             const TensorInfo &  cellStateOut,
                                             const TensorInfo &  output,
                                             const LstmDescriptor &  descriptor,
                                             const LstmInputParamsInfo &  paramsInfo,
                                             Optional< std::string & >  reasonIfUnsupported = EmptyOptional()
                                           )

Definition at line 1431 of file BackendHelper.cpp.

{
    TensorInfos infos{input, outputStateIn, cellStateIn, outputStateOut, cellStateOut, output};

    return m_LayerSupport->IsLayerSupported(LayerType::UnidirectionalSequenceLstm,
                                            infos,
                                            descriptor,
                                            paramsInfo,
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

References armnn::UnidirectionalSequenceLstm.


The documentation for this class was generated from the following files:

BackendHelper.hpp
BackendHelper.cpp