ArmNN 23.08
LayerSupportHandle Class Reference

#include <BackendHelper.hpp>

Public Member Functions

 LayerSupportHandle (std::shared_ptr< ILayerSupport > layerSupport)
 
 LayerSupportHandle (std::shared_ptr< ILayerSupport > layerSupport, const BackendId &backendId)
 
bool IsBackendRegistered () const
 
bool IsActivationSupported (const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsAdditionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsArgMinMaxSupported (const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsBatchMatMulSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsBatchNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsBatchToSpaceNdSupported (const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsCastSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsChannelShuffleSupported (const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsComparisonSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsConcatSupported (const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsConstantSupported (const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsConvertFp16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsConvertFp32ToFp16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsConvolution3dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsDebugSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsDepthToSpaceSupported (const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsDequantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsDetectionPostProcessSupported (const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsDilatedDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsDivisionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsElementwiseBinarySupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ElementwiseBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsElementwiseUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsFakeQuantizationSupported (const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsFillSupported (const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsFloorSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsFullyConnectedSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsGatherNdSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsInputSupported (const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsInstanceNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsL2NormalizationSupported (const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsLogicalBinarySupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsLogicalUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsLogSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsMaximumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsMeanSupported (const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsMemCopySupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsMemImportSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsMergeSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsMinimumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsMultiplicationSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsOutputSupported (const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsPadSupported (const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsPermuteSupported (const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsPooling2dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsPooling3dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsPreCompiledSupported (const TensorInfo &input, const PreCompiledDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsPreluSupported (const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsQuantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsQLstmSupported (const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsQuantizedLstmSupported (const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsRankSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsReduceSupported (const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsReshapeSupported (const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsResizeSupported (const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsReverseV2Supported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsShapeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsSliceSupported (const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsSpaceToBatchNdSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsSpaceToDepthSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsSplitterSupported (const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsStackSupported (const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsStandInSupported (const std::vector< const TensorInfo * > &inputs, const std::vector< const TensorInfo * > &outputs, const StandInDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsStridedSliceSupported (const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsSubtractionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsSwitchSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output0, const TensorInfo &output1, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsTileSupported (const TensorInfo &input, const TensorInfo &output, const TileDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsTransposeConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsTransposeSupported (const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 
bool IsUnidirectionalSequenceLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
 

Detailed Description

Definition at line 29 of file BackendHelper.hpp.
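
A handle wraps a backend's ILayerSupport object so that per-layer support queries can be made without touching the backend directly. As a minimal usage sketch (not part of the generated reference), assuming the free function armnn::GetILayerSupportByBackendId declared in the same header and the "CpuRef" backend id purely as an example:

#include <armnn/BackendHelper.hpp>
#include <armnn/BackendId.hpp>
#include <iostream>

int main()
{
    // Ask the backend registry for the layer-support object of the reference backend.
    armnn::LayerSupportHandle handle =
        armnn::GetILayerSupportByBackendId(armnn::BackendId("CpuRef"));

    // If the backend is not registered, every Is...Supported() query is meaningless.
    if (!handle.IsBackendRegistered())
    {
        std::cout << "CpuRef backend is not registered\n";
        return 1;
    }
    std::cout << "CpuRef backend is registered\n";
    return 0;
}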

Constructor & Destructor Documentation

◆ LayerSupportHandle() [1/2]

LayerSupportHandle ( std::shared_ptr< ILayerSupport >  layerSupport )
inline explicit

Definition at line 32 of file BackendHelper.hpp.

33  : m_LayerSupport(std::move(layerSupport)), m_BackendId(Compute::Undefined) {};

References armnn::Undefined.

◆ LayerSupportHandle() [2/2]

LayerSupportHandle ( std::shared_ptr< ILayerSupport >  layerSupport,
const BackendId &  backendId 
)
inline explicit

Definition at line 35 of file BackendHelper.hpp.

36  : m_LayerSupport(std::move(layerSupport)), m_BackendId(backendId) {};

Member Function Documentation

◆ IsActivationSupported()

bool IsActivationSupported ( const TensorInfo input,
const TensorInfo output,
const ActivationDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 154 of file BackendHelper.cpp.

158 {
159  TensorInfos infos{input, output};
160 
161  return m_LayerSupport->IsLayerSupported(LayerType::Activation,
162  infos,
163  descriptor,
164  EmptyOptional(),
165  EmptyOptional(),
166  reasonIfUnsupported);
167 }

References armnn::Activation.
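
A hedged usage sketch of this query (the helper name, shapes and ReLU settings are illustrative, not taken from the reference): passing an armnn::Optional<std::string&> lets the backend report why a configuration is rejected.

#include <armnn/BackendHelper.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <iostream>
#include <string>

void CheckReluSupport(armnn::LayerSupportHandle& handle)
{
    // 1x16 float tensors; an activation layer keeps the input shape.
    armnn::TensorInfo inputInfo (armnn::TensorShape({1, 16}), armnn::DataType::Float32);
    armnn::TensorInfo outputInfo(armnn::TensorShape({1, 16}), armnn::DataType::Float32);

    armnn::ActivationDescriptor descriptor;
    descriptor.m_Function = armnn::ActivationFunction::ReLu;

    // Pass a string by reference so the backend can explain a negative answer.
    std::string reason;
    bool supported = handle.IsActivationSupported(inputInfo, outputInfo, descriptor,
                                                  armnn::Optional<std::string&>(reason));
    if (!supported)
    {
        std::cout << "ReLU not supported: " << reason << "\n";
    }
}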

◆ IsAdditionSupported()

bool IsAdditionSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 169 of file BackendHelper.cpp.

173 {
174  TensorInfos infos{input0, input1, output};
175 
176  return m_LayerSupport->IsLayerSupported(LayerType::Addition,
177  infos,
178  BaseDescriptor(),
179  EmptyOptional(),
180  EmptyOptional(),
181  reasonIfUnsupported);
182 }

References armnn::Addition.

◆ IsArgMinMaxSupported()

bool IsArgMinMaxSupported ( const TensorInfo input,
const TensorInfo output,
const ArgMinMaxDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 184 of file BackendHelper.cpp.

188 {
189  TensorInfos infos{input, output};
190 
191  return m_LayerSupport->IsLayerSupported(LayerType::ArgMinMax,
192  infos,
193  descriptor,
194  EmptyOptional(),
195  EmptyOptional(),
196  reasonIfUnsupported);
197 }

References armnn::ArgMinMax.

◆ IsBackendRegistered()

bool IsBackendRegistered ( ) const

Definition at line 142 of file BackendHelper.cpp.

143 {
144  if (m_LayerSupport)
145  {
146  return true;
147  }
148 
149  return false;
150 }

◆ IsBatchMatMulSupported()

bool IsBatchMatMulSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const BatchMatMulDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 199 of file BackendHelper.cpp.

204 {
205  TensorInfos infos{input0, input1, output};
206 
207  return m_LayerSupport->IsLayerSupported(LayerType::BatchMatMul,
208  infos,
209  descriptor,
210  EmptyOptional(),
211  EmptyOptional(),
212  reasonIfUnsupported);
213 }

References armnn::BatchMatMul.

◆ IsBatchNormalizationSupported()

bool IsBatchNormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const TensorInfo mean,
const TensorInfo var,
const TensorInfo beta,
const TensorInfo gamma,
const BatchNormalizationDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 215 of file BackendHelper.cpp.

223 {
224  TensorInfos infos{input, output, mean, var, beta, gamma};
225 
226  return m_LayerSupport->IsLayerSupported(LayerType::BatchNormalization,
227  infos,
228  descriptor,
229  EmptyOptional(),
230  EmptyOptional(),
231  reasonIfUnsupported);
232 }

References armnn::BatchNormalization.

◆ IsBatchToSpaceNdSupported()

bool IsBatchToSpaceNdSupported ( const TensorInfo input,
const TensorInfo output,
const BatchToSpaceNdDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 234 of file BackendHelper.cpp.

238 {
239  TensorInfos infos{input, output};
240 
241  return m_LayerSupport->IsLayerSupported(LayerType::BatchToSpaceNd,
242  infos,
243  descriptor,
244  EmptyOptional(),
245  EmptyOptional(),
246  reasonIfUnsupported);
247 }

References armnn::BatchToSpaceNd.

◆ IsCastSupported()

bool IsCastSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 249 of file BackendHelper.cpp.

252 {
253  TensorInfos infos{input, output};
254 
255  return m_LayerSupport->IsLayerSupported(LayerType::Cast,
256  infos,
257  BaseDescriptor(),
258  EmptyOptional(),
259  EmptyOptional(),
260  reasonIfUnsupported);
261 }

References armnn::Cast.

◆ IsChannelShuffleSupported()

bool IsChannelShuffleSupported ( const TensorInfo input,
const TensorInfo output,
const ChannelShuffleDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 263 of file BackendHelper.cpp.

267 {
268  TensorInfos infos{input, output};
269 
270  return m_LayerSupport->IsLayerSupported(LayerType::ChannelShuffle,
271  infos,
272  descriptor,
273  EmptyOptional(),
274  EmptyOptional(),
275  reasonIfUnsupported);
276 }

References armnn::ChannelShuffle.

◆ IsComparisonSupported()

bool IsComparisonSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const ComparisonDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 278 of file BackendHelper.cpp.

283 {
284  TensorInfos infos{input0, input1, output};
285 
286  return m_LayerSupport->IsLayerSupported(LayerType::Comparison,
287  infos,
288  descriptor,
289  EmptyOptional(),
290  EmptyOptional(),
291  reasonIfUnsupported);
292 }

References armnn::Comparison.

◆ IsConcatSupported()

bool IsConcatSupported ( const std::vector< const TensorInfo * >  inputs,
const TensorInfo output,
const OriginsDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 294 of file BackendHelper.cpp.

298 {
299  TensorInfos infos;
300  for (const TensorInfo* inputInfo : inputs)
301  {
302  infos.push_back(*inputInfo);
303  }
304  infos.push_back(output);
305 
306  return m_LayerSupport->IsLayerSupported(LayerType::Concat,
307  infos,
308  descriptor,
309  EmptyOptional(),
310  EmptyOptional(),
311  reasonIfUnsupported);
312 }

References armnn::Concat.
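
Unlike the single-input queries, this one takes a vector of TensorInfo pointers. A hedged sketch, assuming the armnn::CreateDescriptorForConcatenation helper from armnn/Descriptors.hpp and illustrative shapes:

#include <armnn/BackendHelper.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <vector>

bool CheckConcatSupport(armnn::LayerSupportHandle& handle)
{
    // Two 1x8 inputs concatenated along dimension 1 into a 1x16 output.
    armnn::TensorInfo input0(armnn::TensorShape({1, 8}),  armnn::DataType::Float32);
    armnn::TensorInfo input1(armnn::TensorShape({1, 8}),  armnn::DataType::Float32);
    armnn::TensorInfo output(armnn::TensorShape({1, 16}), armnn::DataType::Float32);

    // The query takes raw pointers to the input TensorInfos.
    std::vector<const armnn::TensorInfo*> inputs{ &input0, &input1 };

    // Build an OriginsDescriptor from the input shapes and the concatenation axis.
    std::vector<armnn::TensorShape> shapes{ input0.GetShape(), input1.GetShape() };
    armnn::OriginsDescriptor descriptor =
        armnn::CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), 1);

    return handle.IsConcatSupported(inputs, output, descriptor);
}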

◆ IsConstantSupported()

bool IsConstantSupported ( const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 314 of file BackendHelper.cpp.

316 {
317  TensorInfos infos{output};
318 
319  return m_LayerSupport->IsLayerSupported(LayerType::Constant,
320  infos,
321  BaseDescriptor(),
322  EmptyOptional(),
323  EmptyOptional(),
324  reasonIfUnsupported);
325 }

References armnn::Constant.

◆ IsConvertFp16ToFp32Supported()

bool IsConvertFp16ToFp32Supported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 327 of file BackendHelper.cpp.

330 {
331  TensorInfos infos{input, output};
332 
333  return m_LayerSupport->IsLayerSupported(LayerType::ConvertFp16ToFp32,
334  infos,
335  BaseDescriptor(),
336  EmptyOptional(),
337  EmptyOptional(),
338  reasonIfUnsupported);
339 }

References armnn::ConvertFp16ToFp32.

◆ IsConvertFp32ToFp16Supported()

bool IsConvertFp32ToFp16Supported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 341 of file BackendHelper.cpp.

344 {
345  TensorInfos infos{input, output};
346 
347  return m_LayerSupport->IsLayerSupported(LayerType::ConvertFp32ToFp16,
348  infos,
349  BaseDescriptor(),
350  EmptyOptional(),
351  EmptyOptional(),
352  reasonIfUnsupported);
353 }

References armnn::ConvertFp32ToFp16.

◆ IsConvolution2dSupported()

bool IsConvolution2dSupported ( const TensorInfo input,
const TensorInfo output,
const Convolution2dDescriptor descriptor,
const TensorInfo weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 355 of file BackendHelper.cpp.

361 {
362  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
363  TensorInfos infos{input, output, weights, biasesVal};
364 
365  Optional<const BackendOptions::BackendOption> capability ;
366  if (!m_BackendId.IsUndefined())
367  {
368  capability = GetCapability("NonConstWeights", m_BackendId);
369  if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
370  {
371  if (!weights.IsConstant())
372  {
373  if (reasonIfUnsupported.has_value())
374  {
375  reasonIfUnsupported.value() =
376  "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
377  "Convolution2d weights are set as dynamic (non constant). ";
378  }
379  return false;
380  }
381  if (descriptor.m_BiasEnabled && !biasesVal.IsConstant())
382  {
383  if (reasonIfUnsupported.has_value())
384  {
385  reasonIfUnsupported.value() =
386  "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
387  "Convolution2d biases are set as dynamic (non constant). ";
388  }
389  return false;
390  }
391 
392  // At the first stage we will only print a warning. this is to give
393  // backend developers a chance to adopt and read weights from input slots.
394  ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
395  "If you are a backend developer please find more information in our "
396  "doxygen documentation on github https://github.com/ARM-software/armnn "
397  "under the keyword 'ConstTensorsAsInputs'.";
398  }
399  }
400 
401  return m_LayerSupport->IsLayerSupported(LayerType::Convolution2d,
402  infos,
403  descriptor,
404  EmptyOptional(),
405  EmptyOptional(),
406  reasonIfUnsupported);
407 }

References ARMNN_LOG, armnn::Convolution2d, armnn::GetCapability(), OptionalBase::has_value(), TensorInfo::IsConstant(), BackendId::IsUndefined(), Convolution2dDescriptor::m_BiasEnabled, OptionalReferenceSwitch< std::is_reference< T >::value, T >::value(), and armnn::warning.
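
The listing above shows that, when the backend does not advertise the "NonConstWeights" capability, the weights (and biases, if enabled) must be flagged as constant or the query fails before the backend is even consulted. A minimal sketch under that assumption, with illustrative NHWC shapes and a hypothetical helper name:

#include <armnn/BackendHelper.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>

bool CheckConv2dSupport(armnn::LayerSupportHandle& handle)
{
    armnn::Convolution2dDescriptor descriptor;
    descriptor.m_StrideX     = 1;
    descriptor.m_StrideY     = 1;
    descriptor.m_BiasEnabled = false;
    descriptor.m_DataLayout  = armnn::DataLayout::NHWC;

    // 1x16x16x3 input, eight 3x3 filters, 1x14x14x8 output; shapes are illustrative.
    armnn::TensorInfo input  (armnn::TensorShape({1, 16, 16, 3}), armnn::DataType::Float32);
    armnn::TensorInfo output (armnn::TensorShape({1, 14, 14, 8}), armnn::DataType::Float32);
    armnn::TensorInfo weights(armnn::TensorShape({8, 3, 3, 3}),   armnn::DataType::Float32);

    // Mark the weights as constant; otherwise backends without the
    // "NonConstWeights" capability are rejected by the check shown above.
    weights.SetConstant(true);

    return handle.IsConvolution2dSupported(input, output, descriptor, weights,
                                           armnn::EmptyOptional());
}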

◆ IsConvolution3dSupported()

bool IsConvolution3dSupported ( const TensorInfo input,
const TensorInfo output,
const Convolution3dDescriptor descriptor,
const TensorInfo weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 409 of file BackendHelper.cpp.

415 {
416  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
417  TensorInfos infos{input, output, weights, biasesVal};
418 
419  return m_LayerSupport->IsLayerSupported(LayerType::Convolution3d,
420  infos,
421  descriptor,
422  EmptyOptional(),
423  EmptyOptional(),
424  reasonIfUnsupported);
425 }

References armnn::Convolution3d, OptionalBase::has_value(), and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

◆ IsDebugSupported()

bool IsDebugSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 427 of file BackendHelper.cpp.

430 {
431  TensorInfos infos{input, output};
432 
433  return m_LayerSupport->IsLayerSupported(LayerType::Debug,
434  infos,
435  BaseDescriptor(),
436  EmptyOptional(),
437  EmptyOptional(),
438  reasonIfUnsupported);
439 }

References armnn::Debug.

◆ IsDepthToSpaceSupported()

bool IsDepthToSpaceSupported ( const TensorInfo input,
const TensorInfo output,
const DepthToSpaceDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 441 of file BackendHelper.cpp.

445 {
446  TensorInfos infos{input, output};
447 
448  return m_LayerSupport->IsLayerSupported(LayerType::DepthToSpace,
449  infos,
450  descriptor,
451  EmptyOptional(),
452  EmptyOptional(),
453  reasonIfUnsupported);
454 }

References armnn::DepthToSpace.

◆ IsDepthwiseConvolutionSupported()

bool IsDepthwiseConvolutionSupported ( const TensorInfo input,
const TensorInfo output,
const DepthwiseConvolution2dDescriptor descriptor,
const TensorInfo weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 456 of file BackendHelper.cpp.

463 {
464  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
465  TensorInfos infos{input, output, weights, biasesVal};
466 
467  Optional<const BackendOptions::BackendOption> capability ;
468  if (!m_BackendId.IsUndefined())
469  {
470  capability = GetCapability("NonConstWeights", m_BackendId);
471  if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
472  {
473  if (!weights.IsConstant())
474  {
475  if (reasonIfUnsupported.has_value())
476  {
477  reasonIfUnsupported.value() =
478  "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
479  "DepthwiseConvolution2d weights are set as dynamic (non constant). ";
480  }
481  return false;
482  }
483  if (descriptor.m_BiasEnabled && !biasesVal.IsConstant())
484  {
485  if (reasonIfUnsupported.has_value())
486  {
487  reasonIfUnsupported.value() =
488  "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
489  "DepthwiseConvolution2d biases are set as dynamic (non constant). ";
490  }
491  return false;
492  }
493  // At the first stage we will only print a warning. this is to give
494  // backend developers a chance to adopt and read weights from input slots.
495  ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
496  "If you are a backend developer please find more information in our "
497  "doxygen documentation on github https://github.com/ARM-software/armnn "
498  "under the keyword 'ConstTensorsAsInputs'.";
499  }
500  }
501 
502  return m_LayerSupport->IsLayerSupported(LayerType::DepthwiseConvolution2d,
503  infos,
504  descriptor,
505  EmptyOptional(),
506  EmptyOptional(),
507  reasonIfUnsupported);
508 }

References ARMNN_LOG, armnn::DepthwiseConvolution2d, armnn::GetCapability(), OptionalBase::has_value(), TensorInfo::IsConstant(), BackendId::IsUndefined(), DepthwiseConvolution2dDescriptor::m_BiasEnabled, OptionalReferenceSwitch< std::is_reference< T >::value, T >::value(), and armnn::warning.

◆ IsDequantizeSupported()

bool IsDequantizeSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 510 of file BackendHelper.cpp.

513 {
514  TensorInfos infos{input, output};
515 
516  return m_LayerSupport->IsLayerSupported(LayerType::Dequantize,
517  infos,
518  BaseDescriptor(),
519  EmptyOptional(),
520  EmptyOptional(),
521  reasonIfUnsupported);
522 }

References armnn::Dequantize.

◆ IsDetectionPostProcessSupported()

bool IsDetectionPostProcessSupported ( const TensorInfo boxEncodings,
const TensorInfo scores,
const TensorInfo anchors,
const TensorInfo detectionBoxes,
const TensorInfo detectionClasses,
const TensorInfo detectionScores,
const TensorInfo numDetections,
const DetectionPostProcessDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 524 of file BackendHelper.cpp.

533 {
534  TensorInfos infos{boxEncodings, scores, anchors, detectionBoxes, detectionClasses, detectionScores, numDetections};
535 
536  return m_LayerSupport->IsLayerSupported(LayerType::DetectionPostProcess,
537  infos,
538  descriptor,
539  EmptyOptional(),
540  EmptyOptional(),
541  reasonIfUnsupported);
542 }

References armnn::DetectionPostProcess.

◆ IsDilatedDepthwiseConvolutionSupported()

bool IsDilatedDepthwiseConvolutionSupported ( const TensorInfo input,
const TensorInfo output,
const DepthwiseConvolution2dDescriptor descriptor,
const TensorInfo weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 544 of file BackendHelper.cpp.

551 {
552  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
553  TensorInfos infos{input, output, weights, biasesVal};
554 
555  Optional<const BackendOptions::BackendOption> capability ;
556  if (!m_BackendId.IsUndefined())
557  {
558  capability = GetCapability("NonConstWeights", m_BackendId);
559  if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
560  {
561  if (!weights.IsConstant())
562  {
563  if (reasonIfUnsupported.has_value())
564  {
565  reasonIfUnsupported.value() =
566  "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
567  "DilatedDepthwiseConvolution2d weights are set as dynamic (non constant). ";
568  }
569  return false;
570  }
571  if (descriptor.m_BiasEnabled && !biasesVal.IsConstant())
572  {
573  if (reasonIfUnsupported.has_value())
574  {
575  reasonIfUnsupported.value() =
576  "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
577  "DilatedDepthwiseConvolution2d biases are set as dynamic (non constant). ";
578  }
579  return false;
580  }
581  // At the first stage we will only print a warning. this is to give
582  // backend developers a chance to adopt and read weights from input slots.
583  ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
584  "If you are a backend developer please find more information in our "
585  "doxygen documentation on github https://github.com/ARM-software/armnn "
586  "under the keyword 'ConstTensorsAsInputs'.";
587  }
588  }
589 
590  return m_LayerSupport->IsLayerSupported(LayerType::DepthwiseConvolution2d,
591  infos,
592  descriptor,
593  EmptyOptional(),
594  EmptyOptional(),
595  reasonIfUnsupported);
596 }

References ARMNN_LOG, armnn::DepthwiseConvolution2d, armnn::GetCapability(), OptionalBase::has_value(), TensorInfo::IsConstant(), BackendId::IsUndefined(), DepthwiseConvolution2dDescriptor::m_BiasEnabled, OptionalReferenceSwitch< std::is_reference< T >::value, T >::value(), and armnn::warning.

◆ IsDivisionSupported()

bool IsDivisionSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 598 of file BackendHelper.cpp.

602 {
603  TensorInfos infos{input0, input1, output};
604 
605  return m_LayerSupport->IsLayerSupported(LayerType::Division,
606  infos,
607  BaseDescriptor(),
608  EmptyOptional(),
609  EmptyOptional(),
610  reasonIfUnsupported);
611 }

References armnn::Division.

◆ IsElementwiseBinarySupported()

bool IsElementwiseBinarySupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const ElementwiseBinaryDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 613 of file BackendHelper.cpp.

618 {
619  TensorInfos infos{input0, input1, output};
620 
621  return m_LayerSupport->IsLayerSupported(LayerType::ElementwiseBinary,
622  infos,
623  descriptor,
624  EmptyOptional(),
625  EmptyOptional(),
626  reasonIfUnsupported);
627 }

References armnn::ElementwiseBinary.
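
A hedged sketch of this query: the descriptor selects which binary operation is being asked about (Add here), so one call covers the operations that also have dedicated queries such as IsAdditionSupported. Shapes are illustrative.

#include <armnn/BackendHelper.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>

bool CheckElementwiseAddSupport(armnn::LayerSupportHandle& handle)
{
    armnn::TensorInfo input0(armnn::TensorShape({2, 4}), armnn::DataType::Float32);
    armnn::TensorInfo input1(armnn::TensorShape({2, 4}), armnn::DataType::Float32);
    armnn::TensorInfo output(armnn::TensorShape({2, 4}), armnn::DataType::Float32);

    // The descriptor records which binary operation is being queried.
    armnn::ElementwiseBinaryDescriptor descriptor(armnn::BinaryOperation::Add);

    return handle.IsElementwiseBinarySupported(input0, input1, output, descriptor);
}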

◆ IsElementwiseUnarySupported()

bool IsElementwiseUnarySupported ( const TensorInfo input,
const TensorInfo output,
const ElementwiseUnaryDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 629 of file BackendHelper.cpp.

633 {
634  TensorInfos infos{input, output};
635 
636  return m_LayerSupport->IsLayerSupported(LayerType::ElementwiseUnary,
637  infos,
638  descriptor,
639  EmptyOptional(),
640  EmptyOptional(),
641  reasonIfUnsupported);
642 }

References armnn::ElementwiseUnary.

◆ IsFakeQuantizationSupported()

bool IsFakeQuantizationSupported ( const TensorInfo input,
const FakeQuantizationDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 644 of file BackendHelper.cpp.

647 {
648  TensorInfos infos{input};
649 
650  return m_LayerSupport->IsLayerSupported(LayerType::FakeQuantization,
651  infos,
652  descriptor,
653  EmptyOptional(),
654  EmptyOptional(),
655  reasonIfUnsupported);
656 }

References armnn::FakeQuantization.

◆ IsFillSupported()

bool IsFillSupported ( const TensorInfo input,
const TensorInfo output,
const FillDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 658 of file BackendHelper.cpp.

662 {
663  TensorInfos infos{input, output};
664 
665  return m_LayerSupport->IsLayerSupported(LayerType::Fill,
666  infos,
667  descriptor,
668  EmptyOptional(),
669  EmptyOptional(),
670  reasonIfUnsupported);
671 }

References armnn::Fill.

◆ IsFloorSupported()

bool IsFloorSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 673 of file BackendHelper.cpp.

676 {
677  TensorInfos infos{input, output};
678 
679  return m_LayerSupport->IsLayerSupported(LayerType::Floor,
680  infos,
681  BaseDescriptor(),
682  EmptyOptional(),
683  EmptyOptional(),
684  reasonIfUnsupported);
685 }

References armnn::Floor.

◆ IsFullyConnectedSupported()

bool IsFullyConnectedSupported ( const TensorInfo input,
const TensorInfo output,
const TensorInfo weights,
const TensorInfo biases,
const FullyConnectedDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 687 of file BackendHelper.cpp.

693 {
694  TensorInfos infos{input, output, weights, biases};
695 
696  Optional<const BackendOptions::BackendOption> capability;
697  if (!m_BackendId.IsUndefined())
698  {
699  capability = GetCapability("NonConstWeights", m_BackendId);
700  if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
701  {
702  if (!descriptor.m_ConstantWeights)
703  {
704  if (reasonIfUnsupported.has_value())
705  {
706  reasonIfUnsupported.value() =
707  "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
708  "FullyConnected descriptor indicates that weights are dynamic (non constant). ";
709  }
710  return false;
711  }
712  if (!weights.IsConstant())
713  {
714  if (reasonIfUnsupported.has_value())
715  {
716  reasonIfUnsupported.value() =
717  "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
718  "FullyConnected weights are set as dynamic (non constant). ";
719  }
720 
721  return false;
722  }
723  if (descriptor.m_BiasEnabled && !biases.IsConstant())
724  {
725  if (reasonIfUnsupported.has_value())
726  {
727  reasonIfUnsupported.value() =
728  "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
729  "FullyConnected biases are set as dynamic (non constant). ";
730  }
731  return false;
732  }
733 
734  // At the first stage we will only print a warning. this is to give
735  // backend developers a chance to adopt and read weights from input slots.
736  ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
737  "If you are a backend developer please find more information in our "
738  "doxygen documentation on github https://github.com/ARM-software/armnn "
739  "under the keyword 'ConstTensorsAsInputs'.";
740  }
741  }
742 
743  return m_LayerSupport->IsLayerSupported(LayerType::FullyConnected,
744  infos,
745  descriptor,
746  EmptyOptional(),
747  EmptyOptional(),
748  reasonIfUnsupported);
749 }

References ARMNN_LOG, armnn::FullyConnected, armnn::GetCapability(), OptionalBase::has_value(), TensorInfo::IsConstant(), BackendId::IsUndefined(), FullyConnectedDescriptor::m_BiasEnabled, FullyConnectedDescriptor::m_ConstantWeights, OptionalReferenceSwitch< std::is_reference< T >::value, T >::value(), and armnn::warning.
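
The listing above checks the descriptor's m_ConstantWeights flag as well as the weight and bias TensorInfos, so all three must agree when the backend lacks the "NonConstWeights" capability. A hedged sketch under that assumption, with illustrative shapes and a hypothetical helper name:

#include <armnn/BackendHelper.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>

bool CheckFullyConnectedSupport(armnn::LayerSupportHandle& handle)
{
    armnn::FullyConnectedDescriptor descriptor;
    descriptor.m_BiasEnabled     = true;
    descriptor.m_ConstantWeights = true;   // must match the TensorInfo flags below

    armnn::TensorInfo input  (armnn::TensorShape({1, 32}),  armnn::DataType::Float32);
    armnn::TensorInfo output (armnn::TensorShape({1, 10}),  armnn::DataType::Float32);
    armnn::TensorInfo weights(armnn::TensorShape({32, 10}), armnn::DataType::Float32);
    armnn::TensorInfo biases (armnn::TensorShape({10}),     armnn::DataType::Float32);

    // Flag weights and biases as constant so the "NonConstWeights" check above passes
    // on backends that cannot consume dynamic weights.
    weights.SetConstant(true);
    biases.SetConstant(true);

    return handle.IsFullyConnectedSupported(input, output, weights, biases, descriptor);
}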

◆ IsGatherNdSupported()

bool IsGatherNdSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 767 of file BackendHelper.cpp.

771 {
772  TensorInfos infos{input0, input1, output};
773 
774  return m_LayerSupport->IsLayerSupported(LayerType::GatherNd,
775  infos,
776  BaseDescriptor(),
777  EmptyOptional(),
778  EmptyOptional(),
779  reasonIfUnsupported);
780 }

References armnn::GatherNd.

◆ IsGatherSupported()

bool IsGatherSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const GatherDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 751 of file BackendHelper.cpp.

756 {
757  TensorInfos infos{input0, input1, output};
758 
759  return m_LayerSupport->IsLayerSupported(LayerType::Gather,
760  infos,
761  descriptor,
762  EmptyOptional(),
763  EmptyOptional(),
764  reasonIfUnsupported);
765 }

References armnn::Gather.

◆ IsInputSupported()

bool IsInputSupported ( const TensorInfo input,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 782 of file BackendHelper.cpp.

784 {
785  TensorInfos infos{input};
786 
787  return m_LayerSupport->IsLayerSupported(LayerType::Input,
788  infos,
789  BaseDescriptor(),
790  EmptyOptional(),
791  EmptyOptional(),
792  reasonIfUnsupported);
793 }

References armnn::Input.

◆ IsInstanceNormalizationSupported()

bool IsInstanceNormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const InstanceNormalizationDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 795 of file BackendHelper.cpp.

800 {
801  TensorInfos infos{input, output};
802 
803  return m_LayerSupport->IsLayerSupported(LayerType::InstanceNormalization,
804  infos,
805  descriptor,
806  EmptyOptional(),
807  EmptyOptional(),
808  reasonIfUnsupported);
809 }

References armnn::InstanceNormalization.

◆ IsL2NormalizationSupported()

bool IsL2NormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const L2NormalizationDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 811 of file BackendHelper.cpp.

815 {
816  TensorInfos infos{input, output};
817 
818  return m_LayerSupport->IsLayerSupported(LayerType::L2Normalization,
819  infos,
820  descriptor,
821  EmptyOptional(),
822  EmptyOptional(),
823  reasonIfUnsupported);
824 }

References armnn::L2Normalization.

◆ IsLogicalBinarySupported()

bool IsLogicalBinarySupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const LogicalBinaryDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 826 of file BackendHelper.cpp.

831 {
832  TensorInfos infos{input0, input1, output};
833 
834  return m_LayerSupport->IsLayerSupported(LayerType::LogicalBinary,
835  infos,
836  descriptor,
837  EmptyOptional(),
838  EmptyOptional(),
839  reasonIfUnsupported);
840 }

References armnn::LogicalBinary.

◆ IsLogicalUnarySupported()

bool IsLogicalUnarySupported ( const TensorInfo input,
const TensorInfo output,
const ElementwiseUnaryDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 842 of file BackendHelper.cpp.

846 {
847  TensorInfos infos{input, output};
848 
849  return m_LayerSupport->IsLayerSupported(LayerType::ElementwiseUnary,
850  infos,
851  descriptor,
852  EmptyOptional(),
853  EmptyOptional(),
854  reasonIfUnsupported);
855 }

References armnn::ElementwiseUnary.

◆ IsLogSoftmaxSupported()

bool IsLogSoftmaxSupported ( const TensorInfo input,
const TensorInfo output,
const LogSoftmaxDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 857 of file BackendHelper.cpp.

861 {
862  TensorInfos infos{input, output};
863 
864  return m_LayerSupport->IsLayerSupported(LayerType::LogSoftmax,
865  infos,
866  descriptor,
867  EmptyOptional(),
868  EmptyOptional(),
869  reasonIfUnsupported);
870 }

References armnn::LogSoftmax.

◆ IsLstmSupported()

bool IsLstmSupported ( const TensorInfo input,
const TensorInfo outputStateIn,
const TensorInfo cellStateIn,
const TensorInfo scratchBuffer,
const TensorInfo outputStateOut,
const TensorInfo cellStateOut,
const TensorInfo output,
const LstmDescriptor descriptor,
const LstmInputParamsInfo paramsInfo,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 872 of file BackendHelper.cpp.

882 {
883  TensorInfos infos{input, outputStateIn, cellStateIn, scratchBuffer, outputStateOut, cellStateOut, output};
884 
885  return m_LayerSupport->IsLayerSupported(LayerType::Lstm,
886  infos,
887  descriptor,
888  paramsInfo,
889  EmptyOptional(),
890  reasonIfUnsupported);
891 }

References armnn::Lstm.

◆ IsMaximumSupported()

bool IsMaximumSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 893 of file BackendHelper.cpp.

897 {
898  TensorInfos infos{input0, input1, output};
899 
900  return m_LayerSupport->IsLayerSupported(LayerType::Maximum,
901  infos,
902  BaseDescriptor(),
903  EmptyOptional(),
904  EmptyOptional(),
905  reasonIfUnsupported);
906 }

References armnn::Maximum.

◆ IsMeanSupported()

bool IsMeanSupported ( const TensorInfo input,
const TensorInfo output,
const MeanDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 908 of file BackendHelper.cpp.

912 {
913  TensorInfos infos{input, output};
914 
915  return m_LayerSupport->IsLayerSupported(LayerType::Mean,
916  infos,
917  descriptor,
918  EmptyOptional(),
919  EmptyOptional(),
920  reasonIfUnsupported);
921 }

References armnn::Mean.

◆ IsMemCopySupported()

bool IsMemCopySupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 923 of file BackendHelper.cpp.

926 {
927  TensorInfos infos{input, output};
928 
929  return m_LayerSupport->IsLayerSupported(LayerType::MemCopy,
930  infos,
931  BaseDescriptor(),
932  EmptyOptional(),
933  EmptyOptional(),
934  reasonIfUnsupported);
935 }

References armnn::MemCopy.

◆ IsMemImportSupported()

bool IsMemImportSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 937 of file BackendHelper.cpp.

940 {
941  TensorInfos infos{input, output};
942 
943  return m_LayerSupport->IsLayerSupported(LayerType::MemImport,
944  infos,
945  BaseDescriptor(),
946  EmptyOptional(),
947  EmptyOptional(),
948  reasonIfUnsupported);
949 }

References armnn::MemImport.

◆ IsMergeSupported()

bool IsMergeSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 951 of file BackendHelper.cpp.

955 {
956  TensorInfos infos{input0, input1, output};
957 
958  return m_LayerSupport->IsLayerSupported(LayerType::Merge,
959  infos,
960  BaseDescriptor(),
961  EmptyOptional(),
962  EmptyOptional(),
963  reasonIfUnsupported);
964 }

References armnn::Merge.

◆ IsMinimumSupported()

bool IsMinimumSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 966 of file BackendHelper.cpp.

970 {
971  TensorInfos infos{input0, input1, output};
972 
973  return m_LayerSupport->IsLayerSupported(LayerType::Minimum,
974  infos,
975  BaseDescriptor(),
976  EmptyOptional(),
977  EmptyOptional(),
978  reasonIfUnsupported);
979 }

References armnn::Minimum.

◆ IsMultiplicationSupported()

bool IsMultiplicationSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 981 of file BackendHelper.cpp.

985 {
986  TensorInfos infos{input0, input1, output};
987 
988  return m_LayerSupport->IsLayerSupported(LayerType::Multiplication,
989  infos,
990  BaseDescriptor(),
991  EmptyOptional(),
992  EmptyOptional(),
993  reasonIfUnsupported);
994 }

References armnn::Multiplication.

◆ IsNormalizationSupported()

bool IsNormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const NormalizationDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 996 of file BackendHelper.cpp.

1000 {
1001  TensorInfos infos{input, output};
1002 
1003  return m_LayerSupport->IsLayerSupported(LayerType::Normalization,
1004  infos,
1005  descriptor,
1006  EmptyOptional(),
1007  EmptyOptional(),
1008  reasonIfUnsupported);
1009 }

References armnn::Normalization.

◆ IsOutputSupported()

bool IsOutputSupported ( const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1011 of file BackendHelper.cpp.

1013 {
1014  TensorInfos infos{output};
1015 
1016  return m_LayerSupport->IsLayerSupported(LayerType::Output,
1017  infos,
1018  BaseDescriptor(),
1019  EmptyOptional(),
1020  EmptyOptional(),
1021  reasonIfUnsupported);
1022 }

References armnn::Output.

◆ IsPadSupported()

bool IsPadSupported ( const TensorInfo input,
const TensorInfo output,
const PadDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1024 of file BackendHelper.cpp.

1028 {
1029  TensorInfos infos{input, output};
1030 
1031  return m_LayerSupport->IsLayerSupported(LayerType::Pad,
1032  infos,
1033  descriptor,
1034  EmptyOptional(),
1035  EmptyOptional(),
1036  reasonIfUnsupported);
1037 }

References armnn::Pad.

◆ IsPermuteSupported()

bool IsPermuteSupported ( const TensorInfo input,
const TensorInfo output,
const PermuteDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1039 of file BackendHelper.cpp.

1043 {
1044  TensorInfos infos{input, output};
1045 
1046  return m_LayerSupport->IsLayerSupported(LayerType::Permute,
1047  infos,
1048  descriptor,
1049  EmptyOptional(),
1050  EmptyOptional(),
1051  reasonIfUnsupported);
1052 }

References armnn::Permute.

◆ IsPooling2dSupported()

bool IsPooling2dSupported ( const TensorInfo input,
const TensorInfo output,
const Pooling2dDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1054 of file BackendHelper.cpp.

1058 {
1059  TensorInfos infos{input, output};
1060 
1061  return m_LayerSupport->IsLayerSupported(LayerType::Pooling2d,
1062  infos,
1063  descriptor,
1064  EmptyOptional(),
1065  EmptyOptional(),
1066  reasonIfUnsupported);
1067 }

References armnn::Pooling2d.

◆ IsPooling3dSupported()

bool IsPooling3dSupported ( const TensorInfo input,
const TensorInfo output,
const Pooling3dDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1069 of file BackendHelper.cpp.

1073 {
1074  TensorInfos infos{input, output};
1075 
1076  return m_LayerSupport->IsLayerSupported(LayerType::Pooling3d,
1077  infos,
1078  descriptor,
1079  EmptyOptional(),
1080  EmptyOptional(),
1081  reasonIfUnsupported);
1082 }

References armnn::Pooling3d.

◆ IsPreCompiledSupported()

bool IsPreCompiledSupported ( const TensorInfo input,
const PreCompiledDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1084 of file BackendHelper.cpp.

1087 {
1088  TensorInfos infos{input};
1089 
1090  return m_LayerSupport->IsLayerSupported(LayerType::PreCompiled,
1091  infos,
1092  descriptor,
1093  EmptyOptional(),
1094  EmptyOptional(),
1095  reasonIfUnsupported);
1096 }

References armnn::PreCompiled.

◆ IsPreluSupported()

bool IsPreluSupported ( const TensorInfo input,
const TensorInfo alpha,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1098 of file BackendHelper.cpp.

1102 {
1103  TensorInfos infos{input, alpha, output};
1104 
1105  return m_LayerSupport->IsLayerSupported(LayerType::Prelu,
1106  infos,
1107  BaseDescriptor(),
1108  EmptyOptional(),
1109  EmptyOptional(),
1110  reasonIfUnsupported);
1111 }

References armnn::Prelu.

◆ IsQLstmSupported()

bool IsQLstmSupported ( const TensorInfo input,
const TensorInfo previousOutputIn,
const TensorInfo previousCellStateIn,
const TensorInfo outputStateOut,
const TensorInfo cellStateOut,
const TensorInfo output,
const QLstmDescriptor descriptor,
const LstmInputParamsInfo paramsInfo,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1127 of file BackendHelper.cpp.

1136 {
1137  TensorInfos infos{input, previousOutputIn, previousCellStateIn, outputStateOut, cellStateOut, output};
1138 
1139  return m_LayerSupport->IsLayerSupported(LayerType::QLstm,
1140  infos,
1141  descriptor,
1142  paramsInfo,
1143  EmptyOptional(),
1144  reasonIfUnsupported);
1145 }

References armnn::QLstm.

◆ IsQuantizedLstmSupported()

bool IsQuantizedLstmSupported ( const TensorInfo input,
const TensorInfo previousCellStateIn,
const TensorInfo previousOutputIn,
const TensorInfo cellStateOut,
const TensorInfo output,
const QuantizedLstmInputParamsInfo paramsInfo,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1147 of file BackendHelper.cpp.

1154 {
1155  TensorInfos infos{input, previousCellStateIn, previousOutputIn, cellStateOut, output};
1156 
1157  return m_LayerSupport->IsLayerSupported(LayerType::QuantizedLstm,
1158  infos,
1159  BaseDescriptor(),
1160  EmptyOptional(),
1161  paramsInfo,
1162  reasonIfUnsupported);
1163 }

References armnn::QuantizedLstm.

◆ IsQuantizeSupported()

bool IsQuantizeSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1113 of file BackendHelper.cpp.

1116 {
1117  TensorInfos infos{input, output};
1118 
1119  return m_LayerSupport->IsLayerSupported(LayerType::Quantize,
1120  infos,
1121  BaseDescriptor(),
1122  EmptyOptional(),
1123  EmptyOptional(),
1124  reasonIfUnsupported);
1125 }

References armnn::Quantize.
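
A hedged sketch of this query: the quantization parameters live on the output TensorInfo, here asymmetric signed 8-bit with an illustrative scale and zero-point.

#include <armnn/BackendHelper.hpp>
#include <armnn/Tensor.hpp>

bool CheckQuantizeSupport(armnn::LayerSupportHandle& handle)
{
    // Float input quantized to QAsymmS8 with scale 0.05 and offset 0.
    armnn::TensorInfo input (armnn::TensorShape({1, 64}), armnn::DataType::Float32);
    armnn::TensorInfo output(armnn::TensorShape({1, 64}), armnn::DataType::QAsymmS8, 0.05f, 0);

    return handle.IsQuantizeSupported(input, output);
}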

◆ IsRankSupported()

bool IsRankSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1165 of file BackendHelper.cpp.

1168 {
1169  TensorInfos infos{input, output};
1170 
1171  return m_LayerSupport->IsLayerSupported(LayerType::Rank,
1172  infos,
1173  BaseDescriptor(),
1174  EmptyOptional(),
1175  EmptyOptional(),
1176  reasonIfUnsupported);
1177 }

References armnn::Rank.

◆ IsReduceSupported()

bool IsReduceSupported ( const TensorInfo input,
const TensorInfo output,
const ReduceDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1179 of file BackendHelper.cpp.

1183 {
1184  TensorInfos infos{input, output};
1185 
1186  return m_LayerSupport->IsLayerSupported(LayerType::Reduce,
1187  infos,
1188  descriptor,
1189  EmptyOptional(),
1190  EmptyOptional(),
1191  reasonIfUnsupported);
1192 }

References armnn::Reduce.

◆ IsReshapeSupported()

bool IsReshapeSupported ( const TensorInfo input,
const TensorInfo output,
const ReshapeDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1194 of file BackendHelper.cpp.

1198 {
1199  TensorInfos infos{input, output};
1200 
1201  return m_LayerSupport->IsLayerSupported(LayerType::Reshape,
1202  infos,
1203  descriptor,
1204  EmptyOptional(),
1205  EmptyOptional(),
1206  reasonIfUnsupported);
1207 }

References armnn::Reshape.

◆ IsResizeSupported()

bool IsResizeSupported ( const TensorInfo input,
const TensorInfo output,
const ResizeDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1209 of file BackendHelper.cpp.

1213 {
1214  TensorInfos infos{input, output};
1215 
1216  return m_LayerSupport->IsLayerSupported(LayerType::Resize,
1217  infos,
1218  descriptor,
1219  EmptyOptional(),
1220  EmptyOptional(),
1221  reasonIfUnsupported);
1222 }

References armnn::Resize.

◆ IsReverseV2Supported()

bool IsReverseV2Supported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1224 of file BackendHelper.cpp.

1228 {
1229  TensorInfos infos{input0, input1, output};
1230 
1231  return m_LayerSupport->IsLayerSupported(LayerType::ReverseV2,
1232  infos,
1233  BaseDescriptor(),
1234  EmptyOptional(),
1235  EmptyOptional(),
1236  reasonIfUnsupported);
1237 }

References armnn::ReverseV2.

◆ IsShapeSupported()

bool IsShapeSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1239 of file BackendHelper.cpp.

1242 {
1243  TensorInfos infos{input, output};
1244 
1245  return m_LayerSupport->IsLayerSupported(LayerType::Shape,
1246  infos,
1247  BaseDescriptor(),
1248  EmptyOptional(),
1249  EmptyOptional(),
1250  reasonIfUnsupported);
1251 }

References armnn::Shape.

◆ IsSliceSupported()

bool IsSliceSupported ( const TensorInfo input,
const TensorInfo output,
const SliceDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
)

Definition at line 1253 of file BackendHelper.cpp.

1257 {
1258  TensorInfos infos{input, output};
1259 
1260  return m_LayerSupport->IsLayerSupported(LayerType::Slice,
1261  infos,
1262  descriptor,
1263  EmptyOptional(),
1264  EmptyOptional(),
1265  reasonIfUnsupported);
1266 }

References armnn::Slice.

◆ IsSoftmaxSupported()

bool IsSoftmaxSupported ( const TensorInfo & input,
const TensorInfo & output,
const SoftmaxDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional()
)

Definition at line 1268 of file BackendHelper.cpp.

{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Softmax,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

References armnn::Softmax.
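A hedged example of a quantized softmax query; the 1/256 output scale is a common backend expectation rather than something implied by this API, and the helper is illustrative.

#include <armnn/BackendHelper.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>

// Illustrative only: quantized softmax over the last axis.
bool CheckSoftmax(armnn::LayerSupportHandle& handle)
{
    using namespace armnn;

    SoftmaxDescriptor desc;
    desc.m_Beta = 1.0f;
    desc.m_Axis = -1;

    // Quantized tensors carry scale/offset; some backends expect a 1/256 scale on the softmax output.
    TensorInfo input(TensorShape({1, 10}), DataType::QAsymmU8, 0.05f, 0);
    TensorInfo output(TensorShape({1, 10}), DataType::QAsymmU8, 1.0f / 256.0f, 0);

    return handle.IsSoftmaxSupported(input, output, desc);
}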

◆ IsSpaceToBatchNdSupported()

bool IsSpaceToBatchNdSupported ( const TensorInfo & input,
const TensorInfo & output,
const SpaceToBatchNdDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional()
)

Definition at line 1283 of file BackendHelper.cpp.

{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::SpaceToBatchNd,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

References armnn::SpaceToBatchNd.

◆ IsSpaceToDepthSupported()

bool IsSpaceToDepthSupported ( const TensorInfo & input,
const TensorInfo & output,
const SpaceToDepthDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional()
)

Definition at line 1298 of file BackendHelper.cpp.

{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::SpaceToDepth,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

References armnn::SpaceToDepth.

◆ IsSplitterSupported()

bool IsSplitterSupported ( const TensorInfo & input,
const std::vector< std::reference_wrapper< TensorInfo >> & outputs,
const ViewsDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional()
)

Definition at line 1313 of file BackendHelper.cpp.

{
    TensorInfos infos{input};
    for (TensorInfo outInfo : outputs)
    {
        infos.push_back(outInfo);
    }

    return m_LayerSupport->IsLayerSupported(LayerType::Splitter,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

References armnn::Splitter.
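Because outputs is a vector of std::reference_wrapper<TensorInfo>, the per-view infos must outlive the call. A sketch under the assumption that ViewsDescriptor::SetViewOriginCoord and SetViewSize describe the two views; the helper and values are illustrative.

#include <armnn/BackendHelper.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>

#include <cstdint>
#include <functional>
#include <vector>

// Illustrative only: split a 1x4x4x2 tensor into two 1x4x4x1 views along dimension 3.
bool CheckSplitter(armnn::LayerSupportHandle& handle)
{
    using namespace armnn;

    ViewsDescriptor desc(2, 4); // two views, four dimensions each
    for (uint32_t view = 0; view < 2u; ++view)
    {
        desc.SetViewOriginCoord(view, 3, view); // offset each view along the channel dimension
        desc.SetViewSize(view, 0, 1);
        desc.SetViewSize(view, 1, 4);
        desc.SetViewSize(view, 2, 4);
        desc.SetViewSize(view, 3, 1);
    }

    TensorInfo input(TensorShape({1, 4, 4, 2}), DataType::Float32);
    TensorInfo out0(TensorShape({1, 4, 4, 1}), DataType::Float32);
    TensorInfo out1(TensorShape({1, 4, 4, 1}), DataType::Float32);

    std::vector<std::reference_wrapper<TensorInfo>> outputs{out0, out1};
    return handle.IsSplitterSupported(input, outputs, desc);
}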

◆ IsStackSupported()

bool IsStackSupported ( const std::vector< const TensorInfo * > & inputs,
const TensorInfo & output,
const StackDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional()
)

Definition at line 1332 of file BackendHelper.cpp.

{
    TensorInfos infos;
    for (const TensorInfo* inputInfo : inputs)
    {
        infos.push_back(*inputInfo);
    }
    infos.push_back(output);

    return m_LayerSupport->IsLayerSupported(LayerType::Stack,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

References armnn::Stack.
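A sketch of the pointer-based inputs form; the StackDescriptor fields and shapes are illustrative.

#include <armnn/BackendHelper.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>

#include <vector>

// Illustrative only: stack two 3x4 tensors along a new leading axis.
bool CheckStack(armnn::LayerSupportHandle& handle)
{
    using namespace armnn;

    StackDescriptor desc;
    desc.m_Axis       = 0;
    desc.m_NumInputs  = 2;
    desc.m_InputShape = TensorShape({3, 4});

    TensorInfo in0(TensorShape({3, 4}), DataType::Float32);
    TensorInfo in1(TensorShape({3, 4}), DataType::Float32);
    TensorInfo output(TensorShape({2, 3, 4}), DataType::Float32);

    std::vector<const TensorInfo*> inputs{&in0, &in1};
    return handle.IsStackSupported(inputs, output, desc);
}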

◆ IsStandInSupported()

bool IsStandInSupported ( const std::vector< const TensorInfo * > & inputs,
const std::vector< const TensorInfo * > & outputs,
const StandInDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional()
)

Definition at line 1352 of file BackendHelper.cpp.

{
    TensorInfos infos;
    for (const TensorInfo* inputInfo : inputs)
    {
        infos.push_back(*inputInfo);
    }
    for (const TensorInfo* outputInfo : outputs)
    {
        infos.push_back(*outputInfo);
    }

    return m_LayerSupport->IsLayerSupported(LayerType::StandIn,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

References armnn::StandIn.

◆ IsStridedSliceSupported()

bool IsStridedSliceSupported ( const TensorInfo & input,
const TensorInfo & output,
const StridedSliceDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional()
)

Definition at line 1376 of file BackendHelper.cpp.

{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::StridedSlice,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

References armnn::StridedSlice.
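An illustrative strided-slice query taking every second element of a vector; the mask fields are left at their defaults and the helper is hypothetical.

#include <armnn/BackendHelper.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>

// Illustrative only: stride 2 over a length-8 vector.
bool CheckStridedSlice(armnn::LayerSupportHandle& handle)
{
    using namespace armnn;

    StridedSliceDescriptor desc;
    desc.m_Begin  = {0};
    desc.m_End    = {8};
    desc.m_Stride = {2};

    TensorInfo input(TensorShape({8}), DataType::Float32);
    TensorInfo output(TensorShape({4}), DataType::Float32);

    return handle.IsStridedSliceSupported(input, output, desc);
}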

◆ IsSubtractionSupported()

bool IsSubtractionSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional()
)

Definition at line 1391 of file BackendHelper.cpp.

{
    TensorInfos infos{input0, input1, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Subtraction,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

References armnn::Subtraction.

◆ IsSwitchSupported()

bool IsSwitchSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output0,
const TensorInfo & output1,
Optional< std::string & > reasonIfUnsupported = EmptyOptional()
)

Definition at line 1406 of file BackendHelper.cpp.

{
    TensorInfos infos{input0, input1, output0, output1};

    return m_LayerSupport->IsLayerSupported(LayerType::Switch,
                                            infos,
                                            BaseDescriptor(),
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

References armnn::Switch.

◆ IsTileSupported()

bool IsTileSupported ( const TensorInfo & input,
const TensorInfo & output,
const TileDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional()
)

Definition at line 1422 of file BackendHelper.cpp.

{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Tile,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

References armnn::Tile.
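A sketch assuming TileDescriptor exposes an m_Multiples field (one repeat count per dimension); the values and helper name are illustrative.

#include <armnn/BackendHelper.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>

// Illustrative only: tile a 2x3 tensor twice along each dimension.
bool CheckTile(armnn::LayerSupportHandle& handle)
{
    using namespace armnn;

    TileDescriptor desc;
    desc.m_Multiples = {2, 2};

    TensorInfo input(TensorShape({2, 3}), DataType::Float32);
    TensorInfo output(TensorShape({4, 6}), DataType::Float32);

    return handle.IsTileSupported(input, output, desc);
}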

◆ IsTransposeConvolution2dSupported()

bool IsTransposeConvolution2dSupported ( const TensorInfo & input,
const TensorInfo & output,
const TransposeConvolution2dDescriptor & descriptor,
const TensorInfo & weights,
const Optional< TensorInfo > & biases,
Optional< std::string & > reasonIfUnsupported = EmptyOptional()
)

Definition at line 1437 of file BackendHelper.cpp.

{
    TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
    TensorInfos infos{input, output, weights, biasesVal};

    return m_LayerSupport->IsLayerSupported(LayerType::TransposeConvolution2d,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

References OptionalBase::has_value(), armnn::TransposeConvolution2d, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().
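A sketch showing how the Optional<TensorInfo> bias argument might be passed; EmptyOptional() can be passed instead when there is no bias. The descriptor values, shapes, and helper name are illustrative.

#include <armnn/BackendHelper.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>

// Illustrative only: 2x-upsampling transpose convolution with a bias, NHWC layout.
bool CheckTransposeConv2d(armnn::LayerSupportHandle& handle)
{
    using namespace armnn;

    TransposeConvolution2dDescriptor desc;
    desc.m_StrideX     = 2;
    desc.m_StrideY     = 2;
    desc.m_BiasEnabled = true;
    desc.m_DataLayout  = DataLayout::NHWC;

    TensorInfo input(TensorShape({1, 4, 4, 1}), DataType::Float32);
    TensorInfo output(TensorShape({1, 8, 8, 1}), DataType::Float32);
    TensorInfo weights(TensorShape({1, 2, 2, 1}), DataType::Float32);
    TensorInfo biasInfo(TensorShape({1}), DataType::Float32);

    // Some backends expect weight/bias infos to be marked constant.
    weights.SetConstant(true);
    biasInfo.SetConstant(true);

    return handle.IsTransposeConvolution2dSupported(input, output, desc, weights,
                                                    Optional<TensorInfo>(biasInfo));
}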

◆ IsTransposeSupported()

bool IsTransposeSupported ( const TensorInfo & input,
const TensorInfo & output,
const TransposeDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional()
)

Definition at line 1456 of file BackendHelper.cpp.

{
    TensorInfos infos{input, output};

    return m_LayerSupport->IsLayerSupported(LayerType::Transpose,
                                            infos,
                                            descriptor,
                                            EmptyOptional(),
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

References armnn::Transpose.

◆ IsUnidirectionalSequenceLstmSupported()

bool IsUnidirectionalSequenceLstmSupported ( const TensorInfo & input,
const TensorInfo & outputStateIn,
const TensorInfo & cellStateIn,
const TensorInfo & outputStateOut,
const TensorInfo & cellStateOut,
const TensorInfo & output,
const LstmDescriptor & descriptor,
const LstmInputParamsInfo & paramsInfo,
Optional< std::string & > reasonIfUnsupported = EmptyOptional()
)

Definition at line 1471 of file BackendHelper.cpp.

{
    TensorInfos infos{input, outputStateIn, cellStateIn, outputStateOut, cellStateOut, output};

    return m_LayerSupport->IsLayerSupported(LayerType::UnidirectionalSequenceLstm,
                                            infos,
                                            descriptor,
                                            paramsInfo,
                                            EmptyOptional(),
                                            reasonIfUnsupported);
}

References armnn::UnidirectionalSequenceLstm.


The documentation for this class was generated from the following files:
BackendHelper.hpp
BackendHelper.cpp