ArmNN 20.11
NeonLayerSupport Class Reference

#include <NeonLayerSupport.hpp>

Inheritance diagram for NeonLayerSupport:
NeonLayerSupport derives from LayerSupportBase, which derives from ILayerSupport.

Public Member Functions

 NeonLayerSupport (const IBackendInternal::IBackendSpecificModelContextPtr &modelContextPtr)
 
 NeonLayerSupport ()
 
 ~NeonLayerSupport ()
 
bool IsAbsSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsActivationSupported (const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsAdditionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsArgMinMaxSupported (const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchToSpaceNdSupported (const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsComparisonSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConcatSupported (const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const ConcatDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConstantSupported (const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertBf16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToBf16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToFp16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthToSpaceSupported (const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDequantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDilatedDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reason=EmptyOptional()) const override
 
bool IsElementwiseUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFillSupported (const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFloorSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFullyConnectedSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsGreaterSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInputSupported (const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInstanceNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsL2NormalizationSupported (const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogicalBinarySupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsLogSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMaximumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMeanSupported (const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMergerSupported (const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const MergerDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMinimumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMultiplicationSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDivisionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsOutputSupported (const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPadSupported (const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPermuteSupported (const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPooling2dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPreluSupported (const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQLstmSupported (const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQuantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQuantizedLstmSupported (const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsReshapeSupported (const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsResizeSupported (const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsResizeBilinearSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsRsqrtSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSliceSupported (const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToBatchNdSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToDepthSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSplitterSupported (const TensorInfo &input, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSplitterSupported (const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStackSupported (const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStridedSliceSupported (const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSubtractionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeSupported (const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
- Public Member Functions inherited from LayerSupportBase
bool IsDebugSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDetectionPostProcessSupported (const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsEqualSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFakeQuantizationSupported (const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogicalUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMemCopySupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMemImportSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMergeSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPreCompiledSupported (const TensorInfo &input, const PreCompiledDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsRankSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsStandInSupported (const std::vector< const TensorInfo *> &inputs, const std::vector< const TensorInfo *> &outputs, const StandInDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSwitchSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output0, const TensorInfo &output1, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 

Additional Inherited Members

- Protected Member Functions inherited from ILayerSupport
 ILayerSupport ()
 
virtual ~ILayerSupport ()
 

Detailed Description

Definition at line 14 of file NeonLayerSupport.hpp.
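
A typical use of this class is to query whether a layer configuration can run on the Neon (CpuAcc) backend before assigning it. The sketch below is illustrative only: the tensor shapes, data types and activation function are hypothetical, and it assumes the backend header NeonLayerSupport.hpp is visible to the build.

#include <NeonLayerSupport.hpp>   // backend-internal header, as included above

#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>

#include <iostream>
#include <string>

int main()
{
    using namespace armnn;

    // Hypothetical 1x16 Float32 tensors for a ReLU activation.
    TensorInfo input (TensorShape({1, 16}), DataType::Float32);
    TensorInfo output(TensorShape({1, 16}), DataType::Float32);

    ActivationDescriptor descriptor;
    descriptor.m_Function = ActivationFunction::ReLu;

    NeonLayerSupport layerSupport; // default constructor, documented below

    std::string reason;
    bool supported = layerSupport.IsActivationSupported(
        input, output, descriptor, Optional<std::string&>(reason));

    if (!supported)
    {
        std::cout << "Activation not supported on CpuAcc: " << reason << std::endl;
    }
    return supported ? 0 : 1;
}

When a query fails, the FORWARD_WORKLOAD_VALIDATE_FUNC forwarding used throughout this class writes the Arm Compute error description into reasonIfUnsupported.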

Constructor & Destructor Documentation

◆ NeonLayerSupport() [1/2]

Definition at line 133 of file NeonLayerSupport.cpp.

133 NeonLayerSupport::NeonLayerSupport(const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
134  : m_ModelContextPtr(modelContextPtr)
135 {
136 }
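
The model context pointer gives the layer support checks access to backend-specific model options; for example, IsConvolution2dSupported() below consults NeonBackendModelContext::IsFastMathEnabled() through it. A sketch of wiring this up, under the assumption that NeonBackendModelContext is constructed from a ModelOptions vector and that the CpuAcc fast-math option is spelled "FastMathEnabled" (both should be checked against your source tree):

#include <NeonLayerSupport.hpp>
#include <NeonBackendModelContext.hpp>   // assumed header name for NeonBackendModelContext

#include <armnn/BackendOptions.hpp>

#include <memory>

armnn::NeonLayerSupport MakeFastMathLayerSupport()
{
    using namespace armnn;

    // ModelOptions is a vector of BackendOptions; "FastMathEnabled" is the option
    // later read by IsConvolution2dSupported() (assumed option name).
    ModelOptions modelOptions = { BackendOptions("CpuAcc", {{ "FastMathEnabled", true }}) };

    IBackendInternal::IBackendSpecificModelContextPtr modelContext =
        std::make_shared<NeonBackendModelContext>(modelOptions);

    return NeonLayerSupport(modelContext);
}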

◆ NeonLayerSupport() [2/2]

Definition at line 138 of file NeonLayerSupport.cpp.

138 NeonLayerSupport::NeonLayerSupport()
139  : m_ModelContextPtr(nullptr)
140 {
141 }

◆ ~NeonLayerSupport()

~NeonLayerSupport ( )
inline

Definition at line 20 of file NeonLayerSupport.hpp.

References ARMNN_DEPRECATED_MSG, NeonLayerSupport::IsAbsSupported(), NeonLayerSupport::IsActivationSupported(), NeonLayerSupport::IsAdditionSupported(), NeonLayerSupport::IsArgMinMaxSupported(), NeonLayerSupport::IsBatchNormalizationSupported(), NeonLayerSupport::IsBatchToSpaceNdSupported(), NeonLayerSupport::IsComparisonSupported(), NeonLayerSupport::IsConcatSupported(), NeonLayerSupport::IsConstantSupported(), NeonLayerSupport::IsConvertBf16ToFp32Supported(), NeonLayerSupport::IsConvertFp16ToFp32Supported(), NeonLayerSupport::IsConvertFp32ToBf16Supported(), NeonLayerSupport::IsConvertFp32ToFp16Supported(), NeonLayerSupport::IsConvolution2dSupported(), NeonLayerSupport::IsDepthToSpaceSupported(), NeonLayerSupport::IsDepthwiseConvolutionSupported(), NeonLayerSupport::IsDequantizeSupported(), NeonLayerSupport::IsDilatedDepthwiseConvolutionSupported(), NeonLayerSupport::IsDivisionSupported(), NeonLayerSupport::IsElementwiseUnarySupported(), NeonLayerSupport::IsFillSupported(), NeonLayerSupport::IsFloorSupported(), NeonLayerSupport::IsFullyConnectedSupported(), NeonLayerSupport::IsGatherSupported(), NeonLayerSupport::IsGreaterSupported(), NeonLayerSupport::IsInputSupported(), NeonLayerSupport::IsInstanceNormalizationSupported(), NeonLayerSupport::IsL2NormalizationSupported(), NeonLayerSupport::IsLogicalBinarySupported(), NeonLayerSupport::IsLogSoftmaxSupported(), NeonLayerSupport::IsLstmSupported(), NeonLayerSupport::IsMaximumSupported(), NeonLayerSupport::IsMeanSupported(), NeonLayerSupport::IsMergerSupported(), NeonLayerSupport::IsMinimumSupported(), NeonLayerSupport::IsMultiplicationSupported(), NeonLayerSupport::IsNormalizationSupported(), NeonLayerSupport::IsOutputSupported(), NeonLayerSupport::IsPadSupported(), NeonLayerSupport::IsPermuteSupported(), NeonLayerSupport::IsPooling2dSupported(), NeonLayerSupport::IsPreluSupported(), NeonLayerSupport::IsQLstmSupported(), NeonLayerSupport::IsQuantizedLstmSupported(), NeonLayerSupport::IsQuantizeSupported(), NeonLayerSupport::IsReshapeSupported(), NeonLayerSupport::IsResizeBilinearSupported(), NeonLayerSupport::IsResizeSupported(), NeonLayerSupport::IsRsqrtSupported(), NeonLayerSupport::IsSliceSupported(), NeonLayerSupport::IsSoftmaxSupported(), NeonLayerSupport::IsSpaceToBatchNdSupported(), NeonLayerSupport::IsSpaceToDepthSupported(), NeonLayerSupport::IsSplitterSupported(), NeonLayerSupport::IsStackSupported(), NeonLayerSupport::IsStridedSliceSupported(), NeonLayerSupport::IsSubtractionSupported(), NeonLayerSupport::IsTransposeConvolution2dSupported(), and NeonLayerSupport::IsTransposeSupported().

20  ~NeonLayerSupport() {}

Member Function Documentation

◆ IsAbsSupported()

bool IsAbsSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 143 of file NeonLayerSupport.cpp.

References armnn::Abs, and NeonLayerSupport::IsElementwiseUnarySupported().

Referenced by NeonLayerSupport::~NeonLayerSupport().

146 {
147  ElementwiseUnaryDescriptor descriptor(UnaryOperation::Abs);
148  return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
149 }
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override

◆ IsActivationSupported()

bool IsActivationSupported ( const TensorInfo input,
const TensorInfo output,
const ActivationDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 151 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, armnn::IgnoreUnused(), and armnn::NeonActivationWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

155 {
156  IgnoreUnused(descriptor);
157  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonActivationWorkloadValidate,
158  reasonIfUnsupported,
159  input,
160  output,
161  descriptor);
162 }
arm_compute::Status NeonActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)
void IgnoreUnused(Ts &&...)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsAdditionSupported()

bool IsAdditionSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 164 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonAdditionWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

168 {
169  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAdditionWorkloadValidate,
170  reasonIfUnsupported,
171  input0,
172  input1,
173  output,
174  nullptr);
175 }
arm_compute::Status NeonAdditionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsArgMinMaxSupported()

bool IsArgMinMaxSupported ( const TensorInfo input,
const TensorInfo output,
const ArgMinMaxDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 177 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonArgMinMaxWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

181 {
182  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonArgMinMaxWorkloadValidate,
183  reasonIfUnsupported,
184  input,
185  output,
186  descriptor);
187 }
arm_compute::Status NeonArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsBatchNormalizationSupported()

bool IsBatchNormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const TensorInfo mean,
const TensorInfo var,
const TensorInfo beta,
const TensorInfo gamma,
const BatchNormalizationDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 189 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonBatchNormalizationValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

197 {
198  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchNormalizationValidate,
199  reasonIfUnsupported,
200  input,
201  output,
202  mean,
203  var,
204  beta,
205  gamma,
206  descriptor,
207  nullptr);
208 }
arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsBatchToSpaceNdSupported()

bool IsBatchToSpaceNdSupported ( const TensorInfo input,
const TensorInfo output,
const BatchToSpaceNdDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 210 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonBatchToSpaceNdWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

214 {
215  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchToSpaceNdWorkloadValidate,
216  reasonIfUnsupported,
217  input,
218  output,
219  descriptor);
220 }
arm_compute::Status NeonBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &desc)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsComparisonSupported()

bool IsComparisonSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const ComparisonDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 222 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonComparisonWorkloadValidate().

Referenced by NeonLayerSupport::IsGreaterSupported(), and NeonLayerSupport::~NeonLayerSupport().

227 {
228 
229  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonComparisonWorkloadValidate,
230  reasonIfUnsupported,
231  input0,
232  input1,
233  output,
234  descriptor);
235 }
arm_compute::Status NeonComparisonWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsConcatSupported()

bool IsConcatSupported ( const std::vector< const TensorInfo *>  inputs,
const TensorInfo output,
const ConcatDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 237 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, OriginsDescriptor::GetConcatAxis(), OriginsDescriptor::GetNumDimensions(), TensorInfo::IsTypeSpaceMatch(), armnn::NeonConcatWorkloadValidate(), and armnn::SetValueChecked().

Referenced by NeonLayerSupport::IsMergerSupported(), and NeonLayerSupport::~NeonLayerSupport().

241 {
242  if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
243  {
244  SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.");
245  return false;
246  }
247 
248  unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
249  if(concatInnerAxis < 3) // Width, height, or channels
250  {
251  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConcatWorkloadValidate,
252  reasonIfUnsupported,
253  inputs,
254  output,
255  descriptor);
256  }
257  else if (concatInnerAxis == 3)
258  {
259  for (auto& input : inputs)
260  {
261  if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
262  {
263  SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
264  return false;
265  }
266  }
267  return true; // Sub-tensors support concat along batch
268  }
269  else // > 4 dimensions not supported.
270  {
271  SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
272  return false;
273  }
274 }
arm_compute::Status NeonConcatWorkloadValidate(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
void SetValueChecked(Optional< T &> optionalRef, V &&val)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
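
Because the axis bookkeeping above is easy to get wrong from calling code, it is worth passing a reason string when probing concat support. A brief sketch with hypothetical shapes; the ConcatDescriptor is built here with armnn::CreateDescriptorForConcatenation from armnn/Descriptors.hpp:

#include <NeonLayerSupport.hpp>

#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>

#include <array>
#include <iostream>
#include <string>
#include <vector>

void ProbeConcat(const armnn::NeonLayerSupport& support)
{
    using namespace armnn;

    // Two hypothetical NHWC 4D inputs concatenated along the channel axis (3).
    TensorInfo input0(TensorShape({1, 8, 8, 16}), DataType::Float32);
    TensorInfo input1(TensorShape({1, 8, 8, 16}), DataType::Float32);
    TensorInfo output(TensorShape({1, 8, 8, 32}), DataType::Float32);

    std::array<TensorShape, 2> shapes = { input0.GetShape(), input1.GetShape() };
    ConcatDescriptor descriptor = CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), 3);

    std::vector<const TensorInfo*> inputs = { &input0, &input1 };

    std::string reason;
    if (!support.IsConcatSupported(inputs, output, descriptor, Optional<std::string&>(reason)))
    {
        std::cout << "Concat rejected: " << reason << std::endl;
    }
}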

◆ IsConstantSupported()

bool IsConstantSupported ( const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 276 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonConstantWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

278 {
279  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConstantWorkloadValidate,
280  reasonIfUnsupported,
281  output);
282 }
arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo &output)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsConvertBf16ToFp32Supported()

bool IsConvertBf16ToFp32Supported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 284 of file NeonLayerSupport.cpp.

References armnn::IgnoreUnused().

Referenced by NeonLayerSupport::~NeonLayerSupport().

287 {
288  armnn::IgnoreUnused(input);
289  armnn::IgnoreUnused(output);
290  armnn::IgnoreUnused(reasonIfUnsupported);
291  return true;
292 }
void IgnoreUnused(Ts &&...)

◆ IsConvertFp16ToFp32Supported()

bool IsConvertFp16ToFp32Supported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 294 of file NeonLayerSupport.cpp.

References armnn::IgnoreUnused().

Referenced by NeonLayerSupport::~NeonLayerSupport().

297 {
298  armnn::IgnoreUnused(input);
299  armnn::IgnoreUnused(output);
300  armnn::IgnoreUnused(reasonIfUnsupported);
301  return true;
302 }
void IgnoreUnused(Ts &&...)

◆ IsConvertFp32ToBf16Supported()

bool IsConvertFp32ToBf16Supported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 304 of file NeonLayerSupport.cpp.

References armnn::IgnoreUnused().

Referenced by NeonLayerSupport::~NeonLayerSupport().

307 {
308  armnn::IgnoreUnused(input);
309  armnn::IgnoreUnused(output);
310  armnn::IgnoreUnused(reasonIfUnsupported);
311  return true;
312 }
void IgnoreUnused(Ts &&...)

◆ IsConvertFp32ToFp16Supported()

bool IsConvertFp32ToFp16Supported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 314 of file NeonLayerSupport.cpp.

References armnn::IgnoreUnused().

Referenced by NeonLayerSupport::~NeonLayerSupport().

317 {
318  armnn::IgnoreUnused(input);
319  armnn::IgnoreUnused(output);
320  armnn::IgnoreUnused(reasonIfUnsupported);
321  return true;
322 }
void IgnoreUnused(Ts &&...)

◆ IsConvolution2dSupported()

bool IsConvolution2dSupported ( const TensorInfo input,
const TensorInfo output,
const Convolution2dDescriptor descriptor,
const TensorInfo weights,
const Optional< TensorInfo > &  biases,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 324 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, NeonBackendModelContext::IsFastMathEnabled(), and armnn::NeonConvolution2dWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

330 {
331  bool isFastMathEnabled = false;
332 #if defined(ARMCOMPUTENEON_ENABLED)
333  if (m_ModelContextPtr)
334  {
335  if (m_ModelContextPtr.get() != nullptr)
336  {
337  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
338  if (modelOptions)
339  {
340  isFastMathEnabled = modelOptions->IsFastMathEnabled();
341  }
342  }
343  }
344 #endif
345 
346  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvolution2dWorkloadValidate,
347  reasonIfUnsupported,
348  input,
349  output,
350  descriptor,
351  weights,
352  biases,
353  isFastMathEnabled,
354  nullptr);
355 }
arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsDepthToSpaceSupported()

bool IsDepthToSpaceSupported ( const TensorInfo input,
const TensorInfo output,
const DepthToSpaceDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 357 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDepthToSpaceWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

361 {
362  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthToSpaceWorkloadValidate,
363  reasonIfUnsupported,
364  input,
365  output,
366  descriptor);
367 }
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
arm_compute::Status NeonDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor)

◆ IsDepthwiseConvolutionSupported()

bool IsDepthwiseConvolutionSupported ( const TensorInfo input,
const TensorInfo output,
const DepthwiseConvolution2dDescriptor descriptor,
const TensorInfo weights,
const Optional< TensorInfo > &  biases,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 369 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDepthwiseConvolutionWorkloadValidate().

Referenced by BOOST_AUTO_TEST_CASE(), and NeonLayerSupport::~NeonLayerSupport().

375 {
376  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
377  reasonIfUnsupported,
378  input,
379  output,
380  descriptor,
381  weights,
382  biases,
383  nullptr);
384 }
arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsDequantizeSupported()

bool IsDequantizeSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 386 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDequantizeWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

389 {
390  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDequantizeWorkloadValidate,
391  reasonIfUnsupported,
392  input,
393  output);
394 }
arm_compute::Status NeonDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsDilatedDepthwiseConvolutionSupported()

bool IsDilatedDepthwiseConvolutionSupported ( const TensorInfo input,
const TensorInfo output,
const DepthwiseConvolution2dDescriptor descriptor,
const TensorInfo weights,
const Optional< TensorInfo > &  biases,
Optional< std::string &>  reason = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 396 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDepthwiseConvolutionWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

402 {
403  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
404  reasonIfUnsupported,
405  input,
406  output,
407  descriptor,
408  weights,
409  biases,
410  nullptr);
411 }
arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsDivisionSupported()

bool IsDivisionSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 657 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDivisionWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

661 {
662  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDivisionWorkloadValidate,
663  reasonIfUnsupported,
664  input0,
665  input1,
666  output,
667  nullptr);
668 }
arm_compute::Status NeonDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsElementwiseUnarySupported()

bool IsElementwiseUnarySupported ( const TensorInfo input,
const TensorInfo output,
const ElementwiseUnaryDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 413 of file NeonLayerSupport.cpp.

References armnn::Abs, armnn::Exp, FORWARD_WORKLOAD_VALIDATE_FUNC, armnn::LogicalNot, ElementwiseUnaryDescriptor::m_Operation, armnn::Neg, armnn::NeonAbsWorkloadValidate(), armnn::NeonExpWorkloadValidate(), armnn::NeonLogicalNotWorkloadValidate(), armnn::NeonNegWorkloadValidate(), armnn::NeonRsqrtWorkloadValidate(), and armnn::Rsqrt.

Referenced by NeonLayerSupport::IsAbsSupported(), NeonLayerSupport::IsRsqrtSupported(), and NeonLayerSupport::~NeonLayerSupport().

417 {
418  switch(descriptor.m_Operation)
419  {
420  case UnaryOperation::Abs:
421  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAbsWorkloadValidate,
422  reasonIfUnsupported,
423  input,
424  output);
425  case UnaryOperation::Exp:
426  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonExpWorkloadValidate,
427  reasonIfUnsupported,
428  input,
429  output);
430  case UnaryOperation::Neg:
431  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNegWorkloadValidate,
432  reasonIfUnsupported,
433  input,
434  output);
435  case UnaryOperation::Rsqrt:
436  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonRsqrtWorkloadValidate,
437  reasonIfUnsupported,
438  input,
439  output);
440  case UnaryOperation::LogicalNot:
441  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogicalNotWorkloadValidate,
442  reasonIfUnsupported,
443  input,
444  output);
445  default:
446  return false;
447  }
448 }
arm_compute::Status NeonNegWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status NeonAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status NeonExpWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status NeonLogicalNotWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status NeonRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
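
IsAbsSupported() above and IsRsqrtSupported() below are thin wrappers over this function; the other unary operations can be probed directly by constructing the descriptor. A minimal sketch, reusing layerSupport, input and output from the earlier example and choosing Exp arbitrarily:

armnn::ElementwiseUnaryDescriptor expDescriptor(armnn::UnaryOperation::Exp);
bool expSupported = layerSupport.IsElementwiseUnarySupported(input, output, expDescriptor,
                                                             armnn::EmptyOptional());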

◆ IsFillSupported()

bool IsFillSupported ( const TensorInfo input,
const TensorInfo output,
const FillDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 450 of file NeonLayerSupport.cpp.

References armnn::IgnoreUnused().

Referenced by NeonLayerSupport::~NeonLayerSupport().

454 {
455  armnn::IgnoreUnused(input);
456  armnn::IgnoreUnused(output);
457  armnn::IgnoreUnused(descriptor);
458 
459  return IsNeonBackendSupported(reasonIfUnsupported);
460 }
void IgnoreUnused(Ts &&...)

◆ IsFloorSupported()

bool IsFloorSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 462 of file NeonLayerSupport.cpp.

References TensorInfo::GetDataType(), armnn::IgnoreUnused(), and armnn::IsSupportedForDataTypeGeneric().

Referenced by NeonLayerSupport::~NeonLayerSupport().

465 {
466  armnn::IgnoreUnused(output);
467  return IsNeonBackendSupported(reasonIfUnsupported) &&
468  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
469  input.GetDataType(),
470  &FalseFuncF16<>,
471  &TrueFunc<>,
472  &FalseFuncU8<>,
473  &FalseFuncI32<>,
474  &FalseFuncU8<>);
475 }
void IgnoreUnused(Ts &&...)
bool IsSupportedForDataTypeGeneric(Optional< std::string &> reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)

◆ IsFullyConnectedSupported()

bool IsFullyConnectedSupported ( const TensorInfo input,
const TensorInfo output,
const TensorInfo weights,
const TensorInfo biases,
const FullyConnectedDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 477 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonFullyConnectedWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

483 {
484  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonFullyConnectedWorkloadValidate,
485  reasonIfUnsupported,
486  input,
487  output,
488  weights,
489  biases,
490  descriptor,
491  nullptr);
492 }
arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsGatherSupported()

bool IsGatherSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const GatherDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 494 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonGatherWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

499 {
500  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonGatherWorkloadValidate,
501  reasonIfUnsupported,
502  input0,
503  input1,
504  output,
505  descriptor);
506 }
arm_compute::Status NeonGatherWorkloadValidate(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &output, const GatherDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsGreaterSupported()

bool IsGreaterSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 508 of file NeonLayerSupport.cpp.

References armnn::Greater, and NeonLayerSupport::IsComparisonSupported().

Referenced by NeonLayerSupport::~NeonLayerSupport().

512 {
513  ComparisonDescriptor descriptor(ComparisonOperation::Greater);
514  return IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported);
515 }
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override

◆ IsInputSupported()

bool IsInputSupported ( const TensorInfo input,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 517 of file NeonLayerSupport.cpp.

Referenced by NeonLayerSupport::~NeonLayerSupport().

519 {
520  return IsNeonBackendSupported(reasonIfUnsupported, input);
521 }

◆ IsInstanceNormalizationSupported()

bool IsInstanceNormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const InstanceNormalizationDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 523 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonInstanceNormalizationWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

527 {
528  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonInstanceNormalizationWorkloadValidate,
529  reasonIfUnsupported,
530  input,
531  output,
532  descriptor);
533 }
arm_compute::Status NeonInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsL2NormalizationSupported()

bool IsL2NormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const L2NormalizationDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 535 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonL2NormalizationWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

539 {
540  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
541 }
arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsLogicalBinarySupported()

bool IsLogicalBinarySupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const LogicalBinaryDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 543 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, armnn::LogicalAnd, armnn::LogicalOr, LogicalBinaryDescriptor::m_Operation, armnn::NeonLogicalAndWorkloadValidate(), and armnn::NeonLogicalOrWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

548 {
549  switch(descriptor.m_Operation)
550  {
551  case LogicalBinaryOperation::LogicalAnd:
552  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogicalAndWorkloadValidate,
553  reasonIfUnsupported,
554  input0,
555  input1,
556  output);
557  case LogicalBinaryOperation::LogicalOr:
558  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogicalOrWorkloadValidate,
559  reasonIfUnsupported,
560  input0,
561  input1,
562  output);
563  default:
564  return false;
565  }
566 }
arm_compute::Status NeonLogicalAndWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
arm_compute::Status NeonLogicalOrWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsLogSoftmaxSupported()

bool IsLogSoftmaxSupported ( const TensorInfo input,
const TensorInfo output,
const LogSoftmaxDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 568 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonLogSoftmaxWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

572 {
573  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
574 }
arm_compute::Status NeonLogSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsLstmSupported()

bool IsLstmSupported ( const TensorInfo input,
const TensorInfo outputStateIn,
const TensorInfo cellStateIn,
const TensorInfo scratchBuffer,
const TensorInfo outputStateOut,
const TensorInfo cellStateOut,
const TensorInfo output,
const LstmDescriptor descriptor,
const LstmInputParamsInfo paramsInfo,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 576 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonLstmFloatWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

586 {
587  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLstmFloatWorkloadValidate,
588  reasonIfUnsupported,
589  input,
590  outputStateIn,
591  cellStateIn,
592  scratchBuffer,
593  outputStateOut,
594  cellStateOut,
595  output,
596  descriptor,
597  paramsInfo);
598 }
arm_compute::Status NeonLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsMaximumSupported()

bool IsMaximumSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 600 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMaximumWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

604 {
605  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMaximumWorkloadValidate,
606  reasonIfUnsupported,
607  input0,
608  input1,
609  output);
610 }
arm_compute::Status NeonMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsMeanSupported()

bool IsMeanSupported ( const TensorInfo input,
const TensorInfo output,
const MeanDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 612 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMeanWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

616 {
617  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMeanWorkloadValidate,
618  reasonIfUnsupported,
619  input,
620  output,
621  descriptor);
622 }
arm_compute::Status NeonMeanWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &desc)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsMergerSupported()

bool IsMergerSupported ( const std::vector< const TensorInfo *>  inputs,
const TensorInfo output,
const MergerDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 624 of file NeonLayerSupport.cpp.

References NeonLayerSupport::IsConcatSupported().

Referenced by NeonLayerSupport::~NeonLayerSupport().

628 {
629  return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
630 }
bool IsConcatSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const ConcatDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override

◆ IsMinimumSupported()

bool IsMinimumSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 632 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMinimumWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

636 {
637  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMinimumWorkloadValidate,
638  reasonIfUnsupported,
639  input0,
640  input1,
641  output);
642 }
arm_compute::Status NeonMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Validate function for validating the inputs and output.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsMultiplicationSupported()

bool IsMultiplicationSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 644 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMultiplicationWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

648 {
649  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMultiplicationWorkloadValidate,
650  reasonIfUnsupported,
651  input0,
652  input1,
653  output,
654  nullptr);
655 }
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
arm_compute::Status NeonMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)

◆ IsNormalizationSupported()

bool IsNormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const NormalizationDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 670 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonNormalizationWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

674 {
675  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNormalizationWorkloadValidate,
676  reasonIfUnsupported,
677  input,
678  output,
679  descriptor);
680 }
arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsOutputSupported()

bool IsOutputSupported ( const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 682 of file NeonLayerSupport.cpp.

Referenced by NeonLayerSupport::~NeonLayerSupport().

684 {
685  return IsNeonBackendSupported(reasonIfUnsupported, output);
686 }

◆ IsPadSupported()

bool IsPadSupported ( const TensorInfo input,
const TensorInfo output,
const PadDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 688 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPadWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

692 {
693  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPadWorkloadValidate,
694  reasonIfUnsupported,
695  input,
696  output,
697  descriptor);
698 }
arm_compute::Status NeonPadWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsPermuteSupported()

bool IsPermuteSupported ( const TensorInfo input,
const TensorInfo output,
const PermuteDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 700 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPermuteWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

704 {
705  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
706 }
arm_compute::Status NeonPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsPooling2dSupported()

bool IsPooling2dSupported ( const TensorInfo input,
const TensorInfo output,
const Pooling2dDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 708 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPooling2dWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

712 {
713  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
714 }
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
arm_compute::Status NeonPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)

◆ IsPreluSupported()

bool IsPreluSupported ( const TensorInfo input,
const TensorInfo alpha,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 716 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPreluWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

720 {
721  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
722 }
arm_compute::Status NeonPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsQLstmSupported()

bool IsQLstmSupported ( const TensorInfo input,
const TensorInfo previousOutputIn,
const TensorInfo previousCellStateIn,
const TensorInfo outputStateOut,
const TensorInfo cellStateOut,
const TensorInfo output,
const QLstmDescriptor descriptor,
const LstmInputParamsInfo paramsInfo,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 724 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, TensorInfo::GetDataType(), armnn::NeonQLstmWorkloadValidate(), armnn::QAsymmS8, and armnn::QSymmS16.

Referenced by NeonLayerSupport::~NeonLayerSupport().

733 {
734  // Check required here in order to pass IsLayerSupported for datatypes tests
735  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
736  previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
737  previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
738  outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
739  cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
740  output.GetDataType() == armnn::DataType::QAsymmS8)
741  {
742  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonQLstmWorkloadValidate,
743  reasonIfUnsupported,
744  input,
745  previousCellStateIn,
746  previousOutputIn,
747  cellStateOut,
748  outputStateOut,
749  output,
750  descriptor,
751  paramsInfo);
752  }
753  else
754  {
755  return false;
756  }
757 }
arm_compute::Status NeonQLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsQuantizedLstmSupported()

bool IsQuantizedLstmSupported ( const TensorInfo input,
const TensorInfo cellStateIn,
const TensorInfo outputStateIn,
const TensorInfo cellStateOut,
const TensorInfo outputStateOut,
const QuantizedLstmInputParamsInfo paramsInfo,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 769 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonQuantizedLstmWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

776 {
777  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonQuantizedLstmWorkloadValidate,
778  reasonIfUnsupported,
779  input,
780  cellStateIn,
781  outputStateIn,
782  cellStateOut,
783  outputStateOut,
784  paramsInfo);
785 }
arm_compute::Status NeonQuantizedLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsQuantizeSupported()

bool IsQuantizeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional()
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 759 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonQuantizeWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

762 {
763  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonQuantizeWorkloadValidate,
764  reasonIfUnsupported,
765  input,
766  output);
767 }
arm_compute::Status NeonQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsReshapeSupported()

bool IsReshapeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ReshapeDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional()
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 787 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, armnn::IgnoreUnused(), and armnn::NeonReshapeWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

791 {
792  armnn::IgnoreUnused(descriptor);
793  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonReshapeWorkloadValidate,
794  reasonIfUnsupported,
795  input,
796  output);
797 }
void IgnoreUnused(Ts &&...)
arm_compute::Status NeonReshapeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsResizeBilinearSupported()

bool IsResizeBilinearSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional()
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 811 of file NeonLayerSupport.cpp.

References armnn::Bilinear, TensorInfo::GetShape(), NeonLayerSupport::IsResizeSupported(), ResizeDescriptor::m_DataLayout, ResizeDescriptor::m_Method, ResizeDescriptor::m_TargetHeight, ResizeDescriptor::m_TargetWidth, and armnn::NCHW.

Referenced by NeonLayerSupport::~NeonLayerSupport().

814 {
815  ResizeDescriptor descriptor;
816  descriptor.m_Method = ResizeMethod::Bilinear;
817  descriptor.m_DataLayout = DataLayout::NCHW;
818 
819  const TensorShape& outputShape = output.GetShape();
820  descriptor.m_TargetHeight = outputShape[2];
821  descriptor.m_TargetWidth = outputShape[3];
822 
823  return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
824 }
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
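As the code above shows, IsResizeBilinearSupported() simply builds a Bilinear/NCHW ResizeDescriptor whose target size is taken from the output shape and defers to IsResizeSupported(). Calling IsResizeSupported() directly with such a descriptor is therefore equivalent; a minimal sketch under the same include-path assumptions as the earlier example:

#include <NeonLayerSupport.hpp>   // backend-internal header, assumed to be on the include path
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <string>

bool CheckBilinearResize(const armnn::TensorInfo& input, const armnn::TensorInfo& output)
{
    using namespace armnn;

    ResizeDescriptor descriptor;
    descriptor.m_Method       = ResizeMethod::Bilinear;
    descriptor.m_DataLayout   = DataLayout::NCHW;
    descriptor.m_TargetHeight = output.GetShape()[2];   // H in NCHW
    descriptor.m_TargetWidth  = output.GetShape()[3];   // W in NCHW

    NeonLayerSupport layerSupport;
    std::string reason;
    return layerSupport.IsResizeSupported(input, output, descriptor,
                                          Optional<std::string&>(reason));
}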

◆ IsResizeSupported()

bool IsResizeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ResizeDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional()
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 799 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonResizeWorkloadValidate().

Referenced by NeonLayerSupport::IsResizeBilinearSupported(), and NeonLayerSupport::~NeonLayerSupport().

803 {
804  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonResizeWorkloadValidate,
805  reasonIfUnsupported,
806  input,
807  output,
808  descriptor);
809 }
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
arm_compute::Status NeonResizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor)

◆ IsRsqrtSupported()

bool IsRsqrtSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional()
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 826 of file NeonLayerSupport.cpp.

References NeonLayerSupport::IsElementwiseUnarySupported(), and armnn::Rsqrt.

Referenced by NeonLayerSupport::~NeonLayerSupport().

829 {
830  ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt);
831  return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
832 }
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
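Rsqrt is handled the same way: the query is rewritten as an ElementwiseUnary check with UnaryOperation::Rsqrt, so the direct call below is equivalent (minimal sketch, same assumptions as above):

#include <NeonLayerSupport.hpp>   // backend-internal header, assumed to be on the include path
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <string>

bool CheckRsqrt(const armnn::TensorInfo& input, const armnn::TensorInfo& output)
{
    armnn::ElementwiseUnaryDescriptor descriptor(armnn::UnaryOperation::Rsqrt);

    armnn::NeonLayerSupport layerSupport;
    std::string reason;
    return layerSupport.IsElementwiseUnarySupported(input, output, descriptor,
                                                    armnn::Optional<std::string&>(reason));
}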

◆ IsSliceSupported()

bool IsSliceSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const SliceDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional()
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 834 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSliceWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

838 {
839  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSliceWorkloadValidate,
840  reasonIfUnsupported,
841  input,
842  output,
843  descriptor);
844 }
arm_compute::Status NeonSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
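For slice queries the descriptor carries the begin coordinates and sizes of the extracted region. A minimal sketch with illustrative shapes (same include-path assumptions as above):

#include <NeonLayerSupport.hpp>   // backend-internal header, assumed to be on the include path
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <string>

bool CheckSlice()
{
    using namespace armnn;

    // Extract a 2x2 region starting at (1, 1) from a 4x4 tensor.
    TensorInfo input (TensorShape({4, 4}), DataType::Float32);
    TensorInfo output(TensorShape({2, 2}), DataType::Float32);

    SliceDescriptor descriptor;
    descriptor.m_Begin = {1, 1};
    descriptor.m_Size  = {2, 2};

    NeonLayerSupport layerSupport;
    std::string reason;
    return layerSupport.IsSliceSupported(input, output, descriptor,
                                         Optional<std::string&>(reason));
}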

◆ IsSoftmaxSupported()

bool IsSoftmaxSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const SoftmaxDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional()
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 846 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSoftmaxWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

850 {
851  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
852 }
arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
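The softmax query also takes its descriptor into account; beta and the normalisation axis are part of what the Neon workload validator checks. A minimal sketch using commonly seen values (illustrative only, descriptor fields assumed as below):

#include <NeonLayerSupport.hpp>   // backend-internal header, assumed to be on the include path
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <string>

bool CheckSoftmax(const armnn::TensorInfo& input, const armnn::TensorInfo& output)
{
    armnn::SoftmaxDescriptor descriptor;
    descriptor.m_Beta = 1.0f;   // standard softmax
    descriptor.m_Axis = -1;     // normalise over the last dimension

    armnn::NeonLayerSupport layerSupport;
    std::string reason;
    return layerSupport.IsSoftmaxSupported(input, output, descriptor,
                                           armnn::Optional<std::string&>(reason));
}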

◆ IsSpaceToBatchNdSupported()

bool IsSpaceToBatchNdSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const SpaceToBatchNdDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional()
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 854 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSpaceToBatchNdWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

858 {
859  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSpaceToBatchNdWorkloadValidate,
860  reasonIfUnsupported,
861  input,
862  output,
863  descriptor);
864 }
arm_compute::Status NeonSpaceToBatchNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsSpaceToDepthSupported()

bool IsSpaceToDepthSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const SpaceToDepthDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional()
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 866 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSpaceToDepthWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

870 {
871  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSpaceToDepthWorkloadValidate,
872  reasonIfUnsupported,
873  input,
874  output,
875  descriptor);
876 }
arm_compute::Status NeonSpaceToDepthWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsSplitterSupported() [1/2]

bool IsSplitterSupported ( const TensorInfo &  input,
const ViewsDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional()
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 878 of file NeonLayerSupport.cpp.

References TensorInfo::GetDataType(), and armnn::IgnoreUnused().

Referenced by NeonLayerSupport::~NeonLayerSupport().

881 {
882  armnn::IgnoreUnused(descriptor);
883  return IsSupportedForDataTypeNeon(reasonIfUnsupported,
884  input.GetDataType(),
885  &TrueFunc<>,
886  &TrueFunc<>);
887 }
void IgnoreUnused(Ts &&...)

◆ IsSplitterSupported() [2/2]

bool IsSplitterSupported ( const TensorInfo &  input,
const std::vector< std::reference_wrapper< TensorInfo >> &  outputs,
const ViewsDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional()
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 889 of file NeonLayerSupport.cpp.

References armnn::ComputeSplitAxis(), FORWARD_WORKLOAD_VALIDATE_FUNC, ViewsDescriptor::GetNumDimensions(), TensorInfo::GetShape(), armnn::IgnoreUnused(), TensorInfo::IsTypeSpaceMatch(), armnn::NeonSplitterWorkloadValidate(), and armnn::SetValueChecked().

893 {
894 #if defined(ARMCOMPUTENEON_ENABLED)
895  // Split along the last dimension, cannot use sub-tensors
896  // as width and height of the sub-tensors do not match
897  // the width and height of the parent tensor
898  // in case of input with more than 2D.
899  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
900  if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
901  *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
902  {
903  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSplitterWorkloadValidate,
904  reasonIfUnsupported,
905  input,
906  outputs,
907  *splitAxis.begin());
908  }
909 #endif
910  IgnoreUnused(descriptor);
911  for (auto output : outputs)
912  {
913  if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
914  {
915  SetValueChecked(reasonIfUnsupported, "Neon Splitter: Types and quantization parameters must match.");
916  return false;
917  }
918  }
919  return true;
920 }
arm_compute::Status NeonSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, unsigned int splitAxis)
void IgnoreUnused(Ts &&...)
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
void SetValueChecked(Optional< T &> optionalRef, V &&val)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
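The check above only reaches the Arm Compute Library validator when the tensor has more than two dimensions and is split along its last dimension; otherwise it falls back to comparing the type and quantization space of input and outputs. A minimal sketch of the forwarded case, splitting the width of a 4D tensor in two (illustrative helper and shapes):

#include <NeonLayerSupport.hpp>   // backend-internal header, assumed to be on the include path
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <cstdint>
#include <functional>
#include <string>
#include <vector>

bool CheckSplitAlongWidth()
{
    using namespace armnn;

    // Split a 1x2x4x8 tensor into two 1x2x4x4 views along the last dimension (width).
    TensorInfo input(TensorShape({1, 2, 4, 8}), DataType::Float32);
    TensorInfo half (TensorShape({1, 2, 4, 4}), DataType::Float32);

    ViewsDescriptor descriptor(/*numViews=*/2, /*numDimensions=*/4);
    for (uint32_t view = 0; view < 2; ++view)
    {
        for (uint32_t dim = 0; dim < 4; ++dim)
        {
            descriptor.SetViewOriginCoord(view, dim, dim == 3 ? view * 4 : 0);
            descriptor.SetViewSize(view, dim, half.GetShape()[dim]);
        }
    }

    std::vector<TensorInfo> outputInfos(2, half);
    std::vector<std::reference_wrapper<TensorInfo>> outputs = { outputInfos[0], outputInfos[1] };

    NeonLayerSupport layerSupport;
    std::string reason;
    return layerSupport.IsSplitterSupported(input, outputs, descriptor,
                                            Optional<std::string&>(reason));
}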

◆ IsStackSupported()

bool IsStackSupported ( const std::vector< const TensorInfo *> &  inputs,
const TensorInfo &  output,
const StackDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional()
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 922 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonStackWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

926 {
927  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonStackWorkloadValidate,
928  reasonIfUnsupported,
929  inputs,
930  output,
931  descriptor);
932 }
arm_compute::Status NeonStackWorkloadValidate(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
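Stack takes a vector of input TensorInfo pointers together with a StackDescriptor describing the stack axis, the number of inputs and their common shape. A minimal sketch with two identical inputs (illustrative only; the three-argument StackDescriptor constructor is assumed):

#include <NeonLayerSupport.hpp>   // backend-internal header, assumed to be on the include path
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <string>
#include <vector>

bool CheckStackOfTwo()
{
    using namespace armnn;

    // Stack two 3x4 tensors along a new leading axis, giving a 2x3x4 output.
    TensorInfo inputInfo(TensorShape({3, 4}), DataType::Float32);
    TensorInfo output   (TensorShape({2, 3, 4}), DataType::Float32);
    std::vector<const TensorInfo*> inputs = { &inputInfo, &inputInfo };

    StackDescriptor descriptor(/*axis=*/0, /*numInputs=*/2, inputInfo.GetShape());

    NeonLayerSupport layerSupport;
    std::string reason;
    return layerSupport.IsStackSupported(inputs, output, descriptor,
                                         Optional<std::string&>(reason));
}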

◆ IsStridedSliceSupported()

bool IsStridedSliceSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const StridedSliceDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional()
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 934 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonStridedSliceWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

938 {
939  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonStridedSliceWorkloadValidate,
940  reasonIfUnsupported,
941  input,
942  output,
943  descriptor);
944 }
arm_compute::Status NeonStridedSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsSubtractionSupported()

bool IsSubtractionSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional()
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 946 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSubtractionWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

950 {
951  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSubtractionWorkloadValidate,
952  reasonIfUnsupported,
953  input0,
954  input1,
955  output,
956  nullptr);
957 }
arm_compute::Status NeonSubtractionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsTransposeConvolution2dSupported()

bool IsTransposeConvolution2dSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const TransposeConvolution2dDescriptor &  descriptor,
const TensorInfo &  weights,
const Optional< TensorInfo > &  biases,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional()
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 959 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonTransposeConvolution2dWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

965 {
966  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeConvolution2dWorkloadValidate,
967  reasonIfUnsupported,
968  input,
969  output,
970  descriptor,
971  weights,
972  biases);
973 }
arm_compute::Status NeonTransposeConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsTransposeSupported()

bool IsTransposeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const TransposeDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional()
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 975 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonTransposeWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

979 {
980  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
981 }
arm_compute::Status NeonTransposeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
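A transpose query supplies the permutation through TransposeDescriptor::m_DimMappings. A minimal sketch for a plain 2D transpose, where the mapping is simply {1, 0} (illustrative only; the PermutationVector constructor of TransposeDescriptor is assumed):

#include <NeonLayerSupport.hpp>   // backend-internal header, assumed to be on the include path
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <string>

bool Check2dTranspose()
{
    using namespace armnn;

    TensorInfo input (TensorShape({2, 3}), DataType::Float32);
    TensorInfo output(TensorShape({3, 2}), DataType::Float32);

    // Swap the two dimensions of a 2D tensor.
    TransposeDescriptor descriptor(PermutationVector({1, 0}));

    NeonLayerSupport layerSupport;
    std::string reason;
    return layerSupport.IsTransposeSupported(input, output, descriptor,
                                             Optional<std::string&>(reason));
}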

The documentation for this class was generated from the following files: