ArmNN 21.02
NeonLayerSupport Class Reference

#include <NeonLayerSupport.hpp>

Inheritance diagram for NeonLayerSupport:
NeonLayerSupport → LayerSupportBase → ILayerSupport

Public Member Functions

 NeonLayerSupport (const IBackendInternal::IBackendSpecificModelContextPtr &modelContextPtr)
 
 NeonLayerSupport ()
 
 ~NeonLayerSupport ()
 
bool IsAbsSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsActivationSupported (const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsAdditionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsArgMinMaxSupported (const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchToSpaceNdSupported (const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsComparisonSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConcatSupported (const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const ConcatDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConstantSupported (const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertBf16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToBf16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToFp16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthToSpaceSupported (const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDequantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDilatedDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reason=EmptyOptional()) const override
 
bool IsElementwiseUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFillSupported (const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFloorSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFullyConnectedSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsGreaterSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInputSupported (const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInstanceNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsL2NormalizationSupported (const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogicalBinarySupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsLogSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMaximumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMeanSupported (const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMergerSupported (const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const MergerDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMinimumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMultiplicationSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDivisionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsOutputSupported (const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPadSupported (const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPermuteSupported (const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPooling2dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPreluSupported (const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQLstmSupported (const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQuantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQuantizedLstmSupported (const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsReduceSupported (const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsReshapeSupported (const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsResizeSupported (const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsResizeBilinearSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsRsqrtSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSliceSupported (const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToBatchNdSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToDepthSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSplitterSupported (const TensorInfo &input, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSplitterSupported (const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStackSupported (const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStridedSliceSupported (const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSubtractionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeSupported (const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
- Public Member Functions inherited from LayerSupportBase
bool IsDebugSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDetectionPostProcessSupported (const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsEqualSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFakeQuantizationSupported (const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogicalUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMemCopySupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMemImportSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMergeSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPreCompiledSupported (const TensorInfo &input, const PreCompiledDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsRankSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsStandInSupported (const std::vector< const TensorInfo *> &inputs, const std::vector< const TensorInfo *> &outputs, const StandInDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSwitchSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output0, const TensorInfo &output1, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 

Additional Inherited Members

- Protected Member Functions inherited from ILayerSupport
 ILayerSupport ()
 
virtual ~ILayerSupport ()
 

Detailed Description

Definition at line 14 of file NeonLayerSupport.hpp.
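NeonLayerSupport answers, per layer type, whether the Arm NEON (CpuAcc) backend can execute a layer with the given tensor infos and descriptor, optionally filling in a reason when it cannot. Below is a minimal usage sketch; it assumes the ArmNN source-tree include paths (NeonLayerSupport.hpp is a backend-internal header) and uses illustrative shapes and data types.

#include <NeonLayerSupport.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <iostream>
#include <string>

int main()
{
    armnn::NeonLayerSupport layerSupport; // default-constructed, no backend model context

    const armnn::TensorShape shape({1, 2, 2, 3});
    const armnn::TensorInfo input0(shape, armnn::DataType::Float32);
    const armnn::TensorInfo input1(shape, armnn::DataType::Float32);
    const armnn::TensorInfo output(shape, armnn::DataType::Float32);

    std::string reason;
    const bool supported = layerSupport.IsAdditionSupported(
        input0, input1, output, armnn::Optional<std::string&>(reason));

    std::cout << "Addition supported on Neon: " << supported;
    if (!supported) { std::cout << " (" << reason << ")"; }
    std::cout << std::endl;
    return 0;
}

In application code these checks are normally reached indirectly, through the optimizer's backend selection (IWorkloadFactory::IsLayerSupported), rather than by instantiating NeonLayerSupport directly.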

Constructor & Destructor Documentation

◆ NeonLayerSupport() [1/2]

Definition at line 134 of file NeonLayerSupport.cpp.

134 NeonLayerSupport::NeonLayerSupport(const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
135  : m_ModelContextPtr(modelContextPtr)
136 {
137 }

◆ NeonLayerSupport() [2/2]

Definition at line 139 of file NeonLayerSupport.cpp.

139 NeonLayerSupport::NeonLayerSupport()
140  : m_ModelContextPtr(nullptr)
141 {
142 }

◆ ~NeonLayerSupport()

~NeonLayerSupport ( )
inline

Definition at line 20 of file NeonLayerSupport.hpp.

References ARMNN_DEPRECATED_MSG, NeonLayerSupport::IsAbsSupported(), NeonLayerSupport::IsActivationSupported(), NeonLayerSupport::IsAdditionSupported(), NeonLayerSupport::IsArgMinMaxSupported(), NeonLayerSupport::IsBatchNormalizationSupported(), NeonLayerSupport::IsBatchToSpaceNdSupported(), NeonLayerSupport::IsComparisonSupported(), NeonLayerSupport::IsConcatSupported(), NeonLayerSupport::IsConstantSupported(), NeonLayerSupport::IsConvertBf16ToFp32Supported(), NeonLayerSupport::IsConvertFp16ToFp32Supported(), NeonLayerSupport::IsConvertFp32ToBf16Supported(), NeonLayerSupport::IsConvertFp32ToFp16Supported(), NeonLayerSupport::IsConvolution2dSupported(), NeonLayerSupport::IsDepthToSpaceSupported(), NeonLayerSupport::IsDepthwiseConvolutionSupported(), NeonLayerSupport::IsDequantizeSupported(), NeonLayerSupport::IsDilatedDepthwiseConvolutionSupported(), NeonLayerSupport::IsDivisionSupported(), NeonLayerSupport::IsElementwiseUnarySupported(), NeonLayerSupport::IsFillSupported(), NeonLayerSupport::IsFloorSupported(), NeonLayerSupport::IsFullyConnectedSupported(), NeonLayerSupport::IsGatherSupported(), NeonLayerSupport::IsGreaterSupported(), NeonLayerSupport::IsInputSupported(), NeonLayerSupport::IsInstanceNormalizationSupported(), NeonLayerSupport::IsL2NormalizationSupported(), NeonLayerSupport::IsLogicalBinarySupported(), NeonLayerSupport::IsLogSoftmaxSupported(), NeonLayerSupport::IsLstmSupported(), NeonLayerSupport::IsMaximumSupported(), NeonLayerSupport::IsMeanSupported(), NeonLayerSupport::IsMergerSupported(), NeonLayerSupport::IsMinimumSupported(), NeonLayerSupport::IsMultiplicationSupported(), NeonLayerSupport::IsNormalizationSupported(), NeonLayerSupport::IsOutputSupported(), NeonLayerSupport::IsPadSupported(), NeonLayerSupport::IsPermuteSupported(), NeonLayerSupport::IsPooling2dSupported(), NeonLayerSupport::IsPreluSupported(), NeonLayerSupport::IsQLstmSupported(), NeonLayerSupport::IsQuantizedLstmSupported(), NeonLayerSupport::IsQuantizeSupported(), NeonLayerSupport::IsReduceSupported(), NeonLayerSupport::IsReshapeSupported(), NeonLayerSupport::IsResizeBilinearSupported(), NeonLayerSupport::IsResizeSupported(), NeonLayerSupport::IsRsqrtSupported(), NeonLayerSupport::IsSliceSupported(), NeonLayerSupport::IsSoftmaxSupported(), NeonLayerSupport::IsSpaceToBatchNdSupported(), NeonLayerSupport::IsSpaceToDepthSupported(), NeonLayerSupport::IsSplitterSupported(), NeonLayerSupport::IsStackSupported(), NeonLayerSupport::IsStridedSliceSupported(), NeonLayerSupport::IsSubtractionSupported(), NeonLayerSupport::IsTransposeConvolution2dSupported(), and NeonLayerSupport::IsTransposeSupported().

20  ~NeonLayerSupport() {}

Member Function Documentation

◆ IsAbsSupported()

bool IsAbsSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 144 of file NeonLayerSupport.cpp.

References armnn::Abs, and NeonLayerSupport::IsElementwiseUnarySupported().

Referenced by NeonLayerSupport::~NeonLayerSupport().

147 {
148  ElementwiseUnaryDescriptor descriptor(UnaryOperation::Abs);
149  return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
150 }
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override

◆ IsActivationSupported()

bool IsActivationSupported ( const TensorInfo input,
const TensorInfo output,
const ActivationDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 152 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, armnn::IgnoreUnused(), and armnn::NeonActivationWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

156 {
157  IgnoreUnused(descriptor);
158  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonActivationWorkloadValidate,
159  reasonIfUnsupported,
160  input,
161  output,
162  descriptor);
163 }
arm_compute::Status NeonActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)
void IgnoreUnused(Ts &&...)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
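Most of the checks on this page expand FORWARD_WORKLOAD_VALIDATE_FUNC, which forwards the tensor infos and descriptor to the corresponding NeonXxxWorkloadValidate function and converts the returned arm_compute::Status into the bool/reasonIfUnsupported pair of the ILayerSupport interface (and reports unsupported when the backend is built without NEON). The macro itself lives in the Neon backend sources; the stand-alone sketch below only illustrates the idea and is not the literal definition.

#include <arm_compute/core/Error.h>
#include <armnn/Optional.hpp>
#include <string>
#include <utility>

// Sketch: convert an arm_compute validate result into the ILayerSupport convention.
template <typename ValidateFunc, typename... Args>
bool ForwardWorkloadValidate(ValidateFunc&& validate,
                             armnn::Optional<std::string&> reasonIfUnsupported,
                             Args&&... args)
{
    const arm_compute::Status status = validate(std::forward<Args>(args)...);
    const bool supported = (status.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported.has_value())
    {
        reasonIfUnsupported.value() = status.error_description();
    }
    return supported;
}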

◆ IsAdditionSupported()

bool IsAdditionSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 165 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonAdditionWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

169 {
170  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAdditionWorkloadValidate,
171  reasonIfUnsupported,
172  input0,
173  input1,
174  output,
175  nullptr);
176 }
arm_compute::Status NeonAdditionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsArgMinMaxSupported()

bool IsArgMinMaxSupported ( const TensorInfo input,
const TensorInfo output,
const ArgMinMaxDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 178 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonArgMinMaxWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

182 {
183  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonArgMinMaxWorkloadValidate,
184  reasonIfUnsupported,
185  input,
186  output,
187  descriptor);
188 }
arm_compute::Status NeonArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsBatchNormalizationSupported()

bool IsBatchNormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const TensorInfo mean,
const TensorInfo var,
const TensorInfo beta,
const TensorInfo gamma,
const BatchNormalizationDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 190 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonBatchNormalizationValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

198 {
199  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchNormalizationValidate,
200  reasonIfUnsupported,
201  input,
202  output,
203  mean,
204  var,
205  beta,
206  gamma,
207  descriptor,
208  nullptr);
209 }
arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsBatchToSpaceNdSupported()

bool IsBatchToSpaceNdSupported ( const TensorInfo input,
const TensorInfo output,
const BatchToSpaceNdDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 211 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonBatchToSpaceNdWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

215 {
216  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchToSpaceNdWorkloadValidate,
217  reasonIfUnsupported,
218  input,
219  output,
220  descriptor);
221 }
arm_compute::Status NeonBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &desc)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsComparisonSupported()

bool IsComparisonSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const ComparisonDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 223 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonComparisonWorkloadValidate().

Referenced by NeonLayerSupport::IsGreaterSupported(), and NeonLayerSupport::~NeonLayerSupport().

228 {
229 
230  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonComparisonWorkloadValidate,
231  reasonIfUnsupported,
232  input0,
233  input1,
234  output,
235  descriptor);
236 }
arm_compute::Status NeonComparisonWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsConcatSupported()

bool IsConcatSupported ( const std::vector< const TensorInfo *>  inputs,
const TensorInfo output,
const ConcatDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 238 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, OriginsDescriptor::GetConcatAxis(), OriginsDescriptor::GetNumDimensions(), TensorInfo::IsTypeSpaceMatch(), armnn::NeonConcatWorkloadValidate(), and armnn::SetValueChecked().

Referenced by NeonLayerSupport::IsMergerSupported(), and NeonLayerSupport::~NeonLayerSupport().

242 {
243  if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
244  {
245  SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.");
246  return false;
247  }
248 
249  unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
250  if(concatInnerAxis < 3) // Width, height, or channels
251  {
252  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConcatWorkloadValidate,
253  reasonIfUnsupported,
254  inputs,
255  output,
256  descriptor);
257  }
258  else if (concatInnerAxis == 3)
259  {
260  for (auto& input : inputs)
261  {
262  if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
263  {
264  SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
265  return false;
266  }
267  }
268  return true; // Sub-tensors support concat along batch
269  }
270  else // > 4 dimensions not supported.
271  {
272  SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
273  return false;
274  }
275 }
arm_compute::Status NeonConcatWorkloadValidate(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
void SetValueChecked(Optional< T &> optionalRef, V &&val)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
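The axis handling above means that only concatenation along the innermost three dimensions (width, height or channels) is forwarded to NeonConcatWorkloadValidate; concatenation along the outermost (batch) axis is accepted as long as all inputs match the output's type and quantization space, and more than four dimensions is rejected. A sketch of a channel-wise query, assuming the armnn::CreateDescriptorForConcatenation helper from armnn/Descriptors.hpp and illustrative shapes:

#include <NeonLayerSupport.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <string>
#include <vector>

bool CanConcatChannelsOnNeon()
{
    // Two NCHW tensors concatenated along the channel axis (axis 1 of 4).
    const std::vector<armnn::TensorShape> shapes = { armnn::TensorShape({1, 2, 4, 4}),
                                                     armnn::TensorShape({1, 3, 4, 4}) };
    const armnn::ConcatDescriptor descriptor =
        armnn::CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), 1);

    const armnn::TensorInfo in0(shapes[0], armnn::DataType::Float32);
    const armnn::TensorInfo in1(shapes[1], armnn::DataType::Float32);
    const armnn::TensorInfo out(armnn::TensorShape({1, 5, 4, 4}), armnn::DataType::Float32);

    std::string reason;
    armnn::NeonLayerSupport layerSupport;

    // GetNumDimensions() == 4 and GetConcatAxis() == 1, so concatInnerAxis == 2
    // and the query is forwarded to NeonConcatWorkloadValidate.
    return layerSupport.IsConcatSupported({ &in0, &in1 }, out, descriptor,
                                          armnn::Optional<std::string&>(reason));
}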

◆ IsConstantSupported()

bool IsConstantSupported ( const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 277 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonConstantWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

279 {
280  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConstantWorkloadValidate,
281  reasonIfUnsupported,
282  output);
283 }
arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo &output)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsConvertBf16ToFp32Supported()

bool IsConvertBf16ToFp32Supported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 285 of file NeonLayerSupport.cpp.

References armnn::IgnoreUnused().

Referenced by NeonLayerSupport::~NeonLayerSupport().

288 {
289  armnn::IgnoreUnused(input);
290  armnn::IgnoreUnused(output);
291  armnn::IgnoreUnused(reasonIfUnsupported);
292  return true;
293 }
void IgnoreUnused(Ts &&...)

◆ IsConvertFp16ToFp32Supported()

bool IsConvertFp16ToFp32Supported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 295 of file NeonLayerSupport.cpp.

References armnn::IgnoreUnused().

Referenced by NeonLayerSupport::~NeonLayerSupport().

298 {
299  armnn::IgnoreUnused(input);
300  armnn::IgnoreUnused(output);
301  armnn::IgnoreUnused(reasonIfUnsupported);
302  return true;
303 }
void IgnoreUnused(Ts &&...)

◆ IsConvertFp32ToBf16Supported()

bool IsConvertFp32ToBf16Supported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 305 of file NeonLayerSupport.cpp.

References armnn::IgnoreUnused().

Referenced by NeonLayerSupport::~NeonLayerSupport().

308 {
309  armnn::IgnoreUnused(input);
310  armnn::IgnoreUnused(output);
311  armnn::IgnoreUnused(reasonIfUnsupported);
312  return true;
313 }
void IgnoreUnused(Ts &&...)

◆ IsConvertFp32ToFp16Supported()

bool IsConvertFp32ToFp16Supported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 315 of file NeonLayerSupport.cpp.

References armnn::IgnoreUnused().

Referenced by NeonLayerSupport::~NeonLayerSupport().

318 {
319  armnn::IgnoreUnused(input);
320  armnn::IgnoreUnused(output);
321  armnn::IgnoreUnused(reasonIfUnsupported);
322  return true;
323 }
void IgnoreUnused(Ts &&...)

◆ IsConvolution2dSupported()

bool IsConvolution2dSupported ( const TensorInfo input,
const TensorInfo output,
const Convolution2dDescriptor descriptor,
const TensorInfo weights,
const Optional< TensorInfo > &  biases,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 325 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, NeonBackendModelContext::IsFastMathEnabled(), and armnn::NeonConvolution2dWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

331 {
332  bool isFastMathEnabled = false;
333 #if defined(ARMCOMPUTENEON_ENABLED)
334  if (m_ModelContextPtr)
335  {
336  if (m_ModelContextPtr.get() != nullptr)
337  {
338  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
339  if (modelOptions)
340  {
341  isFastMathEnabled = modelOptions->IsFastMathEnabled();
342  }
343  }
344  }
345 #endif
346 
347  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvolution2dWorkloadValidate,
348  reasonIfUnsupported,
349  input,
350  output,
351  descriptor,
352  weights,
353  biases,
354  isFastMathEnabled,
355  nullptr);
356 }
arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
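The fast-math flag is not a per-call argument: it is read from the backend's model context, which is populated from the model options handed to the optimizer. A sketch of enabling it for the CpuAcc backend through the public BackendOptions API ("CpuAcc"/"FastMathEnabled"); error handling is omitted:

#include <armnn/ArmNN.hpp>

armnn::IOptimizedNetworkPtr OptimizeWithNeonFastMath(const armnn::INetwork& network,
                                                     armnn::IRuntime& runtime)
{
    armnn::OptimizerOptions optimizerOptions;
    // FastMathEnabled lets the Neon backend choose faster convolution algorithms
    // (for example Winograd) that may trade a small amount of precision.
    optimizerOptions.m_ModelOptions.push_back(
        armnn::BackendOptions("CpuAcc", { { "FastMathEnabled", true } }));

    return armnn::Optimize(network, { armnn::Compute::CpuAcc },
                           runtime.GetDeviceSpec(), optimizerOptions);
}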

◆ IsDepthToSpaceSupported()

bool IsDepthToSpaceSupported ( const TensorInfo input,
const TensorInfo output,
const DepthToSpaceDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 358 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDepthToSpaceWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

362 {
363  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthToSpaceWorkloadValidate,
364  reasonIfUnsupported,
365  input,
366  output,
367  descriptor);
368 }
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
arm_compute::Status NeonDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor)

◆ IsDepthwiseConvolutionSupported()

bool IsDepthwiseConvolutionSupported ( const TensorInfo input,
const TensorInfo output,
const DepthwiseConvolution2dDescriptor descriptor,
const TensorInfo weights,
const Optional< TensorInfo > &  biases,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 370 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDepthwiseConvolutionWorkloadValidate().

Referenced by BOOST_AUTO_TEST_CASE(), and NeonLayerSupport::~NeonLayerSupport().

376 {
377  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
378  reasonIfUnsupported,
379  input,
380  output,
381  descriptor,
382  weights,
383  biases,
384  nullptr);
385 }
arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsDequantizeSupported()

bool IsDequantizeSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 387 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDequantizeWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

390 {
391  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDequantizeWorkloadValidate,
392  reasonIfUnsupported,
393  input,
394  output);
395 }
arm_compute::Status NeonDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsDilatedDepthwiseConvolutionSupported()

bool IsDilatedDepthwiseConvolutionSupported ( const TensorInfo input,
const TensorInfo output,
const DepthwiseConvolution2dDescriptor descriptor,
const TensorInfo weights,
const Optional< TensorInfo > &  biases,
Optional< std::string &>  reason = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 397 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDepthwiseConvolutionWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

403 {
404  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
405  reasonIfUnsupported,
406  input,
407  output,
408  descriptor,
409  weights,
410  biases,
411  nullptr);
412 }
arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsDivisionSupported()

bool IsDivisionSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 658 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDivisionWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

662 {
663  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDivisionWorkloadValidate,
664  reasonIfUnsupported,
665  input0,
666  input1,
667  output,
668  nullptr);
669 }
arm_compute::Status NeonDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsElementwiseUnarySupported()

bool IsElementwiseUnarySupported ( const TensorInfo input,
const TensorInfo output,
const ElementwiseUnaryDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 414 of file NeonLayerSupport.cpp.

References armnn::Abs, armnn::Exp, FORWARD_WORKLOAD_VALIDATE_FUNC, armnn::LogicalNot, ElementwiseUnaryDescriptor::m_Operation, armnn::Neg, armnn::NeonAbsWorkloadValidate(), armnn::NeonExpWorkloadValidate(), armnn::NeonLogicalNotWorkloadValidate(), armnn::NeonNegWorkloadValidate(), armnn::NeonRsqrtWorkloadValidate(), and armnn::Rsqrt.

Referenced by NeonLayerSupport::IsAbsSupported(), NeonLayerSupport::IsRsqrtSupported(), and NeonLayerSupport::~NeonLayerSupport().

418 {
419  switch(descriptor.m_Operation)
420  {
421  case UnaryOperation::Abs:
422  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAbsWorkloadValidate,
423  reasonIfUnsupported,
424  input,
425  output);
426  case UnaryOperation::Exp:
427  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonExpWorkloadValidate,
428  reasonIfUnsupported,
429  input,
430  output);
431  case UnaryOperation::Neg:
432  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNegWorkloadValidate,
433  reasonIfUnsupported,
434  input,
435  output);
436  case UnaryOperation::Rsqrt:
437  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonRsqrtWorkloadValidate,
438  reasonIfUnsupported,
439  input,
440  output);
441  case UnaryOperation::LogicalNot:
442  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogicalNotWorkloadValidate,
443  reasonIfUnsupported,
444  input,
445  output);
446  default:
447  return false;
448  }
449 }
arm_compute::Status NeonNegWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status NeonAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status NeonExpWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status NeonLogicalNotWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status NeonRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
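The deprecated IsAbsSupported() and IsRsqrtSupported() wrappers on this page build an ElementwiseUnaryDescriptor and defer to this method, so new code can query a unary operation directly. A short sketch (tensor infos supplied by the caller):

#include <NeonLayerSupport.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <string>

bool CanRsqrtOnNeon(const armnn::TensorInfo& input, const armnn::TensorInfo& output)
{
    const armnn::ElementwiseUnaryDescriptor descriptor(armnn::UnaryOperation::Rsqrt);

    std::string reason;
    armnn::NeonLayerSupport layerSupport;
    return layerSupport.IsElementwiseUnarySupported(input, output, descriptor,
                                                    armnn::Optional<std::string&>(reason));
}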

◆ IsFillSupported()

bool IsFillSupported ( const TensorInfo input,
const TensorInfo output,
const FillDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 451 of file NeonLayerSupport.cpp.

References armnn::IgnoreUnused().

Referenced by NeonLayerSupport::~NeonLayerSupport().

455 {
456  armnn::IgnoreUnused(input);
457  armnn::IgnoreUnused(output);
458  armnn::IgnoreUnused(descriptor);
459 
460  return IsNeonBackendSupported(reasonIfUnsupported);
461 }
void IgnoreUnused(Ts &&...)

◆ IsFloorSupported()

bool IsFloorSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 463 of file NeonLayerSupport.cpp.

References TensorInfo::GetDataType(), armnn::IgnoreUnused(), and armnn::IsSupportedForDataTypeGeneric().

Referenced by NeonLayerSupport::~NeonLayerSupport().

466 {
467  armnn::IgnoreUnused(output);
468  return IsNeonBackendSupported(reasonIfUnsupported) &&
469  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
470  input.GetDataType(),
471  &FalseFuncF16<>,
472  &TrueFunc<>,
473  &FalseFuncU8<>,
474  &FalseFuncI32<>,
475  &FalseFuncU8<>);
476 }
void IgnoreUnused(Ts &&...)
bool IsSupportedForDataTypeGeneric(Optional< std::string &> reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
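The Floor check combines the backend check with a per-data-type predicate; reading the function pointers in the order of the IsSupportedForDataTypeGeneric parameters (Float16, Float32, Uint8, Int32, Boolean), only the Float32 entry is TrueFunc, so Floor is reported as supported for Float32 tensors only. A small sketch of the consequence for a caller (shapes illustrative; the Float32 result also assumes the backend was built with NEON support):

#include <NeonLayerSupport.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <iostream>
#include <string>

void CheckNeonFloorSupport()
{
    const armnn::TensorShape shape({1, 4});
    const armnn::TensorInfo fp32(shape, armnn::DataType::Float32);
    const armnn::TensorInfo fp16(shape, armnn::DataType::Float16);

    std::string reason;
    armnn::NeonLayerSupport layerSupport;

    // Float32 passes the TrueFunc entry.
    std::cout << layerSupport.IsFloorSupported(fp32, fp32, armnn::Optional<std::string&>(reason)) << "\n";
    // Float16 hits FalseFuncF16 and is reported unsupported.
    std::cout << layerSupport.IsFloorSupported(fp16, fp16, armnn::Optional<std::string&>(reason)) << "\n";
}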

◆ IsFullyConnectedSupported()

bool IsFullyConnectedSupported ( const TensorInfo input,
const TensorInfo output,
const TensorInfo weights,
const TensorInfo biases,
const FullyConnectedDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 478 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonFullyConnectedWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

484 {
485  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonFullyConnectedWorkloadValidate,
486  reasonIfUnsupported,
487  input,
488  output,
489  weights,
490  biases,
491  descriptor,
492  nullptr);
493 }
arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsGatherSupported()

bool IsGatherSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const GatherDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 495 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonGatherWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

500 {
501  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonGatherWorkloadValidate,
502  reasonIfUnsupported,
503  input0,
504  input1,
505  output,
506  descriptor);
507 }
arm_compute::Status NeonGatherWorkloadValidate(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &output, const GatherDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsGreaterSupported()

bool IsGreaterSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 509 of file NeonLayerSupport.cpp.

References armnn::Greater, and NeonLayerSupport::IsComparisonSupported().

Referenced by NeonLayerSupport::~NeonLayerSupport().

513 {
514  ComparisonDescriptor descriptor(ComparisonOperation::Greater);
515  return IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported);
516 }
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override

◆ IsInputSupported()

bool IsInputSupported ( const TensorInfo input,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 518 of file NeonLayerSupport.cpp.

Referenced by NeonLayerSupport::~NeonLayerSupport().

520 {
521  return IsNeonBackendSupported(reasonIfUnsupported, input);
522 }

◆ IsInstanceNormalizationSupported()

bool IsInstanceNormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const InstanceNormalizationDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 524 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonInstanceNormalizationWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

528 {
529  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonInstanceNormalizationWorkloadValidate,
530  reasonIfUnsupported,
531  input,
532  output,
533  descriptor);
534 }
arm_compute::Status NeonInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsL2NormalizationSupported()

bool IsL2NormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const L2NormalizationDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 536 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonL2NormalizationWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

540 {
541  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
542 }
arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsLogicalBinarySupported()

bool IsLogicalBinarySupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const LogicalBinaryDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 544 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, armnn::LogicalAnd, armnn::LogicalOr, LogicalBinaryDescriptor::m_Operation, armnn::NeonLogicalAndWorkloadValidate(), and armnn::NeonLogicalOrWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

549 {
550  switch(descriptor.m_Operation)
551  {
552  case LogicalBinaryOperation::LogicalAnd:
553  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogicalAndWorkloadValidate,
554  reasonIfUnsupported,
555  input0,
556  input1,
557  output);
558  case LogicalBinaryOperation::LogicalOr:
559  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogicalOrWorkloadValidate,
560  reasonIfUnsupported,
561  input0,
562  input1,
563  output);
564  default:
565  return false;
566  }
567 }
arm_compute::Status NeonLogicalAndWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
arm_compute::Status NeonLogicalOrWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
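Only LogicalAnd and LogicalOr are forwarded; any other logical binary operation falls through to the default branch and is reported unsupported. A sketch of a query, assuming Boolean tensors (the data type ArmNN uses for logical values) and the LogicalBinaryDescriptor(LogicalBinaryOperation) constructor:

#include <NeonLayerSupport.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <string>

bool CanLogicalAndOnNeon()
{
    const armnn::TensorShape shape({1, 8});
    const armnn::TensorInfo in0(shape, armnn::DataType::Boolean);
    const armnn::TensorInfo in1(shape, armnn::DataType::Boolean);
    const armnn::TensorInfo out(shape, armnn::DataType::Boolean);

    const armnn::LogicalBinaryDescriptor descriptor(armnn::LogicalBinaryOperation::LogicalAnd);

    std::string reason;
    armnn::NeonLayerSupport layerSupport;
    return layerSupport.IsLogicalBinarySupported(in0, in1, out, descriptor,
                                                 armnn::Optional<std::string&>(reason));
}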

◆ IsLogSoftmaxSupported()

bool IsLogSoftmaxSupported ( const TensorInfo input,
const TensorInfo output,
const LogSoftmaxDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 569 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonLogSoftmaxWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

573 {
574  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
575 }
arm_compute::Status NeonLogSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsLstmSupported()

bool IsLstmSupported ( const TensorInfo input,
const TensorInfo outputStateIn,
const TensorInfo cellStateIn,
const TensorInfo scratchBuffer,
const TensorInfo outputStateOut,
const TensorInfo cellStateOut,
const TensorInfo output,
const LstmDescriptor descriptor,
const LstmInputParamsInfo paramsInfo,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 577 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonLstmFloatWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

587 {
588  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLstmFloatWorkloadValidate,
589  reasonIfUnsupported,
590  input,
591  outputStateIn,
592  cellStateIn,
593  scratchBuffer,
594  outputStateOut,
595  cellStateOut,
596  output,
597  descriptor,
598  paramsInfo);
599 }
arm_compute::Status NeonLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsMaximumSupported()

bool IsMaximumSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 601 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMaximumWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

605 {
606  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMaximumWorkloadValidate,
607  reasonIfUnsupported,
608  input0,
609  input1,
610  output);
611 }
arm_compute::Status NeonMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsMeanSupported()

bool IsMeanSupported ( const TensorInfo input,
const TensorInfo output,
const MeanDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 613 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMeanWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

617 {
618  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMeanWorkloadValidate,
619  reasonIfUnsupported,
620  input,
621  output,
622  descriptor);
623 }
arm_compute::Status NeonMeanWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &desc)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsMergerSupported()

bool IsMergerSupported ( const std::vector< const TensorInfo *>  inputs,
const TensorInfo output,
const MergerDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 625 of file NeonLayerSupport.cpp.

References NeonLayerSupport::IsConcatSupported().

Referenced by NeonLayerSupport::~NeonLayerSupport().

629 {
630  return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
631 }
bool IsConcatSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const ConcatDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override

◆ IsMinimumSupported()

bool IsMinimumSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 633 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMinimumWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

637 {
638  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMinimumWorkloadValidate,
639  reasonIfUnsupported,
640  input0,
641  input1,
642  output);
643 }
arm_compute::Status NeonMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsMultiplicationSupported()

bool IsMultiplicationSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 645 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMultiplicationWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

649 {
650  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMultiplicationWorkloadValidate,
651  reasonIfUnsupported,
652  input0,
653  input1,
654  output,
655  nullptr);
656 }
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
arm_compute::Status NeonMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)

◆ IsNormalizationSupported()

bool IsNormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const NormalizationDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 671 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonNormalizationWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

675 {
676  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNormalizationWorkloadValidate,
677  reasonIfUnsupported,
678  input,
679  output,
680  descriptor);
681 }
arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsOutputSupported()

bool IsOutputSupported ( const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 683 of file NeonLayerSupport.cpp.

Referenced by NeonLayerSupport::~NeonLayerSupport().

685 {
686  return IsNeonBackendSupported(reasonIfUnsupported, output);
687 }

◆ IsPadSupported()

bool IsPadSupported ( const TensorInfo input,
const TensorInfo output,
const PadDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 689 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPadWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

693 {
694  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPadWorkloadValidate,
695  reasonIfUnsupported,
696  input,
697  output,
698  descriptor);
699 }
arm_compute::Status NeonPadWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsPermuteSupported()

bool IsPermuteSupported ( const TensorInfo input,
const TensorInfo output,
const PermuteDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 701 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPermuteWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

705 {
706  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
707 }
arm_compute::Status NeonPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsPooling2dSupported()

bool IsPooling2dSupported ( const TensorInfo input,
const TensorInfo output,
const Pooling2dDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 709 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPooling2dWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

713 {
714  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
715 }
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
arm_compute::Status NeonPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)

◆ IsPreluSupported()

bool IsPreluSupported ( const TensorInfo input,
const TensorInfo alpha,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 717 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPreluWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

721 {
722  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
723 }
arm_compute::Status NeonPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsQLstmSupported()

bool IsQLstmSupported ( const TensorInfo input,
const TensorInfo previousOutputIn,
const TensorInfo previousCellStateIn,
const TensorInfo outputStateOut,
const TensorInfo cellStateOut,
const TensorInfo output,
const QLstmDescriptor descriptor,
const LstmInputParamsInfo paramsInfo,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 725 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, TensorInfo::GetDataType(), armnn::NeonQLstmWorkloadValidate(), armnn::QAsymmS8, and armnn::QSymmS16.

Referenced by NeonLayerSupport::~NeonLayerSupport().

734 {
735  // Check required here in order to pass IsLayerSupported for datatypes tests
736  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
737  previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
738  previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
739  outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
740  cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
741  output.GetDataType() == armnn::DataType::QAsymmS8)
742  {
743  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonQLstmWorkloadValidate,
744  reasonIfUnsupported,
745  input,
746  previousCellStateIn,
747  previousOutputIn,
748  cellStateOut,
749  outputStateOut,
750  output,
751  descriptor,
752  paramsInfo);
753  }
754  else
755  {
756  return false;
757  }
758 }
arm_compute::Status NeonQLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
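The data-type gate above expects QAsymmS8 for the input, the recurrent output state and the output, and QSymmS16 for the cell state, before anything is forwarded to the Arm Compute validator. The sketch below only illustrates tensor infos that satisfy that gate; the QLstmDescriptor and LstmInputParamsInfo are taken as parameters because populating weights and biases is model-specific, and the quantization parameters shown are illustrative:

#include <NeonLayerSupport.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/LstmParams.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <string>

bool CanQLstmOnNeon(const armnn::QLstmDescriptor& descriptor,
                    const armnn::LstmInputParamsInfo& paramsInfo,
                    unsigned int batch, unsigned int inputSize, unsigned int units)
{
    using armnn::DataType;
    const armnn::TensorShape inShape({batch, inputSize});
    const armnn::TensorShape stateShape({batch, units});

    const armnn::TensorInfo input(inShape, DataType::QAsymmS8, 0.0039f, 0);
    const armnn::TensorInfo prevOutputIn(stateShape, DataType::QAsymmS8, 0.0039f, 0);
    const armnn::TensorInfo prevCellStateIn(stateShape, DataType::QSymmS16, 1.0f / 32768.0f, 0);
    const armnn::TensorInfo outputStateOut(stateShape, DataType::QAsymmS8, 0.0039f, 0);
    const armnn::TensorInfo cellStateOut(stateShape, DataType::QSymmS16, 1.0f / 32768.0f, 0);
    const armnn::TensorInfo output(stateShape, DataType::QAsymmS8, 0.0039f, 0);

    std::string reason;
    armnn::NeonLayerSupport layerSupport;
    return layerSupport.IsQLstmSupported(input, prevOutputIn, prevCellStateIn,
                                         outputStateOut, cellStateOut, output,
                                         descriptor, paramsInfo,
                                         armnn::Optional<std::string&>(reason));
}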

◆ IsQuantizedLstmSupported()

bool IsQuantizedLstmSupported ( const TensorInfo input,
const TensorInfo cellStateIn,
const TensorInfo outputStateIn,
const TensorInfo cellStateOut,
const TensorInfo outputStateOut,
const QuantizedLstmInputParamsInfo paramsInfo,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 770 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonQuantizedLstmWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonQuantizedLstmWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   cellStateIn,
                                   outputStateIn,
                                   cellStateOut,
                                   outputStateOut,
                                   paramsInfo);
}

◆ IsQuantizeSupported()

bool IsQuantizeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 760 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonQuantizeWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonQuantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

◆ IsReduceSupported()

bool IsReduceSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ReduceDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 788 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonReduceWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonReduceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

◆ IsReshapeSupported()

bool IsReshapeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ReshapeDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 800 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, armnn::IgnoreUnused(), and armnn::NeonReshapeWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

{
    armnn::IgnoreUnused(descriptor);
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonReshapeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

◆ IsResizeBilinearSupported()

bool IsResizeBilinearSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 824 of file NeonLayerSupport.cpp.

References armnn::Bilinear, TensorInfo::GetShape(), NeonLayerSupport::IsResizeSupported(), ResizeDescriptor::m_DataLayout, ResizeDescriptor::m_Method, ResizeDescriptor::m_TargetHeight, ResizeDescriptor::m_TargetWidth, and armnn::NCHW.

Referenced by NeonLayerSupport::~NeonLayerSupport().

{
    ResizeDescriptor descriptor;
    descriptor.m_Method     = ResizeMethod::Bilinear;
    descriptor.m_DataLayout = DataLayout::NCHW;

    const TensorShape& outputShape = output.GetShape();
    descriptor.m_TargetHeight = outputShape[2];
    descriptor.m_TargetWidth  = outputShape[3];

    return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
}
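
Because this overload builds the descriptor itself, the caller only supplies the tensors; the target height and width are taken from dimensions 2 and 3 of the output shape, so 4-D NCHW tensors are expected. A minimal sketch, with a hypothetical helper name, illustrative shapes and the same headers as the pooling example above:

    bool IsUpscale2xSupportedOnNeon(std::string& reason)
    {
        NeonLayerSupport support;
        // NCHW: { batch, channels, height, width }
        TensorInfo input( {1, 3, 8, 8},   DataType::Float32);
        TensorInfo output({1, 3, 16, 16}, DataType::Float32);

        // Equivalent to IsResizeSupported with ResizeMethod::Bilinear, DataLayout::NCHW,
        // m_TargetHeight = 16 and m_TargetWidth = 16 read from the output shape.
        return support.IsResizeBilinearSupported(input, output, Optional<std::string&>(reason));
    }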

◆ IsResizeSupported()

bool IsResizeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ResizeDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 812 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonResizeWorkloadValidate().

Referenced by NeonLayerSupport::IsResizeBilinearSupported(), and NeonLayerSupport::~NeonLayerSupport().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonResizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

◆ IsRsqrtSupported()

bool IsRsqrtSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 839 of file NeonLayerSupport.cpp.

References NeonLayerSupport::IsElementwiseUnarySupported(), and armnn::Rsqrt.

Referenced by NeonLayerSupport::~NeonLayerSupport().

{
    ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt);
    return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
}
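
As the body shows, this is a thin wrapper over IsElementwiseUnarySupported with UnaryOperation::Rsqrt, so the two queries below should agree. A minimal sketch, with a hypothetical helper name, an illustrative shape and the same headers as the pooling example above:

    bool RsqrtQueriesAgree()
    {
        NeonLayerSupport support;
        TensorInfo info({1, 16}, DataType::Float32);
        std::string reason;

        bool viaRsqrt = support.IsRsqrtSupported(info, info, Optional<std::string&>(reason));

        ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt);
        bool viaUnary = support.IsElementwiseUnarySupported(info, info, descriptor, Optional<std::string&>(reason));

        return viaRsqrt == viaUnary;  // expected to be true: same code path underneath
    }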

◆ IsSliceSupported()

bool IsSliceSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const SliceDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 847 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSliceWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSliceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

◆ IsSoftmaxSupported()

bool IsSoftmaxSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const SoftmaxDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 859 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSoftmaxWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

◆ IsSpaceToBatchNdSupported()

bool IsSpaceToBatchNdSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const SpaceToBatchNdDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 867 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSpaceToBatchNdWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSpaceToBatchNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

◆ IsSpaceToDepthSupported()

bool IsSpaceToDepthSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const SpaceToDepthDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 879 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSpaceToDepthWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSpaceToDepthWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

◆ IsSplitterSupported() [1/2]

bool IsSplitterSupported ( const TensorInfo &  input,
const ViewsDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 891 of file NeonLayerSupport.cpp.

References TensorInfo::GetDataType(), and armnn::IgnoreUnused().

Referenced by NeonLayerSupport::~NeonLayerSupport().

{
    armnn::IgnoreUnused(descriptor);
    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                      input.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>);
}

◆ IsSplitterSupported() [2/2]

bool IsSplitterSupported ( const TensorInfo &  input,
const std::vector< std::reference_wrapper< TensorInfo >> &  outputs,
const ViewsDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 902 of file NeonLayerSupport.cpp.

References armnn::ComputeSplitAxis(), FORWARD_WORKLOAD_VALIDATE_FUNC, ViewsDescriptor::GetNumDimensions(), TensorInfo::GetShape(), armnn::IgnoreUnused(), TensorInfo::IsTypeSpaceMatch(), armnn::NeonSplitterWorkloadValidate(), and armnn::SetValueChecked().

{
#if defined(ARMCOMPUTENEON_ENABLED)
    // Split along the last dimension, cannot use sub-tensors
    // as width and height of the sub-tensors do not match
    // the width and height of the parent tensor
    // in case of input with more than 2D.
    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
    if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
        *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSplitterWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       outputs,
                                       *splitAxis.begin());
    }
#endif
    IgnoreUnused(descriptor);
    for (auto output : outputs)
    {
        if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
        {
            SetValueChecked(reasonIfUnsupported, "Neon Splitter: Types and quantization parameters must match.");
            return false;
        }
    }
    return true;
}
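
When the views split a tensor of more than two dimensions along its last dimension, the query is forwarded to NeonSplitterWorkloadValidate; for any other split it only checks that every output matches the input's type and quantization space. A sketch of the forwarded case, splitting a 3-D tensor into two views along the last axis; the helper name and shapes are illustrative, and the snippet assumes the headers from the pooling example above plus <functional>, <set> and <vector>:

    bool IsLastAxisSplitSupportedOnNeon(std::string& reason)
    {
        // Split a {1, 2, 4} tensor into two {1, 2, 2} views along axis 2.
        TensorInfo input({1, 2, 4}, DataType::Float32);
        TensorInfo out0 ({1, 2, 2}, DataType::Float32);
        TensorInfo out1 ({1, 2, 2}, DataType::Float32);
        std::vector<std::reference_wrapper<TensorInfo>> outputs = { out0, out1 };

        ViewsDescriptor views(2, 3);            // two views, three dimensions each
        for (unsigned int d = 0; d < 3; ++d)
        {
            views.SetViewSize(0, d, out0.GetShape()[d]);
            views.SetViewSize(1, d, out1.GetShape()[d]);
            views.SetViewOriginCoord(0, d, 0);
            views.SetViewOriginCoord(1, d, 0);
        }
        views.SetViewOriginCoord(1, 2, 2);      // second view starts at offset 2 on the split axis

        NeonLayerSupport support;
        return support.IsSplitterSupported(input, outputs, views, Optional<std::string&>(reason));
    }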

◆ IsStackSupported()

bool IsStackSupported ( const std::vector< const TensorInfo *> &  inputs,
const TensorInfo &  output,
const StackDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 935 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonStackWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonStackWorkloadValidate,
                                   reasonIfUnsupported,
                                   inputs,
                                   output,
                                   descriptor);
}

◆ IsStridedSliceSupported()

bool IsStridedSliceSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const StridedSliceDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 947 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonStridedSliceWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonStridedSliceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

◆ IsSubtractionSupported()

bool IsSubtractionSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 959 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSubtractionWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSubtractionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}
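
The trailing nullptr corresponds to the optional ActivationDescriptor parameter of NeonSubtractionWorkloadValidate, so support is queried without a fused activation. A minimal sketch, with a hypothetical helper name, illustrative shapes and the same headers as the pooling example above:

    bool IsFloat32SubtractionSupportedOnNeon(std::string& reason)
    {
        NeonLayerSupport support;
        TensorInfo input0({1, 4}, DataType::Float32);
        TensorInfo input1({1, 4}, DataType::Float32);
        TensorInfo output({1, 4}, DataType::Float32);
        return support.IsSubtractionSupported(input0, input1, output, Optional<std::string&>(reason));
    }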

◆ IsTransposeConvolution2dSupported()

bool IsTransposeConvolution2dSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const TransposeConvolution2dDescriptor &  descriptor,
const TensorInfo &  weights,
const Optional< TensorInfo > &  biases,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 972 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonTransposeConvolution2dWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

◆ IsTransposeSupported()

bool IsTransposeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const TransposeDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 988 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonTransposeWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

The documentation for this class was generated from the following files: