ArmNN 20.08
NeonLayerSupport Class Reference

#include <NeonLayerSupport.hpp>

Inheritance diagram for NeonLayerSupport:
NeonLayerSupport → LayerSupportBase → ILayerSupport

Public Member Functions

bool IsAbsSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsActivationSupported (const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsAdditionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsArgMinMaxSupported (const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchToSpaceNdSupported (const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsComparisonSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConcatSupported (const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const ConcatDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConstantSupported (const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertBf16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToBf16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToFp16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthToSpaceSupported (const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDequantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDilatedDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reason=EmptyOptional()) const override
 
bool IsElementwiseUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFillSupported (const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFloorSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFullyConnectedSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsGreaterSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInputSupported (const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInstanceNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsL2NormalizationSupported (const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMaximumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMeanSupported (const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMergerSupported (const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const MergerDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMinimumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMultiplicationSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDivisionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsOutputSupported (const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPadSupported (const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPermuteSupported (const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPooling2dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPreluSupported (const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQLstmSupported (const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQuantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQuantizedLstmSupported (const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsReshapeSupported (const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsResizeSupported (const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsResizeBilinearSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsRsqrtSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSliceSupported (const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToBatchNdSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToDepthSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSplitterSupported (const TensorInfo &input, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSplitterSupported (const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStackSupported (const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStridedSliceSupported (const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSubtractionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeSupported (const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
- Public Member Functions inherited from LayerSupportBase
bool IsDebugSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDetectionPostProcessSupported (const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsEqualSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFakeQuantizationSupported (const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMemCopySupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMemImportSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMergeSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPreCompiledSupported (const TensorInfo &input, const PreCompiledDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsRankSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsStandInSupported (const std::vector< const TensorInfo *> &inputs, const std::vector< const TensorInfo *> &outputs, const StandInDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSwitchSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output0, const TensorInfo &output1, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 

Additional Inherited Members

- Protected Member Functions inherited from ILayerSupport
 ILayerSupport ()
 
virtual ~ILayerSupport ()
 

Detailed Description

Definition at line 12 of file NeonLayerSupport.hpp.
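NeonLayerSupport implements the ILayerSupport queries for the Arm NN Neon backend: most Is*Supported calls forward the given tensor infos and descriptor to the corresponding Neon workload validate function and, on failure, write an explanation into the optional reasonIfUnsupported string. The sketch below is an illustrative usage example only (the include path follows this page's #include line, and the shapes are made up), not code taken from the ArmNN documentation.

```cpp
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <NeonLayerSupport.hpp> // as shown above; adjust to your include layout

#include <string>

bool CanRunReluOnNeon()
{
    using namespace armnn;

    NeonLayerSupport layerSupport;

    // Query whether a ReLU activation on a Float32 tensor is supported.
    TensorInfo input(TensorShape({1, 16, 224, 224}), DataType::Float32);
    TensorInfo output(TensorShape({1, 16, 224, 224}), DataType::Float32);

    ActivationDescriptor descriptor;
    descriptor.m_Function = ActivationFunction::ReLu;

    std::string reason;
    const bool supported = layerSupport.IsActivationSupported(input,
                                                              output,
                                                              descriptor,
                                                              Optional<std::string&>(reason));
    // On failure, 'reason' holds the backend's explanation.
    return supported;
}
```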

Member Function Documentation

◆ IsAbsSupported()

bool IsAbsSupported (const TensorInfo &input,
                     const TensorInfo &output,
                     Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 128 of file NeonLayerSupport.cpp.

References armnn::Abs, and NeonLayerSupport::IsElementwiseUnarySupported().

{
    ElementwiseUnaryDescriptor descriptor(UnaryOperation::Abs);
    return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
}

◆ IsActivationSupported()

bool IsActivationSupported (const TensorInfo &input,
                            const TensorInfo &output,
                            const ActivationDescriptor &descriptor,
                            Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 136 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, armnn::IgnoreUnused(), and armnn::NeonActivationWorkloadValidate().

{
    IgnoreUnused(descriptor);
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonActivationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

◆ IsAdditionSupported()

bool IsAdditionSupported (const TensorInfo &input0,
                          const TensorInfo &input1,
                          const TensorInfo &output,
                          Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 149 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonAdditionWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAdditionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

◆ IsArgMinMaxSupported()

bool IsArgMinMaxSupported (const TensorInfo &input,
                           const TensorInfo &output,
                           const ArgMinMaxDescriptor &descriptor,
                           Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 161 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonArgMinMaxWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonArgMinMaxWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

◆ IsBatchNormalizationSupported()

bool IsBatchNormalizationSupported (const TensorInfo &input,
                                    const TensorInfo &output,
                                    const TensorInfo &mean,
                                    const TensorInfo &var,
                                    const TensorInfo &beta,
                                    const TensorInfo &gamma,
                                    const BatchNormalizationDescriptor &descriptor,
                                    Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 173 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonBatchNormalizationValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchNormalizationValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   mean,
                                   var,
                                   beta,
                                   gamma,
                                   descriptor);
}

◆ IsBatchToSpaceNdSupported()

bool IsBatchToSpaceNdSupported (const TensorInfo &input,
                                const TensorInfo &output,
                                const BatchToSpaceNdDescriptor &descriptor,
                                Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 193 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonBatchToSpaceNdWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchToSpaceNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

◆ IsComparisonSupported()

bool IsComparisonSupported (const TensorInfo &input0,
                            const TensorInfo &input1,
                            const TensorInfo &output,
                            const ComparisonDescriptor &descriptor,
                            Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 205 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonComparisonWorkloadValidate().

Referenced by NeonLayerSupport::IsGreaterSupported().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonComparisonWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   descriptor);
}

◆ IsConcatSupported()

bool IsConcatSupported (const std::vector< const TensorInfo *> inputs,
                        const TensorInfo &output,
                        const ConcatDescriptor &descriptor,
                        Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 220 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, OriginsDescriptor::GetConcatAxis(), OriginsDescriptor::GetNumDimensions(), TensorInfo::IsTypeSpaceMatch(), armnn::NeonConcatWorkloadValidate(), and armnn::SetValueChecked().

Referenced by NeonLayerSupport::IsMergerSupported().

{
    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
    {
        SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.");
        return false;
    }

    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
    if (concatInnerAxis < 3) // Width, height, or channels
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConcatWorkloadValidate,
                                       reasonIfUnsupported,
                                       inputs,
                                       output,
                                       descriptor);
    }
    else if (concatInnerAxis == 3)
    {
        for (auto& input : inputs)
        {
            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
            {
                SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
                return false;
            }
        }
        return true; // Sub-tensors support concat along batch
    }
    else // > 4 dimensions not supported.
    {
        SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
        return false;
    }
}
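As a worked example of the axis arithmetic above (an illustrative case, not taken from the source comments): for a 4-D NCHW concat, joining along the channel axis (GetConcatAxis() == 1) gives concatInnerAxis = (4 - 1) - 1 = 2, so the request is forwarded to NeonConcatWorkloadValidate, whereas joining along the batch axis (GetConcatAxis() == 0) gives concatInnerAxis = 3 and takes the sub-tensor path, which only requires the input and output types and quantization spaces to match.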

◆ IsConstantSupported()

bool IsConstantSupported (const TensorInfo &output,
                          Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 259 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonConstantWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConstantWorkloadValidate,
                                   reasonIfUnsupported,
                                   output);
}

◆ IsConvertBf16ToFp32Supported()

bool IsConvertBf16ToFp32Supported (const TensorInfo &input,
                                   const TensorInfo &output,
                                   Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 267 of file NeonLayerSupport.cpp.

References armnn::IgnoreUnused().

{
    armnn::IgnoreUnused(input);
    armnn::IgnoreUnused(output);
    armnn::IgnoreUnused(reasonIfUnsupported);
    return true;
}

◆ IsConvertFp16ToFp32Supported()

bool IsConvertFp16ToFp32Supported (const TensorInfo &input,
                                   const TensorInfo &output,
                                   Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 277 of file NeonLayerSupport.cpp.

References armnn::IgnoreUnused().

{
    armnn::IgnoreUnused(input);
    armnn::IgnoreUnused(output);
    armnn::IgnoreUnused(reasonIfUnsupported);
    return true;
}

◆ IsConvertFp32ToBf16Supported()

bool IsConvertFp32ToBf16Supported (const TensorInfo &input,
                                   const TensorInfo &output,
                                   Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 287 of file NeonLayerSupport.cpp.

References armnn::IgnoreUnused().

{
    armnn::IgnoreUnused(input);
    armnn::IgnoreUnused(output);
    armnn::IgnoreUnused(reasonIfUnsupported);
    return true;
}

◆ IsConvertFp32ToFp16Supported()

bool IsConvertFp32ToFp16Supported (const TensorInfo &input,
                                   const TensorInfo &output,
                                   Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 297 of file NeonLayerSupport.cpp.

References armnn::IgnoreUnused().

{
    armnn::IgnoreUnused(input);
    armnn::IgnoreUnused(output);
    armnn::IgnoreUnused(reasonIfUnsupported);
    return true;
}

◆ IsConvolution2dSupported()

bool IsConvolution2dSupported (const TensorInfo &input,
                               const TensorInfo &output,
                               const Convolution2dDescriptor &descriptor,
                               const TensorInfo &weights,
                               const Optional< TensorInfo > &biases,
                               Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 307 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonConvolution2dWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

◆ IsDepthToSpaceSupported()

bool IsDepthToSpaceSupported (const TensorInfo &input,
                              const TensorInfo &output,
                              const DepthToSpaceDescriptor &descriptor,
                              Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 323 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDepthToSpaceWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthToSpaceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

◆ IsDepthwiseConvolutionSupported()

bool IsDepthwiseConvolutionSupported (const TensorInfo &input,
                                      const TensorInfo &output,
                                      const DepthwiseConvolution2dDescriptor &descriptor,
                                      const TensorInfo &weights,
                                      const Optional< TensorInfo > &biases,
                                      Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 335 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDepthwiseConvolutionWorkloadValidate().

Referenced by BOOST_AUTO_TEST_CASE().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

◆ IsDequantizeSupported()

bool IsDequantizeSupported (const TensorInfo &input,
                            const TensorInfo &output,
                            Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 351 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDequantizeWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDequantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

◆ IsDilatedDepthwiseConvolutionSupported()

bool IsDilatedDepthwiseConvolutionSupported (const TensorInfo &input,
                                             const TensorInfo &output,
                                             const DepthwiseConvolution2dDescriptor &descriptor,
                                             const TensorInfo &weights,
                                             const Optional< TensorInfo > &biases,
                                             Optional< std::string &> reason = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 361 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDepthwiseConvolutionWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

◆ IsDivisionSupported()

bool IsDivisionSupported (const TensorInfo &input0,
                          const TensorInfo &input1,
                          const TensorInfo &output,
                          Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 589 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDivisionWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDivisionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

◆ IsElementwiseUnarySupported()

bool IsElementwiseUnarySupported (const TensorInfo &input,
                                  const TensorInfo &output,
                                  const ElementwiseUnaryDescriptor &descriptor,
                                  Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 377 of file NeonLayerSupport.cpp.

References armnn::Abs, armnn::Exp, FORWARD_WORKLOAD_VALIDATE_FUNC, ElementwiseUnaryDescriptor::m_Operation, armnn::Neg, armnn::NeonAbsWorkloadValidate(), armnn::NeonExpWorkloadValidate(), armnn::NeonNegWorkloadValidate(), armnn::NeonRsqrtWorkloadValidate(), and armnn::Rsqrt.

Referenced by NeonLayerSupport::IsAbsSupported(), and NeonLayerSupport::IsRsqrtSupported().

{
    switch(descriptor.m_Operation)
    {
        case UnaryOperation::Abs:
            FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAbsWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Exp:
            FORWARD_WORKLOAD_VALIDATE_FUNC(NeonExpWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Neg:
            FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNegWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Rsqrt:
            FORWARD_WORKLOAD_VALIDATE_FUNC(NeonRsqrtWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        default:
            return false;
    }
}

◆ IsFillSupported()

bool IsFillSupported (const TensorInfo &input,
                      const TensorInfo &output,
                      const FillDescriptor &descriptor,
                      Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 409 of file NeonLayerSupport.cpp.

References armnn::IgnoreUnused().

{
    armnn::IgnoreUnused(input);
    armnn::IgnoreUnused(output);
    armnn::IgnoreUnused(descriptor);

    return IsNeonBackendSupported(reasonIfUnsupported);
}

◆ IsFloorSupported()

bool IsFloorSupported (const TensorInfo &input,
                       const TensorInfo &output,
                       Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 421 of file NeonLayerSupport.cpp.

References TensorInfo::GetDataType(), armnn::IgnoreUnused(), and armnn::IsSupportedForDataTypeGeneric().

{
    armnn::IgnoreUnused(output);
    return IsNeonBackendSupported(reasonIfUnsupported) &&
           IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                         input.GetDataType(),
                                         &FalseFuncF16<>,
                                         &TrueFunc<>,
                                         &FalseFuncU8<>,
                                         &FalseFuncI32<>,
                                         &FalseFuncU8<>);
}
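Reading the dispatch above (the function pointers are passed to IsSupportedForDataTypeGeneric in the order Float16, Float32, Uint8, Int32, Boolean): in effect the Neon backend only reports Floor as supported for Float32 inputs, and only when the Neon backend itself is available.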

◆ IsFullyConnectedSupported()

bool IsFullyConnectedSupported (const TensorInfo &input,
                                const TensorInfo &output,
                                const TensorInfo &weights,
                                const TensorInfo &biases,
                                const FullyConnectedDescriptor &descriptor,
                                Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 436 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonFullyConnectedWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonFullyConnectedWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   weights,
                                   biases,
                                   descriptor);
}

◆ IsGatherSupported()

bool IsGatherSupported (const TensorInfo &input0,
                        const TensorInfo &input1,
                        const TensorInfo &output,
                        const GatherDescriptor &descriptor,
                        Optional< std::string &> reasonIfUnsupported) const override

Reimplemented from LayerSupportBase.

Definition at line 452 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonGatherWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonGatherWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   descriptor);
}

◆ IsGreaterSupported()

bool IsGreaterSupported (const TensorInfo &input0,
                         const TensorInfo &input1,
                         const TensorInfo &output,
                         Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 466 of file NeonLayerSupport.cpp.

References armnn::Greater, and NeonLayerSupport::IsComparisonSupported().

{
    ComparisonDescriptor descriptor(ComparisonOperation::Greater);
    return IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported);
}

◆ IsInputSupported()

bool IsInputSupported (const TensorInfo &input,
                       Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 475 of file NeonLayerSupport.cpp.

{
    return IsNeonBackendSupported(reasonIfUnsupported, input);
}

◆ IsInstanceNormalizationSupported()

bool IsInstanceNormalizationSupported (const TensorInfo &input,
                                       const TensorInfo &output,
                                       const InstanceNormalizationDescriptor &descriptor,
                                       Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 481 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonInstanceNormalizationWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonInstanceNormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

◆ IsL2NormalizationSupported()

bool IsL2NormalizationSupported (const TensorInfo &input,
                                 const TensorInfo &output,
                                 const L2NormalizationDescriptor &descriptor,
                                 Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 493 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonL2NormalizationWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

◆ IsLogSoftmaxSupported()

bool IsLogSoftmaxSupported (const TensorInfo &input,
                            const TensorInfo &output,
                            const LogSoftmaxDescriptor &descriptor,
                            Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 501 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonLogSoftmaxWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

◆ IsLstmSupported()

bool IsLstmSupported (const TensorInfo &input,
                      const TensorInfo &outputStateIn,
                      const TensorInfo &cellStateIn,
                      const TensorInfo &scratchBuffer,
                      const TensorInfo &outputStateOut,
                      const TensorInfo &cellStateOut,
                      const TensorInfo &output,
                      const LstmDescriptor &descriptor,
                      const LstmInputParamsInfo &paramsInfo,
                      Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 509 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonLstmFloatWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLstmFloatWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   outputStateIn,
                                   cellStateIn,
                                   scratchBuffer,
                                   outputStateOut,
                                   cellStateOut,
                                   output,
                                   descriptor,
                                   paramsInfo);
}

◆ IsMaximumSupported()

bool IsMaximumSupported (const TensorInfo &input0,
                         const TensorInfo &input1,
                         const TensorInfo &output,
                         Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 533 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMaximumWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMaximumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

◆ IsMeanSupported()

bool IsMeanSupported (const TensorInfo &input,
                      const TensorInfo &output,
                      const MeanDescriptor &descriptor,
                      Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 545 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMeanWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMeanWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

◆ IsMergerSupported()

bool IsMergerSupported (const std::vector< const TensorInfo *> inputs,
                        const TensorInfo &output,
                        const MergerDescriptor &descriptor,
                        Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 557 of file NeonLayerSupport.cpp.

References NeonLayerSupport::IsConcatSupported().

{
    return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
}

◆ IsMinimumSupported()

bool IsMinimumSupported (const TensorInfo &input0,
                         const TensorInfo &input1,
                         const TensorInfo &output,
                         Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 565 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMinimumWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMinimumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

◆ IsMultiplicationSupported()

bool IsMultiplicationSupported (const TensorInfo &input0,
                                const TensorInfo &input1,
                                const TensorInfo &output,
                                Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 577 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMultiplicationWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMultiplicationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

◆ IsNormalizationSupported()

bool IsNormalizationSupported (const TensorInfo &input,
                               const TensorInfo &output,
                               const NormalizationDescriptor &descriptor,
                               Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 601 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonNormalizationWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

◆ IsOutputSupported()

bool IsOutputSupported (const TensorInfo &output,
                        Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 613 of file NeonLayerSupport.cpp.

{
    return IsNeonBackendSupported(reasonIfUnsupported, output);
}

◆ IsPadSupported()

bool IsPadSupported (const TensorInfo &input,
                     const TensorInfo &output,
                     const PadDescriptor &descriptor,
                     Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 619 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPadWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPadWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

◆ IsPermuteSupported()

bool IsPermuteSupported (const TensorInfo &input,
                         const TensorInfo &output,
                         const PermuteDescriptor &descriptor,
                         Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 631 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPermuteWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

◆ IsPooling2dSupported()

bool IsPooling2dSupported (const TensorInfo &input,
                           const TensorInfo &output,
                           const Pooling2dDescriptor &descriptor,
                           Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 639 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPooling2dWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

◆ IsPreluSupported()

bool IsPreluSupported (const TensorInfo &input,
                       const TensorInfo &alpha,
                       const TensorInfo &output,
                       Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 647 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPreluWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
}

◆ IsQLstmSupported()

bool IsQLstmSupported (const TensorInfo &input,
                       const TensorInfo &previousOutputIn,
                       const TensorInfo &previousCellStateIn,
                       const TensorInfo &outputStateOut,
                       const TensorInfo &cellStateOut,
                       const TensorInfo &output,
                       const QLstmDescriptor &descriptor,
                       const LstmInputParamsInfo &paramsInfo,
                       Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 655 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, TensorInfo::GetDataType(), armnn::NeonQLstmWorkloadValidate(), armnn::QAsymmS8, and armnn::QSymmS16.

{
    // Check required here in order to pass IsLayerSupported for datatypes tests
    if (input.GetDataType()               == armnn::DataType::QAsymmS8 &&
        previousOutputIn.GetDataType()    == armnn::DataType::QAsymmS8 &&
        previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
        outputStateOut.GetDataType()      == armnn::DataType::QAsymmS8 &&
        cellStateOut.GetDataType()        == armnn::DataType::QSymmS16 &&
        output.GetDataType()              == armnn::DataType::QAsymmS8)
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonQLstmWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       previousCellStateIn,
                                       previousOutputIn,
                                       cellStateOut,
                                       outputStateOut,
                                       output,
                                       descriptor,
                                       paramsInfo);
    }
    else
    {
        return false;
    }
}
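Note the data type gate ahead of the forwarded call: NeonQLstmWorkloadValidate is only consulted when input, previousOutputIn, outputStateOut and output are QAsymmS8 and both cell state tensors are QSymmS16; any other combination is reported as unsupported immediately.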

◆ IsQuantizedLstmSupported()

bool IsQuantizedLstmSupported (const TensorInfo &input,
                               const TensorInfo &cellStateIn,
                               const TensorInfo &outputStateIn,
                               const TensorInfo &cellStateOut,
                               const TensorInfo &outputStateOut,
                               const QuantizedLstmInputParamsInfo &paramsInfo,
                               Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 700 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonQuantizedLstmWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonQuantizedLstmWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   cellStateIn,
                                   outputStateIn,
                                   cellStateOut,
                                   outputStateOut,
                                   paramsInfo);
}

◆ IsQuantizeSupported()

bool IsQuantizeSupported (const TensorInfo &input,
                          const TensorInfo &output,
                          Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 690 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonQuantizeWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonQuantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

◆ IsReshapeSupported()

bool IsReshapeSupported (const TensorInfo &input,
                         const TensorInfo &output,
                         const ReshapeDescriptor &descriptor,
                         Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 718 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, armnn::IgnoreUnused(), and armnn::NeonReshapeWorkloadValidate().

{
    armnn::IgnoreUnused(descriptor);
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonReshapeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

◆ IsResizeBilinearSupported()

bool IsResizeBilinearSupported (const TensorInfo &input,
                                const TensorInfo &output,
                                Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 742 of file NeonLayerSupport.cpp.

References armnn::Bilinear, TensorInfo::GetShape(), NeonLayerSupport::IsResizeSupported(), ResizeDescriptor::m_DataLayout, ResizeDescriptor::m_Method, ResizeDescriptor::m_TargetHeight, ResizeDescriptor::m_TargetWidth, and armnn::NCHW.

{
    ResizeDescriptor descriptor;
    descriptor.m_Method     = ResizeMethod::Bilinear;
    descriptor.m_DataLayout = DataLayout::NCHW;

    const TensorShape& outputShape = output.GetShape();
    descriptor.m_TargetHeight = outputShape[2];
    descriptor.m_TargetWidth  = outputShape[3];

    return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
}

◆ IsResizeSupported()

bool IsResizeSupported (const TensorInfo &input,
                        const TensorInfo &output,
                        const ResizeDescriptor &descriptor,
                        Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 730 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonResizeWorkloadValidate().

Referenced by NeonLayerSupport::IsResizeBilinearSupported().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonResizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

◆ IsRsqrtSupported()

bool IsRsqrtSupported (const TensorInfo &input,
                       const TensorInfo &output,
                       Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 757 of file NeonLayerSupport.cpp.

References NeonLayerSupport::IsElementwiseUnarySupported(), and armnn::Rsqrt.

{
    ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt);
    return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
}

◆ IsSliceSupported()

bool IsSliceSupported (const TensorInfo &input,
                       const TensorInfo &output,
                       const SliceDescriptor &descriptor,
                       Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 765 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSliceWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSliceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

◆ IsSoftmaxSupported()

bool IsSoftmaxSupported (const TensorInfo &input,
                         const TensorInfo &output,
                         const SoftmaxDescriptor &descriptor,
                         Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 777 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSoftmaxWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

◆ IsSpaceToBatchNdSupported()

bool IsSpaceToBatchNdSupported (const TensorInfo &input,
                                const TensorInfo &output,
                                const SpaceToBatchNdDescriptor &descriptor,
                                Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 785 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSpaceToBatchNdWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSpaceToBatchNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

◆ IsSpaceToDepthSupported()

bool IsSpaceToDepthSupported (const TensorInfo &input,
                              const TensorInfo &output,
                              const SpaceToDepthDescriptor &descriptor,
                              Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 797 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSpaceToDepthWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSpaceToDepthWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

◆ IsSplitterSupported() [1/2]

bool IsSplitterSupported (const TensorInfo &input,
                          const ViewsDescriptor &descriptor,
                          Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 809 of file NeonLayerSupport.cpp.

References TensorInfo::GetDataType(), and armnn::IgnoreUnused().

{
    armnn::IgnoreUnused(descriptor);
    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                      input.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>);
}

◆ IsSplitterSupported() [2/2]

bool IsSplitterSupported (const TensorInfo &input,
                          const std::vector< std::reference_wrapper< TensorInfo >> &outputs,
                          const ViewsDescriptor &descriptor,
                          Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 820 of file NeonLayerSupport.cpp.

References armnn::ComputeSplitAxis(), FORWARD_WORKLOAD_VALIDATE_FUNC, ViewsDescriptor::GetNumDimensions(), TensorInfo::GetShape(), armnn::IgnoreUnused(), TensorInfo::IsTypeSpaceMatch(), armnn::NeonSplitterWorkloadValidate(), and armnn::SetValueChecked().

{
#if defined(ARMCOMPUTENEON_ENABLED)
    // Split along the last dimension, cannot use sub-tensors
    // as width and height of the sub-tensors do not match
    // the width and height of the parent tensor
    // in case of input with more than 2D.
    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
    if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
        *splitAxis.begin() == descriptor.GetNumDimensions() - 1)
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSplitterWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       outputs,
                                       *splitAxis.begin());
    }
#endif
    IgnoreUnused(descriptor);
    for (auto output : outputs)
    {
        if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
        {
            SetValueChecked(reasonIfUnsupported, "Neon Splitter: Types and quantization parameters must match.");
            return false;
        }
    }
    return true;
}
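A call sketch for this overload is shown below; the shapes, the view layout and the function name are illustrative assumptions only. It splits a 4-D tensor into two halves along the channel axis, which is not the last dimension, so per the code above support hinges on the type and quantization-space check rather than on NeonSplitterWorkloadValidate.

```cpp
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <NeonLayerSupport.hpp> // as shown at the top of this page

#include <functional>
#include <string>
#include <vector>

bool CanSplitChannelsOnNeon()
{
    using namespace armnn;

    NeonLayerSupport layerSupport;

    TensorInfo input(TensorShape({1, 4, 8, 8}), DataType::Float32);
    TensorInfo half0(TensorShape({1, 2, 8, 8}), DataType::Float32);
    TensorInfo half1(TensorShape({1, 2, 8, 8}), DataType::Float32);
    std::vector<std::reference_wrapper<TensorInfo>> outputs { half0, half1 };

    // Two views over a 4-D input, split down the middle of the channel axis.
    ViewsDescriptor descriptor(2, 4);
    descriptor.SetViewOriginCoord(1, 1, 2); // second view starts at channel 2
    for (unsigned int view = 0; view < 2; ++view)
    {
        descriptor.SetViewSize(view, 0, 1);
        descriptor.SetViewSize(view, 1, 2);
        descriptor.SetViewSize(view, 2, 8);
        descriptor.SetViewSize(view, 3, 8);
    }

    std::string reason;
    return layerSupport.IsSplitterSupported(input, outputs, descriptor,
                                            Optional<std::string&>(reason));
}
```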

◆ IsStackSupported()

bool IsStackSupported (const std::vector< const TensorInfo *> &inputs,
                       const TensorInfo &output,
                       const StackDescriptor &descriptor,
                       Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 853 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonStackWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonStackWorkloadValidate,
                                   reasonIfUnsupported,
                                   inputs,
                                   output,
                                   descriptor);
}

◆ IsStridedSliceSupported()

bool IsStridedSliceSupported (const TensorInfo &input,
                              const TensorInfo &output,
                              const StridedSliceDescriptor &descriptor,
                              Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 865 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonStridedSliceWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonStridedSliceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

◆ IsSubtractionSupported()

bool IsSubtractionSupported (const TensorInfo &input0,
                             const TensorInfo &input1,
                             const TensorInfo &output,
                             Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 877 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSubtractionWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSubtractionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

◆ IsTransposeConvolution2dSupported()

bool IsTransposeConvolution2dSupported (const TensorInfo &input,
                                        const TensorInfo &output,
                                        const TransposeConvolution2dDescriptor &descriptor,
                                        const TensorInfo &weights,
                                        const Optional< TensorInfo > &biases,
                                        Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 889 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonTransposeConvolution2dWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

◆ IsTransposeSupported()

bool IsTransposeSupported (const TensorInfo &input,
                           const TensorInfo &output,
                           const TransposeDescriptor &descriptor,
                           Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.

Definition at line 905 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonTransposeWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

The documentation for this class was generated from the following files:

NeonLayerSupport.hpp
NeonLayerSupport.cpp