From a983e4699082a0b1ef685bab7354f2ad9cd37a44 Mon Sep 17 00:00:00 2001
From: Colm Donelan
Date: Wed, 20 May 2020 16:12:19 +0100
Subject: Updating Doxygen documentation for 20.05 release.

Change-Id: I4d624343ed5fd6ae269c3d53532903084508fd14
Signed-off-by: Colm Donelan
---
 20.05/classarmnn_1_1_neon_layer_support.xhtml | 3505 +++++++++++++++++++++++++
 1 file changed, 3505 insertions(+)
 create mode 100644 20.05/classarmnn_1_1_neon_layer_support.xhtml

(limited to '20.05/classarmnn_1_1_neon_layer_support.xhtml')

diff --git a/20.05/classarmnn_1_1_neon_layer_support.xhtml b/20.05/classarmnn_1_1_neon_layer_support.xhtml
new file mode 100644
index 0000000000..21926f1e49
--- /dev/null
+++ b/20.05/classarmnn_1_1_neon_layer_support.xhtml
@@ -0,0 +1,3505 @@

ArmNN: NeonLayerSupport Class Reference
NeonLayerSupport Class Reference

#include <NeonLayerSupport.hpp>

Inheritance diagram for NeonLayerSupport:

NeonLayerSupport inherits from LayerSupportBase, which in turn implements ILayerSupport.

Public Member Functions

bool IsAbsSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsActivationSupported (const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsAdditionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsArgMinMaxSupported (const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchToSpaceNdSupported (const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsComparisonSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConcatSupported (const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const ConcatDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConstantSupported (const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertBf16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToBf16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToFp16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthToSpaceSupported (const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDequantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDilatedDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reason=EmptyOptional()) const override
 
bool IsElementwiseUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFloorSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFullyConnectedSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsGreaterSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInputSupported (const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInstanceNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsL2NormalizationSupported (const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMaximumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMeanSupported (const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMergerSupported (const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const MergerDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMinimumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMultiplicationSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDivisionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsOutputSupported (const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPadSupported (const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPermuteSupported (const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPooling2dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPreluSupported (const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQLstmSupported (const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQuantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQuantizedLstmSupported (const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsReshapeSupported (const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsResizeSupported (const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsResizeBilinearSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsRsqrtSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSliceSupported (const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToBatchNdSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToDepthSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSplitterSupported (const TensorInfo &input, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSplitterSupported (const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStackSupported (const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStridedSliceSupported (const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSubtractionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeSupported (const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
- Public Member Functions inherited from LayerSupportBase
bool IsDebugSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDetectionPostProcessSupported (const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsEqualSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFakeQuantizationSupported (const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMemCopySupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMemImportSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMergeSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPreCompiledSupported (const TensorInfo &input, const PreCompiledDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStandInSupported (const std::vector< const TensorInfo *> &inputs, const std::vector< const TensorInfo *> &outputs, const StandInDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSwitchSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output0, const TensorInfo &output1, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 

Additional Inherited Members

- Protected Member Functions inherited from ILayerSupport
 ILayerSupport ()
 
virtual ~ILayerSupport ()
 

Detailed Description


Definition at line 12 of file NeonLayerSupport.hpp.
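
The reference above gives only the defining header. In practice NeonLayerSupport is queried with armnn::TensorInfo objects and an optional reason string, as in the minimal sketch below. The sketch is illustrative only: it assumes a Neon-capable build, the include name shown above (the on-disk path may differ), and hypothetical Float32 tensor shapes.

#include <NeonLayerSupport.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Types.hpp>

#include <iostream>
#include <string>

int main()
{
    armnn::NeonLayerSupport layerSupport;

    // Hypothetical 4-D Float32 tensors used only for the query.
    armnn::TensorShape shape({ 1, 2, 2, 3 });
    armnn::TensorInfo input0(shape, armnn::DataType::Float32);
    armnn::TensorInfo input1(shape, armnn::DataType::Float32);
    armnn::TensorInfo output(shape, armnn::DataType::Float32);

    std::string reason;
    bool supported = layerSupport.IsAdditionSupported(input0, input1, output,
                                                      armnn::Optional<std::string&>(reason));
    std::cout << "Addition supported: " << supported;
    if (!supported)
    {
        std::cout << " (" << reason << ")";
    }
    std::cout << std::endl;
    return supported ? 0 : 1;
}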


Member Function Documentation

◆ IsAbsSupported()

bool IsAbsSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 126 of file NeonLayerSupport.cpp.

References armnn::Abs, and NeonLayerSupport::IsElementwiseUnarySupported().

{
    ElementwiseUnaryDescriptor descriptor(UnaryOperation::Abs);
    return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
}

◆ IsActivationSupported()

bool IsActivationSupported (const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 134 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, armnn::IgnoreUnused(), and armnn::NeonActivationWorkloadValidate().

{
    IgnoreUnused(descriptor);
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonActivationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
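
Most of the overrides on this page expand FORWARD_WORKLOAD_VALIDATE_FUNC around a Neon*WorkloadValidate function. The macro itself is defined in NeonLayerSupport.cpp and is not reproduced here; the helper below is only a sketch of the forwarding pattern, under the assumption that the macro does little more than convert an arm_compute::Status into a bool and copy the error description into reasonIfUnsupported on failure. The name ForwardValidate is hypothetical.

#include <arm_compute/core/Error.h>
#include <armnn/Optional.hpp>

#include <string>
#include <utility>

// Illustrative helper only; not the real macro.
template <typename ValidateFunc, typename... Args>
bool ForwardValidate(ValidateFunc&& validate,
                     armnn::Optional<std::string&> reasonIfUnsupported,
                     Args&&... args)
{
    const arm_compute::Status status = validate(std::forward<Args>(args)...);
    const bool supported = (status.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported.has_value())
    {
        reasonIfUnsupported.value() = status.error_description();
    }
    return supported;
}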

◆ IsAdditionSupported()

bool IsAdditionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 147 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonAdditionWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAdditionWorkloadValidate, reasonIfUnsupported, input0, input1, output);
}

◆ IsArgMinMaxSupported()

bool IsArgMinMaxSupported (const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 159 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonArgMinMaxWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonArgMinMaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

◆ IsBatchNormalizationSupported()

bool IsBatchNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 171 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonBatchNormalizationValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchNormalizationValidate, reasonIfUnsupported, input, output, mean, var, beta, gamma, descriptor);
}

◆ IsBatchToSpaceNdSupported()

bool IsBatchToSpaceNdSupported (const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 191 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonBatchToSpaceNdWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchToSpaceNdWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

◆ IsComparisonSupported()

bool IsComparisonSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 203 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonComparisonWorkloadValidate().

Referenced by NeonLayerSupport::IsGreaterSupported().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonComparisonWorkloadValidate, reasonIfUnsupported, input0, input1, output, descriptor);
}

◆ IsConcatSupported()

bool IsConcatSupported (const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const ConcatDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 218 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, OriginsDescriptor::GetConcatAxis(), OriginsDescriptor::GetNumDimensions(), TensorInfo::IsTypeSpaceMatch(), armnn::NeonConcatWorkloadValidate(), and armnn::SetValueChecked().

Referenced by NeonLayerSupport::IsMergerSupported().

{
    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
    {
        SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.");
        return false;
    }

    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
    if (concatInnerAxis < 3) // Width, height, or channels
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConcatWorkloadValidate, reasonIfUnsupported, inputs, output, descriptor);
    }
    else if (concatInnerAxis == 3)
    {
        for (auto& input : inputs)
        {
            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
            {
                SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
                return false;
            }
        }
        return true; // Sub-tensors support concat along batch
    }
    else // > 4 dimensions not supported.
    {
        SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
        return false;
    }
}
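
The axis handling above is easiest to see with concrete numbers. The values below are hypothetical and simply mirror the function's arithmetic: an inner axis below 3 is passed to NeonConcatWorkloadValidate, an inner axis of exactly 3 (concatenation over batch) takes the sub-tensor path, and anything beyond 4 dimensions is rejected.

#include <iostream>

int main()
{
    // Hypothetical descriptor values: a 4-D NCHW concat along the channel axis.
    unsigned int numDimensions = 4; // descriptor.GetNumDimensions()
    unsigned int concatAxis    = 1; // descriptor.GetConcatAxis()

    unsigned int concatInnerAxis = (numDimensions - concatAxis) - 1;
    std::cout << concatInnerAxis << std::endl; // 2 -> validated by NeonConcatWorkloadValidate

    // concatAxis == 0 (batch) would give 3: supported only when every input shares the
    // output's type and quantization space, as checked by IsTypeSpaceMatch above.
    return 0;
}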

◆ IsConstantSupported()

bool IsConstantSupported (const TensorInfo &output, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 257 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonConstantWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConstantWorkloadValidate, reasonIfUnsupported, output);
}

◆ IsConvertBf16ToFp32Supported()

bool IsConvertBf16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 265 of file NeonLayerSupport.cpp.

References armnn::IgnoreUnused().

{
    armnn::IgnoreUnused(input);
    armnn::IgnoreUnused(output);
    armnn::IgnoreUnused(reasonIfUnsupported);
    return true;
}

◆ IsConvertFp16ToFp32Supported()

bool IsConvertFp16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 275 of file NeonLayerSupport.cpp.

References armnn::IgnoreUnused().

{
    armnn::IgnoreUnused(input);
    armnn::IgnoreUnused(output);
    armnn::IgnoreUnused(reasonIfUnsupported);
    return true;
}

◆ IsConvertFp32ToBf16Supported()

bool IsConvertFp32ToBf16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 285 of file NeonLayerSupport.cpp.

References armnn::IgnoreUnused().

{
    armnn::IgnoreUnused(input);
    armnn::IgnoreUnused(output);
    armnn::IgnoreUnused(reasonIfUnsupported);
    return true;
}

◆ IsConvertFp32ToFp16Supported()

bool IsConvertFp32ToFp16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 295 of file NeonLayerSupport.cpp.

References armnn::IgnoreUnused().

{
    armnn::IgnoreUnused(input);
    armnn::IgnoreUnused(output);
    armnn::IgnoreUnused(reasonIfUnsupported);
    return true;
}

◆ IsConvolution2dSupported()

bool IsConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 305 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonConvolution2dWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvolution2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor, weights, biases);
}
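
A query for this override needs weight and (optionally) bias TensorInfos in addition to the descriptor. The fragment below is a sketch using the same includes and assumptions as the example under Detailed Description, with hypothetical NCHW shapes; whether the chosen weight layout is actually accepted is decided by NeonConvolution2dWorkloadValidate, not by this wrapper.

armnn::Convolution2dDescriptor descriptor;
descriptor.m_StrideX    = 1;
descriptor.m_StrideY    = 1;
descriptor.m_DataLayout = armnn::DataLayout::NCHW;

armnn::TensorShape inShape({ 1, 3, 8, 8 });
armnn::TensorShape outShape({ 1, 16, 6, 6 });    // 3x3 kernel, stride 1, no padding
armnn::TensorShape weightShape({ 16, 3, 3, 3 }); // hypothetical weight layout

armnn::TensorInfo input(inShape, armnn::DataType::Float32);
armnn::TensorInfo output(outShape, armnn::DataType::Float32);
armnn::TensorInfo weights(weightShape, armnn::DataType::Float32);

armnn::NeonLayerSupport layerSupport;
std::string reason;
bool supported = layerSupport.IsConvolution2dSupported(input, output, descriptor, weights,
                                                       armnn::EmptyOptional(), // no bias
                                                       armnn::Optional<std::string&>(reason));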

◆ IsDepthToSpaceSupported()

bool IsDepthToSpaceSupported (const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 321 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDepthToSpaceWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthToSpaceWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

◆ IsDepthwiseConvolutionSupported()

bool IsDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 333 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDepthwiseConvolutionWorkloadValidate().

Referenced by BOOST_AUTO_TEST_CASE().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate, reasonIfUnsupported, input, output, descriptor, weights, biases);
}

◆ IsDequantizeSupported()

bool IsDequantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 349 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDequantizeWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDequantizeWorkloadValidate, reasonIfUnsupported, input, output);
}

◆ IsDilatedDepthwiseConvolutionSupported()

bool IsDilatedDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reason = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 359 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDepthwiseConvolutionWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate, reasonIfUnsupported, input, output, descriptor, weights, biases);
}

◆ IsDivisionSupported()

bool IsDivisionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 553 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDivisionWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDivisionWorkloadValidate, reasonIfUnsupported, input0, input1, output);
}

◆ IsElementwiseUnarySupported()

bool IsElementwiseUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 375 of file NeonLayerSupport.cpp.

References armnn::Abs, armnn::Exp, FORWARD_WORKLOAD_VALIDATE_FUNC, ElementwiseUnaryDescriptor::m_Operation, armnn::Neg, armnn::NeonAbsWorkloadValidate(), armnn::NeonExpWorkloadValidate(), armnn::NeonNegWorkloadValidate(), armnn::NeonRsqrtWorkloadValidate(), and armnn::Rsqrt.

Referenced by NeonLayerSupport::IsAbsSupported(), and NeonLayerSupport::IsRsqrtSupported().

{
    switch(descriptor.m_Operation)
    {
        case UnaryOperation::Abs:
            FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAbsWorkloadValidate, reasonIfUnsupported, input, output);
        case UnaryOperation::Exp:
            FORWARD_WORKLOAD_VALIDATE_FUNC(NeonExpWorkloadValidate, reasonIfUnsupported, input, output);
        case UnaryOperation::Neg:
            FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNegWorkloadValidate, reasonIfUnsupported, input, output);
        case UnaryOperation::Rsqrt:
            FORWARD_WORKLOAD_VALIDATE_FUNC(NeonRsqrtWorkloadValidate, reasonIfUnsupported, input, output);
        default:
            return false;
    }
}
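
Only the four operations with case labels above are ever forwarded to a Neon validate function; anything else returns false immediately. A short sketch, with the same includes and assumptions as the example under Detailed Description plus armnn/Descriptors.hpp for the descriptor type:

#include <armnn/Descriptors.hpp>

armnn::NeonLayerSupport layerSupport;
armnn::TensorShape shape({ 2, 2 });
armnn::TensorInfo info(shape, armnn::DataType::Float32);
std::string reason;

armnn::ElementwiseUnaryDescriptor expDescriptor(armnn::UnaryOperation::Exp);
bool expSupported = layerSupport.IsElementwiseUnarySupported(info, info, expDescriptor,
                                                             armnn::Optional<std::string&>(reason));

// Abs and Rsqrt can also be reached through the IsAbsSupported / IsRsqrtSupported wrappers,
// which build the same descriptor internally.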

◆ IsFloorSupported()

bool IsFloorSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 407 of file NeonLayerSupport.cpp.

References TensorInfo::GetDataType(), armnn::IgnoreUnused(), and armnn::IsSupportedForDataTypeGeneric().

{
    armnn::IgnoreUnused(output);
    return IsNeonBackendSupported(reasonIfUnsupported) &&
           IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                         input.GetDataType(),
                                         &FalseFuncF16<>,
                                         &TrueFunc<>,
                                         &FalseFuncU8<>,
                                         &FalseFuncI32<>,
                                         &FalseFuncU8<>);
}
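
The data-type dispatch above reports Floor as supported only for Float32 inputs on this backend; Float16, Uint8, Int32 and Boolean all hit a False function. A sketch with the same includes and assumptions as the example under Detailed Description:

armnn::NeonLayerSupport layerSupport;
armnn::TensorShape shape({ 1, 2, 2, 2 });
armnn::TensorInfo fp32(shape, armnn::DataType::Float32);
armnn::TensorInfo fp16(shape, armnn::DataType::Float16);
std::string reason;

bool floorFp32 = layerSupport.IsFloorSupported(fp32, fp32,
                                               armnn::Optional<std::string&>(reason)); // expected true
bool floorFp16 = layerSupport.IsFloorSupported(fp16, fp16,
                                               armnn::Optional<std::string&>(reason)); // expected false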

◆ IsFullyConnectedSupported()

bool IsFullyConnectedSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 422 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonFullyConnectedWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonFullyConnectedWorkloadValidate, reasonIfUnsupported, input, output, weights, biases, descriptor);
}

◆ IsGreaterSupported()

bool IsGreaterSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 438 of file NeonLayerSupport.cpp.

References armnn::Greater, and NeonLayerSupport::IsComparisonSupported().

{
    ComparisonDescriptor descriptor(ComparisonOperation::Greater);
    return IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported);
}

◆ IsInputSupported()

bool IsInputSupported (const TensorInfo &input, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 447 of file NeonLayerSupport.cpp.

{
    return IsNeonBackendSupported(reasonIfUnsupported, input);
}

◆ IsInstanceNormalizationSupported()

bool IsInstanceNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 453 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonInstanceNormalizationWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonInstanceNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

◆ IsL2NormalizationSupported()

bool IsL2NormalizationSupported (const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 465 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonL2NormalizationWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

◆ IsLstmSupported()

bool IsLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 473 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonLstmFloatWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLstmFloatWorkloadValidate, reasonIfUnsupported,
                                   input, outputStateIn, cellStateIn, scratchBuffer,
                                   outputStateOut, cellStateOut, output, descriptor, paramsInfo);
}

◆ IsMaximumSupported()

bool IsMaximumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 497 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMaximumWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMaximumWorkloadValidate, reasonIfUnsupported, input0, input1, output);
}

◆ IsMeanSupported()

bool IsMeanSupported (const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 509 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMeanWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMeanWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

◆ IsMergerSupported()

bool IsMergerSupported (const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const MergerDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 521 of file NeonLayerSupport.cpp.

References NeonLayerSupport::IsConcatSupported().

{
    return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
}

◆ IsMinimumSupported()

bool IsMinimumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 529 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMinimumWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMinimumWorkloadValidate, reasonIfUnsupported, input0, input1, output);
}

◆ IsMultiplicationSupported()

bool IsMultiplicationSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 541 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMultiplicationWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMultiplicationWorkloadValidate, reasonIfUnsupported, input0, input1, output);
}

◆ IsNormalizationSupported()

bool IsNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 565 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonNormalizationWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

◆ IsOutputSupported()

bool IsOutputSupported (const TensorInfo &output, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 577 of file NeonLayerSupport.cpp.

{
    return IsNeonBackendSupported(reasonIfUnsupported, output);
}

◆ IsPadSupported()

bool IsPadSupported (const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 583 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPadWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPadWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

◆ IsPermuteSupported()

bool IsPermuteSupported (const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 595 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPermuteWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

◆ IsPooling2dSupported()

bool IsPooling2dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 603 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPooling2dWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

◆ IsPreluSupported()

bool IsPreluSupported (const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 611 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPreluWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
}

◆ IsQLstmSupported()

bool IsQLstmSupported (const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 619 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, TensorInfo::GetDataType(), armnn::NeonQLstmWorkloadValidate(), armnn::QAsymmS8, and armnn::QSymmS16.

{
    // Check required here in order to pass IsLayerSupported for datatypes tests
    if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
        previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
        previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
        outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
        cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
        output.GetDataType() == armnn::DataType::QAsymmS8)
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonQLstmWorkloadValidate, reasonIfUnsupported,
                                       input, previousCellStateIn, previousOutputIn,
                                       cellStateOut, outputStateOut, output, descriptor, paramsInfo);
    }
    else
    {
        return false;
    }
}
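
The data-type gate above only lets the QAsymmS8 / QSymmS16 combination through to NeonQLstmWorkloadValidate. The tensor infos below illustrate that combination with hypothetical shapes and quantization parameters, using the same includes and assumptions as the example under Detailed Description:

armnn::TensorShape stateShape({ 1, 8 });
armnn::TensorShape cellShape({ 1, 8 });

armnn::TensorInfo input(stateShape, armnn::DataType::QAsymmS8, 0.0157f, 0);
armnn::TensorInfo previousOutputIn = input;
armnn::TensorInfo outputStateOut   = input;
armnn::TensorInfo output           = input;
armnn::TensorInfo previousCellStateIn(cellShape, armnn::DataType::QSymmS16, 1.0f / 32768.0f, 0);
armnn::TensorInfo cellStateOut     = previousCellStateIn;

// With any other combination (for example a Float32 input) IsQLstmSupported returns false
// before the Neon validate function is consulted.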

◆ IsQuantizedLstmSupported()

bool IsQuantizedLstmSupported (const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 664 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonQuantizedLstmWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonQuantizedLstmWorkloadValidate, reasonIfUnsupported,
                                   input, cellStateIn, outputStateIn, cellStateOut, outputStateOut, paramsInfo);
}

◆ IsQuantizeSupported()

bool IsQuantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 654 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonQuantizeWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonQuantizeWorkloadValidate, reasonIfUnsupported, input, output);
}

◆ IsReshapeSupported()

bool IsReshapeSupported (const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 682 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, armnn::IgnoreUnused(), and armnn::NeonReshapeWorkloadValidate().

{
    armnn::IgnoreUnused(descriptor);
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonReshapeWorkloadValidate, reasonIfUnsupported, input, output);
}

◆ IsResizeBilinearSupported()

bool IsResizeBilinearSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 706 of file NeonLayerSupport.cpp.

References armnn::Bilinear, TensorInfo::GetShape(), NeonLayerSupport::IsResizeSupported(), ResizeDescriptor::m_DataLayout, ResizeDescriptor::m_Method, ResizeDescriptor::m_TargetHeight, ResizeDescriptor::m_TargetWidth, and armnn::NCHW.

{
    ResizeDescriptor descriptor;
    descriptor.m_Method = ResizeMethod::Bilinear;
    descriptor.m_DataLayout = DataLayout::NCHW;

    const TensorShape& outputShape = output.GetShape();
    descriptor.m_TargetHeight = outputShape[2];
    descriptor.m_TargetWidth = outputShape[3];

    return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
}
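
The wrapper above is equivalent to building an NCHW Bilinear ResizeDescriptor whose target size comes from the output shape and calling IsResizeSupported directly, as this sketch shows (same includes and assumptions as the example under Detailed Description, plus armnn/Descriptors.hpp):

armnn::TensorShape inShape({ 1, 3, 16, 16 });
armnn::TensorShape outShape({ 1, 3, 32, 32 });
armnn::TensorInfo input(inShape, armnn::DataType::Float32);
armnn::TensorInfo output(outShape, armnn::DataType::Float32);

armnn::ResizeDescriptor descriptor;
descriptor.m_Method       = armnn::ResizeMethod::Bilinear;
descriptor.m_DataLayout   = armnn::DataLayout::NCHW;
descriptor.m_TargetHeight = outShape[2];
descriptor.m_TargetWidth  = outShape[3];

armnn::NeonLayerSupport layerSupport;
std::string reason;
bool viaResize   = layerSupport.IsResizeSupported(input, output, descriptor,
                                                  armnn::Optional<std::string&>(reason));
bool viaBilinear = layerSupport.IsResizeBilinearSupported(input, output,
                                                          armnn::Optional<std::string&>(reason));
// viaResize and viaBilinear are expected to agree.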

◆ IsResizeSupported()

bool IsResizeSupported (const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 694 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonResizeWorkloadValidate().

Referenced by NeonLayerSupport::IsResizeBilinearSupported().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonResizeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

◆ IsRsqrtSupported()

bool IsRsqrtSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 721 of file NeonLayerSupport.cpp.

References NeonLayerSupport::IsElementwiseUnarySupported(), and armnn::Rsqrt.

{
    ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt);
    return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
}

◆ IsSliceSupported()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
bool IsSliceSupported (const TensorInfoinput,
const TensorInfooutput,
const SliceDescriptordescriptor,
Optional< std::string &> reasonIfUnsupported = EmptyOptional() 
) const
+
+overridevirtual
+
+ +

Reimplemented from LayerSupportBase.


Definition at line 729 of file NeonLayerSupport.cpp.


References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSliceWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSliceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

◆ IsSoftmaxSupported()

bool IsSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.


Definition at line 741 of file NeonLayerSupport.cpp.


References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSoftmaxWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
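
Descriptor-carrying queries such as softmax pass the layer parameters straight through to the arm_compute validate function. A minimal sketch follows; the shapes and descriptor values are made up, not taken from the ArmNN sources.

    #include <NeonLayerSupport.hpp>   // include path assumed, as listed on this page
    #include <armnn/Descriptors.hpp>
    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>

    bool CheckSoftmaxSupport()
    {
        using namespace armnn;
        NeonLayerSupport layerSupport;

        // Hypothetical [batch, classes] tensor used for both input and output.
        TensorInfo info(TensorShape({1, 1000}), DataType::Float32);

        SoftmaxDescriptor descriptor;
        descriptor.m_Beta = 1.0f;   // exponent scaling
        descriptor.m_Axis = -1;     // softmax over the innermost dimension

        return layerSupport.IsSoftmaxSupported(info, info, descriptor, EmptyOptional());
    }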

◆ IsSpaceToBatchNdSupported()

bool IsSpaceToBatchNdSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.


Definition at line 749 of file NeonLayerSupport.cpp.


References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSpaceToBatchNdWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSpaceToBatchNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

◆ IsSpaceToDepthSupported()

bool IsSpaceToDepthSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.


Definition at line 761 of file NeonLayerSupport.cpp.


References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSpaceToDepthWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSpaceToDepthWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

◆ IsSplitterSupported() [1/2]

bool IsSplitterSupported (const TensorInfo &input, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.


Definition at line 773 of file NeonLayerSupport.cpp.


References TensorInfo::GetDataType(), and armnn::IgnoreUnused().

{
    armnn::IgnoreUnused(descriptor);
    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                      input.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>);
}

◆ IsSplitterSupported() [2/2]

bool IsSplitterSupported (const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.


Definition at line 784 of file NeonLayerSupport.cpp.


References armnn::ComputeSplitAxis(), FORWARD_WORKLOAD_VALIDATE_FUNC, ViewsDescriptor::GetNumDimensions(), TensorInfo::GetShape(), armnn::IgnoreUnused(), TensorInfo::IsTypeSpaceMatch(), armnn::NeonSplitterWorkloadValidate(), and armnn::SetValueChecked().

{
#if defined(ARMCOMPUTENEON_ENABLED)
    // Split along the last dimension: sub-tensors cannot be used, because the width
    // and height of the sub-tensors would not match those of the parent tensor when
    // the input has more than two dimensions.
    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
    if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
        *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSplitterWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       outputs,
                                       *splitAxis.begin());
    }
#endif
    IgnoreUnused(descriptor);
    for (auto output : outputs)
    {
        if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not in the same space
        {
            SetValueChecked(reasonIfUnsupported, "Neon Splitter: Types and quantization parameters must match.");
            return false;
        }
    }
    return true;
}
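
In short: the arm_compute splitter validation is only consulted when every view results from splitting along the innermost dimension of an input with more than two dimensions; otherwise the backend falls back to a sub-tensor check, which requires all outputs to share the input's type and quantization space. A hedged sketch of the first condition (the helper and the include path are assumptions, not part of ArmNN):

    #include <armnn/Descriptors.hpp>
    #include <armnn/Tensor.hpp>
    #include <backendsCommon/WorkloadUtils.hpp>   // ComputeSplitAxis(); path assumed
    #include <set>

    // Hypothetical helper mirroring the condition tested above: true when the split
    // runs along the last dimension only, so the Neon splitter workload validation
    // can be used instead of sub-tensors.
    bool SplitsAlongLastAxisOnly(const armnn::ViewsDescriptor& descriptor,
                                 const armnn::TensorInfo& input)
    {
        const std::set<unsigned int> splitAxis = armnn::ComputeSplitAxis(descriptor, input.GetShape());
        return descriptor.GetNumDimensions() > 2 &&
               splitAxis.size() == 1 &&
               *splitAxis.begin() == descriptor.GetNumDimensions() - 1;
    }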

◆ IsStackSupported()

bool IsStackSupported (const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.


Definition at line 817 of file NeonLayerSupport.cpp.


References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonStackWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonStackWorkloadValidate,
                                   reasonIfUnsupported,
                                   inputs,
                                   output,
                                   descriptor);
}
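
Unlike the single-input queries, IsStackSupported() takes raw pointers to the per-input TensorInfos together with a StackDescriptor holding the axis, the number of inputs and the common input shape. A minimal sketch with made-up shapes (not taken from the ArmNN sources):

    #include <NeonLayerSupport.hpp>   // include path assumed, as listed on this page
    #include <armnn/Descriptors.hpp>
    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>
    #include <vector>

    bool CheckStackSupport()
    {
        using namespace armnn;
        NeonLayerSupport layerSupport;

        // Two identical [3, 4] inputs stacked along axis 0 give a [2, 3, 4] output.
        TensorInfo inputInfo(TensorShape({3, 4}), DataType::Float32);
        std::vector<const TensorInfo*> inputs = { &inputInfo, &inputInfo };

        StackDescriptor descriptor(/*axis=*/0, /*numInputs=*/2, TensorShape({3, 4}));
        TensorInfo output(TensorShape({2, 3, 4}), DataType::Float32);

        return layerSupport.IsStackSupported(inputs, output, descriptor, EmptyOptional());
    }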

◆ IsStridedSliceSupported()

bool IsStridedSliceSupported (const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.


Definition at line 829 of file NeonLayerSupport.cpp.


References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonStridedSliceWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonStridedSliceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

◆ IsSubtractionSupported()

bool IsSubtractionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.


Definition at line 841 of file NeonLayerSupport.cpp.


References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSubtractionWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSubtractionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

◆ IsTransposeConvolution2dSupported()

bool IsTransposeConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.


Definition at line 853 of file NeonLayerSupport.cpp.


References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonTransposeConvolution2dWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

◆ IsTransposeSupported()

bool IsTransposeSupported (const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported = EmptyOptional()) const override

Reimplemented from LayerSupportBase.


Definition at line 869 of file NeonLayerSupport.cpp.


References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonTransposeWorkloadValidate().

{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
The documentation for this class was generated from the following files:

NeonLayerSupport.hpp
NeonLayerSupport.cpp