aboutsummaryrefslogtreecommitdiff
path: root/src/backends/neon/NeonLayerSupport.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/backends/neon/NeonLayerSupport.cpp')
-rw-r--r--  src/backends/neon/NeonLayerSupport.cpp  421
1 files changed, 419 insertions, 2 deletions
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index d5dd238bd8..2b2229a4de 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -7,7 +7,6 @@
#include "NeonBackendId.hpp"
#include "NeonBackendModelContext.hpp"
-#include <armnn/Descriptors.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
@@ -146,6 +145,424 @@ NeonLayerSupport::NeonLayerSupport()
{
}
+bool NeonLayerSupport::IsLayerSupported(const LayerType& type,
+ const std::vector<TensorInfo>& infos,
+ const BaseDescriptor& descriptor,
+ const Optional<LstmInputParamsInfo>& lstmParamsInfo,
+ const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ switch (type)
+ {
+ case LayerType::Activation:
+ return IsActivationSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Addition:
+ return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::ArgMinMax:
+ return IsArgMinMaxSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::BatchNormalization:
+ return IsBatchNormalizationSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ infos[4],
+ infos[5],
+ *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
+ (&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::BatchToSpaceNd:
+ return IsBatchToSpaceNdSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Comparison:
+ return IsComparisonSupported(infos[0],
+ infos[1],
+ infos[2],
+ *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Concat:
+ {
+ std::vector<const TensorInfo*> inputInfos;
+ for (uint32_t i = 0; i < (infos.size() - 1); i++)
+ {
+ inputInfos.push_back(&infos[i]);
+ }
+ return IsConcatSupported(inputInfos,
+ infos[infos.size() - 1],
+ *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ }
+ case LayerType::Constant:
+ return IsConstantSupported(infos[0], reasonIfUnsupported);
+ case LayerType::ConvertBf16ToFp32:
+ return IsConvertBf16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::ConvertFp16ToFp32:
+ return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::ConvertFp32ToBf16:
+ return IsConvertFp32ToBf16Supported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::ConvertFp32ToFp16:
+ return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::Convolution2d:
+ {
+ if (infos.size() != 4)
+ {
+            throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
+                                           "TensorInfos should be of format: {input, output, weights, biases}.");
+ }
+
+ auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
+ if (infos[3] == TensorInfo())
+ {
+ return IsConvolution2dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ EmptyOptional(),
+ reasonIfUnsupported);
+ }
+ else
+ {
+ return IsConvolution2dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ infos[3],
+ reasonIfUnsupported);
+ }
+ }
+ case LayerType::DepthToSpace:
+ return IsDepthToSpaceSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::DepthwiseConvolution2d:
+ {
+ if (infos.size() != 4)
+ {
+ throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
+ "TensorInfos should be of format: {input, output, weights, biases}.");
+ }
+
+ auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
+ if (infos[3] == TensorInfo())
+ {
+ return IsDepthwiseConvolutionSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ EmptyOptional(),
+ reasonIfUnsupported);
+ }
+ else
+ {
+ return IsDepthwiseConvolutionSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ infos[3],
+ reasonIfUnsupported);
+ }
+ }
+ case LayerType::Dequantize:
+ return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::Division:
+ return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::ElementwiseUnary:
+ return IsElementwiseUnarySupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Fill:
+ return IsFillSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Floor:
+ return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::FullyConnected:
+ return IsFullyConnectedSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Gather:
+ return IsGatherSupported(infos[0],
+ infos[1],
+ infos[2],
+ *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Input:
+ return IsInputSupported(infos[0], reasonIfUnsupported);
+ case LayerType::InstanceNormalization:
+ return IsInstanceNormalizationSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
+ (&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::L2Normalization:
+ return IsL2NormalizationSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::LogicalBinary:
+ return IsLogicalBinarySupported(infos[0],
+ infos[1],
+ infos[2],
+ *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::LogSoftmax:
+ return IsLogSoftmaxSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Lstm:
+ return IsLstmSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ infos[4],
+ infos[5],
+ infos[6],
+ *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
+ lstmParamsInfo.value(),
+ reasonIfUnsupported);
+ case LayerType::QLstm:
+ return IsQLstmSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ infos[4],
+ infos[5],
+ *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
+ lstmParamsInfo.value(),
+ reasonIfUnsupported);
+ case LayerType::Maximum:
+ return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::Mean:
+ return IsMeanSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Minimum:
+ return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::Multiplication:
+ return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::Normalization:
+ return IsNormalizationSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Output:
+ return IsOutputSupported(infos[0], reasonIfUnsupported);
+ case LayerType::Pad:
+ return IsPadSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Permute:
+ return IsPermuteSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Pooling2d:
+ return IsPooling2dSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Prelu:
+ return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::Quantize:
+ return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::QuantizedLstm:
+ return IsQuantizedLstmSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ infos[4],
+ quantizedLstmParamsInfo.value(),
+ reasonIfUnsupported);
+ case LayerType::Reshape:
+ return IsReshapeSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Resize:
+ return IsResizeSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Reduce:
+ return IsReduceSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Slice:
+ return IsSliceSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Softmax:
+ return IsSoftmaxSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::SpaceToBatchNd:
+ return IsSpaceToBatchNdSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::SpaceToDepth:
+ return IsSpaceToDepthSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Splitter:
+ {
+ std::vector<TensorInfo> outputInfos;
+ for (uint32_t i = 1; i < infos.size(); i++)
+ {
+ outputInfos.push_back(infos[i]);
+ }
+ return IsSplitterSupported(infos[0],
+ {outputInfos.begin(), outputInfos.end()},
+ *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ }
+ case LayerType::Stack:
+ {
+ std::vector<const TensorInfo*> inputInfos;
+ for (uint32_t i = 0; i < infos.size() - 1; i++)
+ {
+ inputInfos.push_back(&infos[i]);
+ }
+ return IsStackSupported(inputInfos,
+ infos[infos.size() - 1],
+ *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ }
+ case LayerType::StridedSlice:
+ return IsStridedSliceSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Subtraction:
+ return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::Transpose:
+ return IsTransposeSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::TransposeConvolution2d:
+ {
+ if (infos.size() != 4)
+ {
+ throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
+ "TensorInfos should be of format: {input, output, weights, biases}.");
+ }
+
+ auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
+ if (infos[3] == TensorInfo())
+ {
+ return IsTransposeConvolution2dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ EmptyOptional(),
+ reasonIfUnsupported);
+ }
+ else
+ {
+ return IsTransposeConvolution2dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ infos[3],
+ reasonIfUnsupported);
+ }
+ }
+ case LayerType::Cast:
+ return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::ChannelShuffle:
+ return IsChannelShuffleSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Convolution3d:
+ {
+ if (infos.size() != 4)
+ {
+ throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
+ "TensorInfos should be of format: {input, output, weights, biases}.");
+ }
+
+ auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
+ if (infos[3] == TensorInfo())
+ {
+ return IsConvolution3dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ EmptyOptional(),
+ reasonIfUnsupported);
+ }
+ else
+ {
+ return IsConvolution3dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ infos[3],
+ reasonIfUnsupported);
+ }
+ }
+ case LayerType::MemCopy:
+ return LayerSupportBase::IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::MemImport:
+ return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::DetectionPostProcess:
+ {
+ auto desc = *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>(&descriptor));
+ return LayerSupportBase::IsDetectionPostProcessSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ infos[4],
+ infos[5],
+ infos[6],
+ desc,
+ reasonIfUnsupported);
+ }
+ case LayerType::Map:
+ return true;
+ case LayerType::Unmap:
+ return true;
+ case LayerType::Merge:
+ return LayerSupportBase::IsMergeSupported(infos[0],
+ infos[1],
+ infos[2],
+ reasonIfUnsupported);
+ case LayerType::Rank:
+ return true;
+ case LayerType::Shape:
+ return LayerSupportBase::IsShapeSupported(infos[0],
+ infos[1],
+ reasonIfUnsupported);
+ default:
+ // layers not supported in neon by default:
+ // debug, fakequantization, precompiled, standin,
+ // switch, unidirectionalsequencelstm, pooling3d
+ return false;
+ }
+}
+
bool NeonLayerSupport::IsActivationSupported(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
@@ -256,7 +673,7 @@ bool NeonLayerSupport::IsComparisonSupported(const TensorInfo& input0,
bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
- const ConcatDescriptor& descriptor,
+ const OriginsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())