diff options
author | arovir01 <Aron.Virginas-Tar@arm.com> | 2018-10-09 09:40:58 +0100 |
---|---|---|
committer | Matthew Bentham <matthew.bentham@arm.com> | 2018-10-10 16:16:58 +0100 |
commit | 7ff76c5b72cdc941310fdd8991014da5b9830c19 (patch) | |
tree | 7ef47f8523962e6e4ae0400f6932ca4b88dd6ed0 /src/backends/neon/NeonLayerSupport.hpp | |
parent | 085f0a42623401f78bd1df34cdcebe8555809410 (diff) | |
download | armnn-7ff76c5b72cdc941310fdd8991014da5b9830c19.tar.gz |
IVGCVSW-1965: Neon implementation for the ILayerSupport interface
Change-Id: I52f4b44cf3959f49d1066ff7f4b3c1f7682894c9
Diffstat (limited to 'src/backends/neon/NeonLayerSupport.hpp')
-rw-r--r-- | src/backends/neon/NeonLayerSupport.hpp | 159 |
1 file changed, 157 insertions(+), 2 deletions(-)
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp index 468cf58393..91be98182a 100644 --- a/src/backends/neon/NeonLayerSupport.hpp +++ b/src/backends/neon/NeonLayerSupport.hpp @@ -14,7 +14,162 @@ namespace armnn class NeonLayerSupport : public ILayerSupport { - // TODO implement +public: + bool IsActivationSupported(const TensorInfo& input, + const TensorInfo& output, + const ActivationDescriptor& descriptor, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + + bool IsAdditionSupported(const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + + bool IsBatchNormalizationSupported(const TensorInfo& input, + const TensorInfo& output, + const TensorInfo& mean, + const TensorInfo& var, + const TensorInfo& beta, + const TensorInfo& gamma, + const BatchNormalizationDescriptor& descriptor, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + + bool IsConstantSupported(const TensorInfo& output, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + + bool IsConvertFp16ToFp32Supported(const TensorInfo& input, + const TensorInfo& output, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + + bool IsConvertFp32ToFp16Supported(const TensorInfo& input, + const TensorInfo& output, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + + bool IsConvolution2dSupported(const TensorInfo& input, + const TensorInfo& output, + const Convolution2dDescriptor& descriptor, + const TensorInfo& weights, + const Optional<TensorInfo>& biases, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + + bool IsDepthwiseConvolutionSupported(const TensorInfo& input, + const TensorInfo& output, + const DepthwiseConvolution2dDescriptor& descriptor, + const TensorInfo& weights, + const 
Optional<TensorInfo>& biases, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + + bool IsDivisionSupported(const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + + bool IsFakeQuantizationSupported(const TensorInfo& input, + const FakeQuantizationDescriptor& descriptor, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + + bool IsFloorSupported(const TensorInfo& input, + const TensorInfo& output, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + + bool IsFullyConnectedSupported(const TensorInfo& input, + const TensorInfo& output, + const TensorInfo& weights, + const TensorInfo& biases, + const FullyConnectedDescriptor& descriptor, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + + bool IsInputSupported(const TensorInfo& input, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + + bool IsL2NormalizationSupported(const TensorInfo& input, + const TensorInfo& output, + const L2NormalizationDescriptor& descriptor, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + + bool IsLstmSupported(const TensorInfo& input, + const TensorInfo& outputStateIn, + const TensorInfo& cellStateIn, + const TensorInfo& scratchBuffer, + const TensorInfo& outputStateOut, + const TensorInfo& cellStateOut, + const TensorInfo& output, + const LstmDescriptor& descriptor, + const TensorInfo& inputToForgetWeights, + const TensorInfo& inputToCellWeights, + const TensorInfo& inputToOutputWeights, + const TensorInfo& recurrentToForgetWeights, + const TensorInfo& recurrentToCellWeights, + const TensorInfo& recurrentToOutputWeights, + const TensorInfo& forgetGateBias, + const TensorInfo& cellBias, + const TensorInfo& outputGateBias, + const TensorInfo* inputToInputWeights, + const TensorInfo* 
recurrentToInputWeights, + const TensorInfo* cellToInputWeights, + const TensorInfo* inputGateBias, + const TensorInfo* projectionWeights, + const TensorInfo* projectionBias, + const TensorInfo* cellToForgetWeights, + const TensorInfo* cellToOutputWeights, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + + bool IsMeanSupported(const TensorInfo& input, + const TensorInfo& output, + const MeanDescriptor& descriptor, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + + bool IsMergerSupported(const std::vector<const TensorInfo*> inputs, + const OriginsDescriptor& descriptor, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + + bool IsMultiplicationSupported(const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + + bool IsNormalizationSupported(const TensorInfo& input, + const TensorInfo& output, + const NormalizationDescriptor& descriptor, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + + bool IsOutputSupported(const TensorInfo& output, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + + bool IsPadSupported(const TensorInfo& input, + const TensorInfo& output, + const PadDescriptor& descriptor, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + + bool IsPermuteSupported(const TensorInfo& input, + const TensorInfo& output, + const PermuteDescriptor& descriptor, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + + bool IsPooling2dSupported(const TensorInfo& input, + const TensorInfo& output, + const Pooling2dDescriptor& descriptor, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + + bool IsReshapeSupported(const TensorInfo& input, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + + 
bool IsResizeBilinearSupported(const TensorInfo& input, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + + bool IsSoftmaxSupported(const TensorInfo& input, + const TensorInfo& output, + const SoftmaxDescriptor& descriptor, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + + bool IsSplitterSupported(const TensorInfo& input, + const ViewsDescriptor& descriptor, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + + bool IsSubtractionSupported(const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; }; bool IsNeonDirectConvolutionPreferred(const TensorInfo& weightInfo, const Convolution2dDescriptor& desc); @@ -183,4 +338,4 @@ bool IsPadSupportedNeon(const TensorInfo& input, const PadDescriptor& descriptor, Optional<std::string&> reasonIfUnsupported = EmptyOptional()); -} +} // namespace armnn |