author     arovir01 <Aron.Virginas-Tar@arm.com>        2018-10-09 09:40:58 +0100
committer  Matthew Bentham <matthew.bentham@arm.com>   2018-10-10 16:16:58 +0100
commit     7ff76c5b72cdc941310fdd8991014da5b9830c19 (patch)
tree       7ef47f8523962e6e4ae0400f6932ca4b88dd6ed0
parent     085f0a42623401f78bd1df34cdcebe8555809410 (diff)
download   armnn-7ff76c5b72cdc941310fdd8991014da5b9830c19.tar.gz
IVGCVSW-1965: Neon implementation for the ILayerSupport interface
Change-Id: I52f4b44cf3959f49d1066ff7f4b3c1f7682894c9
-rw-r--r--  src/backends/neon/NeonLayerSupport.cpp  297
-rw-r--r--  src/backends/neon/NeonLayerSupport.hpp  159

2 files changed, 453 insertions(+), 3 deletions(-)
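
For context, here is a minimal sketch of how a caller might exercise the new interface once this patch lands. The include paths, tensor shapes, and the ReLU activation check are illustrative assumptions, not part of the patch:

// Illustrative only: queries the new Neon ILayerSupport implementation.
// The in-tree include path and tensor shapes are assumptions for this sketch.
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include "NeonLayerSupport.hpp"

#include <iostream>
#include <string>

int main()
{
    armnn::NeonLayerSupport layerSupport;

    // Hypothetical 1x2x2x3 float tensors for a ReLU activation query.
    const armnn::TensorInfo input({ 1, 2, 2, 3 }, armnn::DataType::Float32);
    const armnn::TensorInfo output({ 1, 2, 2, 3 }, armnn::DataType::Float32);

    armnn::ActivationDescriptor descriptor;
    descriptor.m_Function = armnn::ActivationFunction::ReLu;

    // Wrapping a local string in Optional<std::string&> lets the backend
    // report why a layer was rejected.
    std::string reason;
    const bool supported = layerSupport.IsActivationSupported(
        input, output, descriptor, armnn::Optional<std::string&>(reason));

    if (!supported)
    {
        std::cout << "Activation not supported on Neon: " << reason << std::endl;
    }
    return supported ? 0 : 1;
}

The default EmptyOptional() argument visible in the header diff below is what makes the reason string opt-in rather than mandatory.
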
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index b6d5e4854d..8581cfe5d7 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -5,8 +5,8 @@
#include "NeonLayerSupport.hpp"
-#include <LayerSupportCommon.hpp>
#include <InternalTypes.hpp>
+#include <LayerSupportCommon.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Types.hpp>
@@ -35,6 +35,301 @@ using namespace boost;
namespace armnn
{
+bool NeonLayerSupport::IsActivationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ActivationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return armnn::IsActivationSupportedNeon(input, output, descriptor, reasonIfUnsupported);
+}
+
+bool NeonLayerSupport::IsAdditionSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return armnn::IsAdditionSupportedNeon(input0, input1, output, reasonIfUnsupported);
+}
+
+bool NeonLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const TensorInfo& mean,
+ const TensorInfo& var,
+ const TensorInfo& beta,
+ const TensorInfo& gamma,
+ const BatchNormalizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return armnn::IsBatchNormalizationSupportedNeon(input,
+ output,
+ mean,
+ var,
+ beta,
+ gamma,
+ descriptor,
+ reasonIfUnsupported);
+}
+
+bool NeonLayerSupport::IsConstantSupported(const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return armnn::IsConstantSupportedNeon(output, reasonIfUnsupported);
+}
+
+bool NeonLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return armnn::IsConvertFp16ToFp32SupportedNeon(input, output, reasonIfUnsupported);
+}
+
+bool NeonLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return armnn::IsConvertFp32ToFp16SupportedNeon(input, output, reasonIfUnsupported);
+}
+
+bool NeonLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const Convolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return armnn::IsConvolution2dSupportedNeon(input,
+ output,
+ descriptor,
+ weights,
+ biases,
+ reasonIfUnsupported);
+}
+
+bool NeonLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const DepthwiseConvolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return armnn::IsDepthwiseConvolutionSupportedNeon(input,
+ output,
+ descriptor,
+ weights,
+ biases,
+ reasonIfUnsupported);
+}
+
+bool NeonLayerSupport::IsDivisionSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return armnn::IsDivisionSupportedNeon(input0, input1, output, reasonIfUnsupported);
+}
+
+bool NeonLayerSupport::IsFakeQuantizationSupported(const TensorInfo& input,
+ const FakeQuantizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return armnn::IsFakeQuantizationSupportedNeon(input, descriptor, reasonIfUnsupported);
+}
+
+bool NeonLayerSupport::IsFloorSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return armnn::IsFloorSupportedNeon(input, output, reasonIfUnsupported);
+}
+
+bool NeonLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const TensorInfo& weights,
+ const TensorInfo& biases,
+ const FullyConnectedDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return armnn::IsFullyConnectedSupportedNeon(input,
+ output,
+ weights,
+ biases,
+ descriptor,
+ reasonIfUnsupported);
+}
+
+bool NeonLayerSupport::IsInputSupported(const TensorInfo& input,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return armnn::IsInputSupportedNeon(input, reasonIfUnsupported);
+}
+
+bool NeonLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const L2NormalizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return armnn::IsL2NormalizationSupportedNeon(input, output, descriptor, reasonIfUnsupported);
+}
+
+bool NeonLayerSupport::IsLstmSupported(const TensorInfo& input,
+ const TensorInfo& outputStateIn,
+ const TensorInfo& cellStateIn,
+ const TensorInfo& scratchBuffer,
+ const TensorInfo& outputStateOut,
+ const TensorInfo& cellStateOut,
+ const TensorInfo& output,
+ const LstmDescriptor& descriptor,
+ const TensorInfo& inputToForgetWeights,
+ const TensorInfo& inputToCellWeights,
+ const TensorInfo& inputToOutputWeights,
+ const TensorInfo& recurrentToForgetWeights,
+ const TensorInfo& recurrentToCellWeights,
+ const TensorInfo& recurrentToOutputWeights,
+ const TensorInfo& forgetGateBias,
+ const TensorInfo& cellBias,
+ const TensorInfo& outputGateBias,
+ const TensorInfo* inputToInputWeights,
+ const TensorInfo* recurrentToInputWeights,
+ const TensorInfo* cellToInputWeights,
+ const TensorInfo* inputGateBias,
+ const TensorInfo* projectionWeights,
+ const TensorInfo* projectionBias,
+ const TensorInfo* cellToForgetWeights,
+ const TensorInfo* cellToOutputWeights,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return armnn::IsLstmSupportedNeon(input,
+ outputStateIn,
+ cellStateIn,
+ scratchBuffer,
+ outputStateOut,
+ cellStateOut,
+ output,
+ descriptor,
+ inputToForgetWeights,
+ inputToCellWeights,
+ inputToOutputWeights,
+ recurrentToForgetWeights,
+ recurrentToCellWeights,
+ recurrentToOutputWeights,
+ forgetGateBias,
+ cellBias,
+ outputGateBias,
+ inputToInputWeights,
+ recurrentToInputWeights,
+ cellToInputWeights,
+ inputGateBias,
+ projectionWeights,
+ projectionBias,
+ cellToForgetWeights,
+ cellToOutputWeights,
+ reasonIfUnsupported);
+}
+
+bool NeonLayerSupport::IsMeanSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const MeanDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return armnn::IsMeanSupportedNeon(input, output, descriptor, reasonIfUnsupported);
+}
+
+bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
+ const OriginsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return armnn::IsMergerSupportedNeon(inputs, descriptor, reasonIfUnsupported);
+}
+
+bool NeonLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return armnn::IsMultiplicationSupportedNeon(input0, input1, output, reasonIfUnsupported);
+}
+
+bool NeonLayerSupport::IsNormalizationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const NormalizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return armnn::IsNormalizationSupportedNeon(input,
+ output,
+ descriptor,
+ reasonIfUnsupported);
+}
+
+bool NeonLayerSupport::IsOutputSupported(const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return armnn::IsOutputSupportedNeon(output, reasonIfUnsupported);
+}
+
+bool NeonLayerSupport::IsPadSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const PadDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return armnn::IsPadSupportedNeon(input, output, descriptor, reasonIfUnsupported);
+}
+
+bool NeonLayerSupport::IsPermuteSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const PermuteDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return armnn::IsPermuteSupportedNeon(input, output, descriptor, reasonIfUnsupported);
+}
+
+bool NeonLayerSupport::IsPooling2dSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const Pooling2dDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return armnn::IsPooling2dSupportedNeon(input, output, descriptor, reasonIfUnsupported);
+}
+
+bool NeonLayerSupport::IsReshapeSupported(const TensorInfo& input,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return armnn::IsReshapeSupportedNeon(input, reasonIfUnsupported);
+}
+
+bool NeonLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return armnn::IsResizeBilinearSupportedNeon(input, reasonIfUnsupported);
+}
+
+bool NeonLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const SoftmaxDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return armnn::IsSoftmaxSupportedNeon(input, output, descriptor, reasonIfUnsupported);
+}
+
+bool NeonLayerSupport::IsSplitterSupported(const TensorInfo& input,
+ const ViewsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return armnn::IsSplitterSupportedNeon(input, descriptor, reasonIfUnsupported);
+}
+
+bool NeonLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return armnn::IsSubtractionSupportedNeon(input0, input1, output, reasonIfUnsupported);
+}
+
+//
+// Implementation functions
+//
+// TODO: Functions kept for backward compatibility. Remove once the transition to pluggable backends is complete!
+
bool IsNeonDirectConvolutionPreferred(const TensorInfo& weightInfo, const Convolution2dDescriptor& desc)
{
// See arm_compute::NEDirectConvolutionLayer documentation for the supported cases,
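
The free functions that the new overrides forward to report failures through the same Optional<std::string&> out-parameter. A minimal sketch of the checked-setter pattern this relies on, assuming a SetValueChecked helper along the lines of LayerSupportCommon.hpp:

#include <armnn/Optional.hpp>

#include <string>
#include <utility>

// Only write the diagnostic when the caller actually supplied a string to
// receive it; callers that passed EmptyOptional() skip the assignment.
template <typename T, typename V>
void SetValueChecked(armnn::Optional<T&> optionalRef, V&& value)
{
    if (optionalRef.has_value())
    {
        optionalRef.value() = std::forward<V>(value);
    }
}

// Hypothetical support check showing how a rejection reason gets reported.
bool IsFooSupportedSketch(armnn::Optional<std::string&> reasonIfUnsupported)
{
    SetValueChecked(reasonIfUnsupported, "Foo is not supported on the Neon backend");
    return false;
}
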
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 468cf58393..91be98182a 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -14,7 +14,162 @@ namespace armnn
class NeonLayerSupport : public ILayerSupport
{
- // TODO implement
+public:
+ bool IsActivationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ActivationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsAdditionSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsBatchNormalizationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const TensorInfo& mean,
+ const TensorInfo& var,
+ const TensorInfo& beta,
+ const TensorInfo& gamma,
+ const BatchNormalizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsConstantSupported(const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsConvertFp16ToFp32Supported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsConvertFp32ToFp16Supported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsConvolution2dSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const Convolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsDepthwiseConvolutionSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const DepthwiseConvolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsDivisionSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsFakeQuantizationSupported(const TensorInfo& input,
+ const FakeQuantizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsFloorSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsFullyConnectedSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const TensorInfo& weights,
+ const TensorInfo& biases,
+ const FullyConnectedDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsInputSupported(const TensorInfo& input,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsL2NormalizationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const L2NormalizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsLstmSupported(const TensorInfo& input,
+ const TensorInfo& outputStateIn,
+ const TensorInfo& cellStateIn,
+ const TensorInfo& scratchBuffer,
+ const TensorInfo& outputStateOut,
+ const TensorInfo& cellStateOut,
+ const TensorInfo& output,
+ const LstmDescriptor& descriptor,
+ const TensorInfo& inputToForgetWeights,
+ const TensorInfo& inputToCellWeights,
+ const TensorInfo& inputToOutputWeights,
+ const TensorInfo& recurrentToForgetWeights,
+ const TensorInfo& recurrentToCellWeights,
+ const TensorInfo& recurrentToOutputWeights,
+ const TensorInfo& forgetGateBias,
+ const TensorInfo& cellBias,
+ const TensorInfo& outputGateBias,
+ const TensorInfo* inputToInputWeights,
+ const TensorInfo* recurrentToInputWeights,
+ const TensorInfo* cellToInputWeights,
+ const TensorInfo* inputGateBias,
+ const TensorInfo* projectionWeights,
+ const TensorInfo* projectionBias,
+ const TensorInfo* cellToForgetWeights,
+ const TensorInfo* cellToOutputWeights,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsMeanSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const MeanDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
+ const OriginsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsMultiplicationSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsNormalizationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const NormalizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsOutputSupported(const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsPadSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const PadDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsPermuteSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const PermuteDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsPooling2dSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const Pooling2dDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsReshapeSupported(const TensorInfo& input,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsResizeBilinearSupported(const TensorInfo& input,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsSoftmaxSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const SoftmaxDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsSplitterSupported(const TensorInfo& input,
+ const ViewsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsSubtractionSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
};
bool IsNeonDirectConvolutionPreferred(const TensorInfo& weightInfo, const Convolution2dDescriptor& desc);
@@ -183,4 +338,4 @@ bool IsPadSupportedNeon(const TensorInfo& input,
const PadDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-}
+} // namespace armnn
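
Since every reasonIfUnsupported parameter above defaults to EmptyOptional(), callers that only need a yes/no answer can drop the argument entirely. A short sketch under the same assumptions as the earlier example:

#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include "NeonLayerSupport.hpp"

int main()
{
    armnn::NeonLayerSupport layerSupport;

    // Hypothetical input tensor; no diagnostic string is requested, so the
    // defaulted EmptyOptional() is used and no reason is written anywhere.
    const armnn::TensorInfo input({ 1, 3, 224, 224 }, armnn::DataType::Float32);

    return layerSupport.IsInputSupported(input) ? 0 : 1;
}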