From 1c7c81bd92d02fab45b4ab0bcca911db80999a6b Mon Sep 17 00:00:00 2001
From: arovir01
Date: Mon, 8 Oct 2018 11:34:28 +0100
Subject: IVGCVSW-1966: Ref implementation for the ILayerSupport interface

Change-Id: Idd572cae3a131acb11e884e33c0035ca74c95055
---
 src/backends/reference/RefLayerSupport.cpp | 313 ++++++++++++++++++++++++++++-
 src/backends/reference/RefLayerSupport.hpp | 157 ++++++++++++++-
 2 files changed, 468 insertions(+), 2 deletions(-)

diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index a42efb748f..e6b1442e4d 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -17,6 +17,317 @@ using namespace boost;
 namespace armnn
 {
 
+namespace
+{
+
+std::string* GetReasonIfUnsupportedPtr(const Optional<std::string&>& reasonIfUnsupported)
+{
+    return reasonIfUnsupported ? &reasonIfUnsupported.value() : nullptr;
+}
+
+} // anonymous namespace
+
+bool RefLayerSupport::IsActivationSupported(const TensorInfo& input,
+                                            const TensorInfo& output,
+                                            const ActivationDescriptor& descriptor,
+                                            Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsActivationSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsAdditionSupported(const TensorInfo& input0,
+                                          const TensorInfo& input1,
+                                          const TensorInfo& output,
+                                          Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsAdditionSupportedRef(input0,
+                                         input1,
+                                         output,
+                                         GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
+                                                    const TensorInfo& output,
+                                                    const TensorInfo& mean,
+                                                    const TensorInfo& var,
+                                                    const TensorInfo& beta,
+                                                    const TensorInfo& gamma,
+                                                    const BatchNormalizationDescriptor& descriptor,
+                                                    Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsBatchNormalizationSupportedRef(input,
+                                                   output,
+                                                   mean,
+                                                   var,
+                                                   beta,
+                                                   gamma,
+                                                   descriptor,
+                                                   GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsConstantSupported(const TensorInfo& output,
+                                          Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsConstantSupportedRef(output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
+                                                   const TensorInfo& output,
+                                                   Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsConvertFp16ToFp32SupportedRef(input, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
+                                                   const TensorInfo& output,
+                                                   Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsConvertFp32ToFp16SupportedRef(input, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
+                                               const TensorInfo& output,
+                                               const Convolution2dDescriptor& descriptor,
+                                               const TensorInfo& weights,
+                                               const Optional<TensorInfo>& biases,
+                                               Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsConvolution2dSupportedRef(input,
+                                              output,
+                                              descriptor,
+                                              weights,
+                                              biases,
+                                              GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
+                                                      const TensorInfo& output,
+                                                      const DepthwiseConvolution2dDescriptor& descriptor,
+                                                      const TensorInfo& weights,
+                                                      const Optional<TensorInfo>& biases,
+                                                      Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsDepthwiseConvolutionSupportedRef(input,
+                                                     output,
+                                                     descriptor,
+                                                     weights,
+                                                     biases,
+                                                     GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsDivisionSupported(const TensorInfo& input0,
+                                          const TensorInfo& input1,
+                                          const TensorInfo& output,
+                                          Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsDivisionSupportedRef(input0, input1, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsFakeQuantizationSupported(const TensorInfo& input,
+                                                  const FakeQuantizationDescriptor& descriptor,
+                                                  Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsFakeQuantizationSupportedRef(input, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsFloorSupported(const TensorInfo& input,
+                                       const TensorInfo& output,
+                                       Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsFloorSupportedRef(input, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
+                                                const TensorInfo& output,
+                                                const TensorInfo& weights,
+                                                const TensorInfo& biases,
+                                                const FullyConnectedDescriptor& descriptor,
+                                                Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsFullyConnectedSupportedRef(input,
+                                               output,
+                                               weights,
+                                               biases,
+                                               descriptor,
+                                               GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsInputSupported(const TensorInfo& input,
+                                       Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsInputSupportedRef(input, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
+                                                 const TensorInfo& output,
+                                                 const L2NormalizationDescriptor& descriptor,
+                                                 Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsL2NormalizationSupportedRef(input,
+                                                output,
+                                                descriptor,
+                                                GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsLstmSupported(const TensorInfo& input,
+                                      const TensorInfo& outputStateIn,
+                                      const TensorInfo& cellStateIn,
+                                      const TensorInfo& scratchBuffer,
+                                      const TensorInfo& outputStateOut,
+                                      const TensorInfo& cellStateOut,
+                                      const TensorInfo& output,
+                                      const LstmDescriptor& descriptor,
+                                      const TensorInfo& inputToForgetWeights,
+                                      const TensorInfo& inputToCellWeights,
+                                      const TensorInfo& inputToOutputWeights,
+                                      const TensorInfo& recurrentToForgetWeights,
+                                      const TensorInfo& recurrentToCellWeights,
+                                      const TensorInfo& recurrentToOutputWeights,
+                                      const TensorInfo& forgetGateBias,
+                                      const TensorInfo& cellBias,
+                                      const TensorInfo& outputGateBias,
+                                      const TensorInfo* inputToInputWeights,
+                                      const TensorInfo* recurrentToInputWeights,
+                                      const TensorInfo* cellToInputWeights,
+                                      const TensorInfo* inputGateBias,
+                                      const TensorInfo* projectionWeights,
+                                      const TensorInfo* projectionBias,
+                                      const TensorInfo* cellToForgetWeights,
+                                      const TensorInfo* cellToOutputWeights,
+                                      Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsLstmSupportedRef(input,
+                                     outputStateIn,
+                                     cellStateIn,
+                                     scratchBuffer,
+                                     outputStateOut,
+                                     cellStateOut,
+                                     output,
+                                     descriptor,
+                                     inputToForgetWeights,
+                                     inputToCellWeights,
+                                     inputToOutputWeights,
+                                     recurrentToForgetWeights,
+                                     recurrentToCellWeights,
+                                     recurrentToOutputWeights,
+                                     forgetGateBias,
+                                     cellBias,
+                                     outputGateBias,
+                                     inputToInputWeights,
+                                     recurrentToInputWeights,
+                                     cellToInputWeights,
+                                     inputGateBias,
+                                     projectionWeights,
+                                     projectionBias,
+                                     cellToForgetWeights,
+                                     cellToOutputWeights,
+                                     GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsMeanSupported(const TensorInfo& input,
+                                      const TensorInfo& output,
+                                      const MeanDescriptor& descriptor,
+                                      Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsMeanSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
+                                        const OriginsDescriptor& descriptor,
+                                        Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsMergerSupportedRef(inputs, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
+                                                const TensorInfo& input1,
+                                                const TensorInfo& output,
+                                                Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsMultiplicationSupportedRef(input0, input1, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsNormalizationSupported(const TensorInfo& input,
+                                               const TensorInfo& output,
+                                               const NormalizationDescriptor& descriptor,
+                                               Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsNormalizationSupportedRef(input,
+                                              output,
+                                              descriptor,
+                                              GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsOutputSupported(const TensorInfo& output,
+                                        Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsOutputSupportedRef(output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsPadSupported(const TensorInfo& input,
+                                     const TensorInfo& output,
+                                     const PadDescriptor& descriptor,
+                                     Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsPadSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsPermuteSupported(const TensorInfo& input,
+                                         const TensorInfo& output,
+                                         const PermuteDescriptor& descriptor,
+                                         Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsPermuteSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsPooling2dSupported(const TensorInfo& input,
+                                           const TensorInfo& output,
+                                           const Pooling2dDescriptor& descriptor,
+                                           Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsPooling2dSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsReshapeSupported(const TensorInfo& input,
+                                         Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsReshapeSupportedRef(input, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
+                                                Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsResizeBilinearSupportedRef(input, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
+                                         const TensorInfo& output,
+                                         const SoftmaxDescriptor& descriptor,
+                                         Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsSoftmaxSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
+                                          const ViewsDescriptor& descriptor,
+                                          Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsSplitterSupportedRef(input, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+bool RefLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
+                                             const TensorInfo& input1,
+                                             const TensorInfo& output,
+                                             Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsSubtractionSupportedRef(input0, input1, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+//
+// Implementation functions
+//
+// TODO: Functions kept for backward compatibility. Remove once transition to plugable backends is complete!
+
 template<typename Float32Func, typename Uint8Func, typename ... Params>
 bool IsSupportedForDataTypeRef(std::string* reasonIfUnsupported,
                                DataType dataType,
@@ -412,4 +723,4 @@ bool IsPadSupportedRef(const TensorInfo& input,
     return false;
 }
 
-}
+} // namespace armnn
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index dcc5dd3ddf..25501fe016 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -14,7 +14,162 @@ namespace armnn
 
 class RefLayerSupport : public ILayerSupport
 {
-    // TODO implement
+public:
+    bool IsActivationSupported(const TensorInfo& input,
+                               const TensorInfo& output,
+                               const ActivationDescriptor& descriptor,
+                               Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsAdditionSupported(const TensorInfo& input0,
+                             const TensorInfo& input1,
+                             const TensorInfo& output,
+                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsBatchNormalizationSupported(const TensorInfo& input,
+                                       const TensorInfo& output,
+                                       const TensorInfo& mean,
+                                       const TensorInfo& var,
+                                       const TensorInfo& beta,
+                                       const TensorInfo& gamma,
+                                       const BatchNormalizationDescriptor& descriptor,
+                                       Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsConstantSupported(const TensorInfo& output,
+                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsConvertFp16ToFp32Supported(const TensorInfo& input,
+                                      const TensorInfo& output,
+                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsConvertFp32ToFp16Supported(const TensorInfo& input,
+                                      const TensorInfo& output,
+                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsConvolution2dSupported(const TensorInfo& input,
+                                  const TensorInfo& output,
+                                  const Convolution2dDescriptor& descriptor,
+                                  const TensorInfo& weights,
+                                  const Optional<TensorInfo>& biases,
+                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsDepthwiseConvolutionSupported(const TensorInfo& input,
+                                         const TensorInfo& output,
+                                         const DepthwiseConvolution2dDescriptor& descriptor,
+                                         const TensorInfo& weights,
+                                         const Optional<TensorInfo>& biases,
+                                         Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsDivisionSupported(const TensorInfo& input0,
+                             const TensorInfo& input1,
+                             const TensorInfo& output,
+                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsFakeQuantizationSupported(const TensorInfo& input,
+                                     const FakeQuantizationDescriptor& descriptor,
+                                     Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsFloorSupported(const TensorInfo& input,
+                          const TensorInfo& output,
+                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsFullyConnectedSupported(const TensorInfo& input,
+                                   const TensorInfo& output,
+                                   const TensorInfo& weights,
+                                   const TensorInfo& biases,
+                                   const FullyConnectedDescriptor& descriptor,
+                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsInputSupported(const TensorInfo& input,
+                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsL2NormalizationSupported(const TensorInfo& input,
+                                    const TensorInfo& output,
+                                    const L2NormalizationDescriptor& descriptor,
+                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsLstmSupported(const TensorInfo& input,
+                         const TensorInfo& outputStateIn,
+                         const TensorInfo& cellStateIn,
+                         const TensorInfo& scratchBuffer,
+                         const TensorInfo& outputStateOut,
+                         const TensorInfo& cellStateOut,
+                         const TensorInfo& output,
+                         const LstmDescriptor& descriptor,
+                         const TensorInfo& inputToForgetWeights,
+                         const TensorInfo& inputToCellWeights,
+                         const TensorInfo& inputToOutputWeights,
+                         const TensorInfo& recurrentToForgetWeights,
+                         const TensorInfo& recurrentToCellWeights,
+                         const TensorInfo& recurrentToOutputWeights,
+                         const TensorInfo& forgetGateBias,
+                         const TensorInfo& cellBias,
+                         const TensorInfo& outputGateBias,
+                         const TensorInfo* inputToInputWeights,
+                         const TensorInfo* recurrentToInputWeights,
+                         const TensorInfo* cellToInputWeights,
+                         const TensorInfo* inputGateBias,
+                         const TensorInfo* projectionWeights,
+                         const TensorInfo* projectionBias,
+                         const TensorInfo* cellToForgetWeights,
+                         const TensorInfo* cellToOutputWeights,
+                         Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsMeanSupported(const TensorInfo& input,
+                         const TensorInfo& output,
+                         const MeanDescriptor& descriptor,
+                         Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
+                           const OriginsDescriptor& descriptor,
+                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsMultiplicationSupported(const TensorInfo& input0,
+                                   const TensorInfo& input1,
+                                   const TensorInfo& output,
+                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsNormalizationSupported(const TensorInfo& input,
+                                  const TensorInfo& output,
+                                  const NormalizationDescriptor& descriptor,
+                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsOutputSupported(const TensorInfo& output,
+                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsPadSupported(const TensorInfo& input,
+                        const TensorInfo& output,
+                        const PadDescriptor& descriptor,
+                        Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsPermuteSupported(const TensorInfo& input,
+                            const TensorInfo& output,
+                            const PermuteDescriptor& descriptor,
+                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsPooling2dSupported(const TensorInfo& input,
+                              const TensorInfo& output,
+                              const Pooling2dDescriptor& descriptor,
+                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsReshapeSupported(const TensorInfo& input,
+                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsResizeBilinearSupported(const TensorInfo& input,
+                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsSoftmaxSupported(const TensorInfo& input,
+                            const TensorInfo& output,
+                            const SoftmaxDescriptor& descriptor,
+                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsSplitterSupported(const TensorInfo& input,
+                             const ViewsDescriptor& descriptor,
+                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsSubtractionSupported(const TensorInfo& input0,
+                                const TensorInfo& input1,
+                                const TensorInfo& output,
+                                Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 };
 
 bool IsActivationSupportedRef(const TensorInfo& input,
--
cgit v1.2.1
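
For context, here is a minimal sketch (not part of the patch) of how a caller might exercise the new layer-support interface added above, passing a std::string through Optional<std::string&> to collect the failure reason. The include path, tensor shapes, and activation settings are illustrative assumptions, not taken from the commit.

```cpp
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include <iostream>
#include <string>

#include "RefLayerSupport.hpp" // assumed to be reachable on the include path for this sketch

int main()
{
    using namespace armnn;

    // Describe a simple float32 ReLU activation with matching input/output shapes.
    const TensorInfo input({ 1, 2, 2, 1 }, DataType::Float32);
    const TensorInfo output({ 1, 2, 2, 1 }, DataType::Float32);

    ActivationDescriptor descriptor;
    descriptor.m_Function = ActivationFunction::ReLu;

    // The reason string is only written to when the query fails.
    std::string reason;
    RefLayerSupport layerSupport;
    const bool supported =
        layerSupport.IsActivationSupported(input, output, descriptor, Optional<std::string&>(reason));

    std::cout << (supported ? "supported" : "not supported: " + reason) << std::endl;
    return 0;
}
```

The default argument `EmptyOptional()` means existing callers can omit the reason entirely; the anonymous-namespace helper `GetReasonIfUnsupportedPtr` then forwards a null pointer to the legacy `Is*SupportedRef` free functions.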