From d6c10ed456eb6d8f2569f9bebefe915c1e5951b7 Mon Sep 17 00:00:00 2001
From: arovir01
Date: Fri, 5 Oct 2018 15:46:51 +0100
Subject: IVGCVSW-1973: Replace char* reasonIfNotSupported with Optional in ILayerSupport

Change-Id: Id5db71a0b6f4bdc5d97210652b9028e3c5dcc39a
---
 include/armnn/ILayerSupport.hpp |  84 +++++++--------------
 src/backends/ILayerSupport.cpp  | 157 ++++++++++++++++------------------------
 2 files changed, 92 insertions(+), 149 deletions(-)

diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index b9b41b7fcf..c595075a63 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -24,14 +24,12 @@ public:
     virtual bool IsActivationSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const ActivationDescriptor& descriptor,
-                                       char* reasonIfUnsupported = nullptr,
-                                       size_t reasonIfUnsupportedMaxLength = 1024) const;
+                                       Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
     virtual bool IsAdditionSupported(const TensorInfo& input0,
                                      const TensorInfo& input1,
                                      const TensorInfo& output,
-                                     char* reasonIfUnsupported = nullptr,
-                                     size_t reasonIfUnsupportedMaxLength = 1024) const;
+                                     Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
     virtual bool IsBatchNormalizationSupported(const TensorInfo& input,
                                                const TensorInfo& output,
@@ -40,68 +38,57 @@ public:
                                                const TensorInfo& beta,
                                                const TensorInfo& gamma,
                                                const BatchNormalizationDescriptor& descriptor,
-                                               char* reasonIfUnsupported = nullptr,
-                                               size_t reasonIfUnsupportedMaxLength = 1024) const;
+                                               Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
     virtual bool IsConstantSupported(const TensorInfo& output,
-                                     char* reasonIfUnsupported = nullptr,
-                                     size_t reasonIfUnsupportedMaxLength = 1024) const;
+                                     Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
     virtual bool IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                               const TensorInfo& output,
-                                              char* reasonIfUnsupported = nullptr,
-                                              size_t reasonIfUnsupportedMaxLength = 1024) const;
+                                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
     virtual bool IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                               const TensorInfo& output,
-                                              char* reasonIfUnsupported = nullptr,
-                                              size_t reasonIfUnsupportedMaxLength = 1024) const;
+                                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
     virtual bool IsConvolution2dSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const Convolution2dDescriptor& descriptor,
                                           const TensorInfo& weights,
                                           const Optional<TensorInfo>& biases,
-                                          char* reasonIfUnsupported = nullptr,
-                                          size_t reasonIfUnsupportedMaxLength = 1024) const;
+                                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
     virtual bool IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  const DepthwiseConvolution2dDescriptor& descriptor,
                                                  const TensorInfo& weights,
                                                  const Optional<TensorInfo>& biases,
-                                                 char* reasonIfUnsupported = nullptr,
-                                                 size_t reasonIfUnsupportedMaxLength = 1024) const;
+                                                 Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
     virtual bool IsDivisionSupported(const TensorInfo& input0,
                                      const TensorInfo& input1,
                                      const TensorInfo& output,
-                                     char* reasonIfUnsupported = nullptr,
-                                     size_t reasonIfUnsupportedMaxLength = 1024) const;
+                                     Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
     virtual bool IsSubtractionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
-                                        char* reasonIfUnsupported = nullptr,
-                                        size_t reasonIfUnsupportedMaxLength = 1024) const;
+                                        Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
     virtual bool IsInputSupported(const TensorInfo& input,
-                                  char* reasonIfUnsupported = nullptr,
-                                  size_t reasonIfUnsupportedMaxLength = 1024) const;
+                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
     virtual bool IsFullyConnectedSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const TensorInfo& weights,
                                            const TensorInfo& biases,
                                            const FullyConnectedDescriptor& descriptor,
-                                           char* reasonIfUnsupported = nullptr,
-                                           size_t reasonIfUnsupportedMaxLength = 1024) const;
+                                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
     virtual bool IsL2NormalizationSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const L2NormalizationDescriptor& descriptor,
-                                            char* reasonIfUnsupported = nullptr,
-                                            size_t reasonIfUnsupportedMaxLength = 1024) const;
+                                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
     virtual bool IsLstmSupported(const TensorInfo& input,
                                  const TensorInfo& outputStateIn,
@@ -128,82 +115,67 @@ public:
                                  const TensorInfo* projectionBias,
                                  const TensorInfo* cellToForgetWeights,
                                  const TensorInfo* cellToOutputWeights,
-                                 char* reasonIfUnsupported = nullptr,
-                                 size_t reasonIfUnsupportedMaxLength = 1024) const;
+                                 Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
     virtual bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                    const OriginsDescriptor& descriptor,
-                                   char* reasonIfUnsupported = nullptr,
-                                   size_t reasonIfUnsupportedMaxLength = 1024) const;
+                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
     virtual bool IsMultiplicationSupported(const TensorInfo& input0,
                                            const TensorInfo& input1,
                                            const TensorInfo& output,
-                                           char* reasonIfUnsupported = nullptr,
-                                           size_t reasonIfUnsupportedMaxLength = 1024) const;
+                                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
     virtual bool IsNormalizationSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const NormalizationDescriptor& descriptor,
-                                          char* reasonIfUnsupported = nullptr,
-                                          size_t reasonIfUnsupportedMaxLength = 1024) const;
+                                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
     virtual bool IsOutputSupported(const TensorInfo& output,
-                                   char* reasonIfUnsupported = nullptr,
-                                   size_t reasonIfUnsupportedMaxLength = 1024) const;
+                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
     virtual bool IsPermuteSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     const PermuteDescriptor& descriptor,
-                                    char* reasonIfUnsupported = nullptr,
-                                    size_t reasonIfUnsupportedMaxLength = 1024) const;
+                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
     virtual bool IsPooling2dSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       const Pooling2dDescriptor& descriptor,
-                                      char* reasonIfUnsupported = nullptr,
-                                      size_t reasonIfUnsupportedMaxLength = 1024) const;
+                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
     virtual bool IsResizeBilinearSupported(const TensorInfo& input,
-                                           char* reasonIfUnsupported = nullptr,
-                                           size_t reasonIfUnsupportedMaxLength = 1024) const;
+                                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
     virtual bool IsSoftmaxSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     const SoftmaxDescriptor& descriptor,
-                                    char* reasonIfUnsupported = nullptr,
-                                    size_t reasonIfUnsupportedMaxLength = 1024) const;
+                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
     virtual bool IsSplitterSupported(const TensorInfo& input,
                                      const ViewsDescriptor& descriptor,
-                                     char* reasonIfUnsupported = nullptr,
-                                     size_t reasonIfUnsupportedMaxLength = 1024) const;
+                                     Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
     virtual bool IsFakeQuantizationSupported(const TensorInfo& input,
                                              const FakeQuantizationDescriptor& descriptor,
-                                             char* reasonIfUnsupported = nullptr,
-                                             size_t reasonIfUnsupportedMaxLength = 1024) const;
+                                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
     virtual bool IsReshapeSupported(const TensorInfo& input,
-                                    char* reasonIfUnsupported = nullptr,
-                                    size_t reasonIfUnsupportedMaxLength = 1024) const;
+                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
     virtual bool IsFloorSupported(const TensorInfo& input,
                                   const TensorInfo& output,
-                                  char* reasonIfUnsupported = nullptr,
-                                  size_t reasonIfUnsupportedMaxLength = 1024) const;
+                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
     virtual bool IsMeanSupported(const TensorInfo& input,
                                  const TensorInfo& output,
                                  const MeanDescriptor& descriptor,
-                                 char* reasonIfUnsupported = nullptr,
-                                 size_t reasonIfUnsupportedMaxLength = 1024) const;
+                                 Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
     virtual bool IsPadSupported(const TensorInfo& input,
                                 const TensorInfo& output,
                                 const PadDescriptor& descriptor,
-                                char* reasonIfUnsupported = nullptr,
-                                size_t reasonIfUnsupportedMaxLength = 1024) const;
+                                Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 
 }; // class ILayerSupport
 
diff --git a/src/backends/ILayerSupport.cpp b/src/backends/ILayerSupport.cpp
index ff4b80d22b..286f1255e5 100644
--- a/src/backends/ILayerSupport.cpp
+++ b/src/backends/ILayerSupport.cpp
@@ -14,18 +14,17 @@ namespace
 bool DefaultLayerSupport(const char* func,
                          const char* file,
                          unsigned int line,
-                         char* reasonIfUnsupported,
-                         size_t reasonIfUnsupportedMaxLength)
+                         Optional<std::string&> reasonIfUnsupported)
 {
-    if (reasonIfUnsupported != nullptr && reasonIfUnsupportedMaxLength > 0)
+    // NOTE: We only need to return the reason if the optional parameter is not empty
+    if (reasonIfUnsupported)
     {
-        snprintf(reasonIfUnsupported,
-                 reasonIfUnsupportedMaxLength,
-                 "%s is not supported [%s:%d]",
-                 func,
-                 file,
-                 line);
+        std::stringstream message;
+        message << func << " is not implemented [" << file << ":" << line << "]";
+
+        reasonIfUnsupported.value() = message.str();
     }
+
     return false;
 }
 
@@ -34,19 +33,17 @@ bool DefaultLayerSupport(const char* func,
 bool ILayerSupport::IsActivationSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const ActivationDescriptor& descriptor,
-                                          char* reasonIfUnsupported,
-                                          size_t reasonIfUnsupportedMaxLength) const
+                                          Optional<std::string&> reasonIfUnsupported) const
 {
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
 bool ILayerSupport::IsAdditionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
-                                        char* reasonIfUnsupported,
-                                        size_t reasonIfUnsupportedMaxLength) const
+                                        Optional<std::string&> reasonIfUnsupported) const
 {
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
 bool ILayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
@@ -56,33 +53,29 @@ bool ILayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                                   const TensorInfo& beta,
                                                   const TensorInfo& gamma,
                                                   const BatchNormalizationDescriptor& descriptor,
-                                                  char* reasonIfUnsupported,
-                                                  size_t reasonIfUnsupportedMaxLength) const
+                                                  Optional<std::string&> reasonIfUnsupported) const
 {
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
 bool ILayerSupport::IsConstantSupported(const TensorInfo& output,
-                                        char* reasonIfUnsupported,
-                                        size_t reasonIfUnsupportedMaxLength) const
+                                        Optional<std::string&> reasonIfUnsupported) const
 {
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
 bool ILayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
-                                                 char* reasonIfUnsupported,
-                                                 size_t reasonIfUnsupportedMaxLength) const
+                                                 Optional<std::string&> reasonIfUnsupported) const
 {
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
 bool ILayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
-                                                 char* reasonIfUnsupported,
-                                                 size_t reasonIfUnsupportedMaxLength) const
+                                                 Optional<std::string&> reasonIfUnsupported) const
 {
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
 bool ILayerSupport::IsConvolution2dSupported(const TensorInfo& input,
@@ -90,10 +83,9 @@ bool ILayerSupport::IsConvolution2dSupported(const TensorInfo& input,
                                              const Convolution2dDescriptor& descriptor,
                                              const TensorInfo& weights,
                                              const Optional<TensorInfo>& biases,
-                                             char* reasonIfUnsupported,
-                                             size_t reasonIfUnsupportedMaxLength) const
+                                             Optional<std::string&> reasonIfUnsupported) const
 {
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
 bool ILayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
@@ -101,35 +93,31 @@ bool ILayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                                     const DepthwiseConvolution2dDescriptor& descriptor,
                                                     const TensorInfo& weights,
                                                     const Optional<TensorInfo>& biases,
-                                                    char* reasonIfUnsupported,
-                                                    size_t reasonIfUnsupportedMaxLength) const
+                                                    Optional<std::string&> reasonIfUnsupported) const
 {
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
 bool ILayerSupport::IsDivisionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
-                                        char* reasonIfUnsupported,
-                                        size_t reasonIfUnsupportedMaxLength) const
+                                        Optional<std::string&> reasonIfUnsupported) const
 {
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
 bool ILayerSupport::IsSubtractionSupported(const TensorInfo& input0,
                                            const TensorInfo& input1,
                                            const TensorInfo& output,
-                                           char* reasonIfUnsupported,
-                                           size_t reasonIfUnsupportedMaxLength) const
+                                           Optional<std::string&> reasonIfUnsupported) const
 {
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
 bool ILayerSupport::IsInputSupported(const TensorInfo& input,
-                                     char* reasonIfUnsupported,
-                                     size_t reasonIfUnsupportedMaxLength) const
+                                     Optional<std::string&> reasonIfUnsupported) const
 {
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
 bool ILayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
@@ -137,19 +125,17 @@ bool ILayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
                                               const TensorInfo& weights,
                                               const TensorInfo& biases,
                                               const FullyConnectedDescriptor& descriptor,
-                                              char* reasonIfUnsupported,
-                                              size_t reasonIfUnsupportedMaxLength) const
+                                              Optional<std::string&> reasonIfUnsupported) const
 {
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
 bool ILayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const L2NormalizationDescriptor& descriptor,
-                                               char* reasonIfUnsupported,
-                                               size_t reasonIfUnsupportedMaxLength) const
+                                               Optional<std::string&> reasonIfUnsupported) const
 {
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
 bool ILayerSupport::IsLstmSupported(const TensorInfo& input,
@@ -177,126 +163,111 @@ bool ILayerSupport::IsLstmSupported(const TensorInfo& input,
                                     const TensorInfo* projectionBias,
                                     const TensorInfo* cellToForgetWeights,
                                     const TensorInfo* cellToOutputWeights,
-                                    char* reasonIfUnsupported,
-                                    size_t reasonIfUnsupportedMaxLength) const
+                                    Optional<std::string&> reasonIfUnsupported) const
 {
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
 bool ILayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                       const OriginsDescriptor& descriptor,
-                                      char* reasonIfUnsupported,
-                                      size_t reasonIfUnsupportedMaxLength) const
+                                      Optional<std::string&> reasonIfUnsupported) const
 {
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
 bool ILayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
                                               const TensorInfo& input1,
                                               const TensorInfo& output,
-                                              char* reasonIfUnsupported,
-                                              size_t reasonIfUnsupportedMaxLength) const
+                                              Optional<std::string&> reasonIfUnsupported) const
 {
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
 bool ILayerSupport::IsNormalizationSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const NormalizationDescriptor& descriptor,
-                                             char* reasonIfUnsupported,
-                                             size_t reasonIfUnsupportedMaxLength) const
+                                             Optional<std::string&> reasonIfUnsupported) const
 {
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
 bool ILayerSupport::IsOutputSupported(const TensorInfo& output,
                                       char* reasonIfUnsupported,
-                                      size_t reasonIfUnsupportedMaxLength) const
+                                      Optional<std::string&> reasonIfUnsupported) const
 {
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
 bool ILayerSupport::IsPermuteSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const PermuteDescriptor& descriptor,
-                                       char* reasonIfUnsupported,
-                                       size_t reasonIfUnsupportedMaxLength) const
+                                       Optional<std::string&> reasonIfUnsupported) const
 {
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
 bool ILayerSupport::IsPooling2dSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const Pooling2dDescriptor& descriptor,
-                                         char* reasonIfUnsupported,
-                                         size_t reasonIfUnsupportedMaxLength) const
+                                         Optional<std::string&> reasonIfUnsupported) const
 {
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
 bool ILayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
-                                              char* reasonIfUnsupported,
-                                              size_t reasonIfUnsupportedMaxLength) const
+                                              Optional<std::string&> reasonIfUnsupported) const
 {
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
 bool ILayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const SoftmaxDescriptor& descriptor,
-                                       char* reasonIfUnsupported,
-                                       size_t reasonIfUnsupportedMaxLength) const
+                                       Optional<std::string&> reasonIfUnsupported) const
 {
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
 bool ILayerSupport::IsSplitterSupported(const TensorInfo& input,
                                         const ViewsDescriptor& descriptor,
-                                        char* reasonIfUnsupported,
-                                        size_t reasonIfUnsupportedMaxLength) const
+                                        Optional<std::string&> reasonIfUnsupported) const
 {
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
 bool ILayerSupport::IsFakeQuantizationSupported(const TensorInfo& input,
                                                 const FakeQuantizationDescriptor& descriptor,
-                                                char* reasonIfUnsupported,
-                                                size_t reasonIfUnsupportedMaxLength) const
+                                                Optional<std::string&> reasonIfUnsupported) const
 {
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
 bool ILayerSupport::IsReshapeSupported(const TensorInfo& input,
-                                       char* reasonIfUnsupported,
-                                       size_t reasonIfUnsupportedMaxLength) const
+                                       Optional<std::string&> reasonIfUnsupported) const
 {
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
 bool ILayerSupport::IsFloorSupported(const TensorInfo& input,
                                      const TensorInfo& output,
-                                     char* reasonIfUnsupported,
-                                     size_t reasonIfUnsupportedMaxLength) const
+                                     Optional<std::string&> reasonIfUnsupported) const
 {
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
 bool ILayerSupport::IsMeanSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     const MeanDescriptor& descriptor,
-                                    char* reasonIfUnsupported,
-                                    size_t reasonIfUnsupportedMaxLength) const
+                                    Optional<std::string&> reasonIfUnsupported) const
 {
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
 bool ILayerSupport::IsPadSupported(const TensorInfo& input,
                                    const TensorInfo& output,
                                    const PadDescriptor& descriptor,
-                                   char* reasonIfUnsupported,
-                                   size_t reasonIfUnsupportedMaxLength) const
+                                   Optional<std::string&> reasonIfUnsupported) const
 {
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
 }
--
cgit v1.2.1
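
A minimal usage sketch, not part of the patch above: it shows how a caller might pass a reason string through the new Optional parameter, or omit it entirely. The ReportActivationSupport function, the layerSupport reference, and the assumption that armnn/Tensor.hpp, armnn/Descriptors.hpp and armnn/Optional.hpp provide TensorInfo, ActivationDescriptor and Optional are illustrative assumptions, not something defined by this change.

    // Sketch only: assumes the ArmNN public headers named below and an
    // ILayerSupport implementation supplied by the caller.
    #include <armnn/ILayerSupport.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Descriptors.hpp>
    #include <armnn/Optional.hpp>

    #include <iostream>
    #include <string>

    void ReportActivationSupport(const armnn::ILayerSupport& layerSupport,
                                 const armnn::TensorInfo& input,
                                 const armnn::TensorInfo& output,
                                 const armnn::ActivationDescriptor& descriptor)
    {
        std::string reason;

        // Wrap a caller-owned string in Optional<std::string&>; the default
        // implementation writes into it only when it returns false.
        if (!layerSupport.IsActivationSupported(input, output, descriptor,
                                                armnn::Optional<std::string&>(reason)))
        {
            std::cout << "Activation not supported: " << reason << std::endl;
        }

        // Callers that do not need a reason simply omit the argument, leaving
        // the default EmptyOptional() in place, so no string is written.
        layerSupport.IsActivationSupported(input, output, descriptor);
    }

Compared with the old char*/max-length pair, the Optional<std::string&> form lets the callee grow the message to any length and makes "no reason requested" explicit rather than signalled by a null pointer.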