From e97c6e0b88fd0a17991c1be5fc66e466e29e5b8d Mon Sep 17 00:00:00 2001 From: David Beck Date: Wed, 3 Oct 2018 13:09:28 +0100 Subject: IVGCVSW-1642 : adding IBackendInternal interface Change-Id: Icd55fed8381af319f11b4cd977cf03103cdf1bd9 --- CMakeLists.txt | 1 + include/armnn/ILayerSupport.hpp | 210 ++++++++++++++++++++++++++ include/armnn/Types.hpp | 2 + src/backends/CMakeLists.txt | 2 + src/backends/IBackendInternal.hpp | 23 +++ src/backends/ILayerSupport.cpp | 302 ++++++++++++++++++++++++++++++++++++++ 6 files changed, 540 insertions(+) create mode 100644 include/armnn/ILayerSupport.hpp create mode 100644 src/backends/IBackendInternal.hpp create mode 100644 src/backends/ILayerSupport.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 39d83c22bb..56b0935654 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -150,6 +150,7 @@ list(APPEND armnn_sources include/armnn/Descriptors.hpp include/armnn/DescriptorsFwd.hpp include/armnn/IRuntime.hpp + include/armnn/ILayerSupport.hpp include/armnn/INetwork.hpp include/armnn/Tensor.hpp include/armnn/TensorFwd.hpp diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp new file mode 100644 index 0000000000..7962393f34 --- /dev/null +++ b/include/armnn/ILayerSupport.hpp @@ -0,0 +1,210 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/DescriptorsFwd.hpp>
+
+#include <boost/optional.hpp>
+#include <vector>
+
+namespace armnn
+{
+
+class TensorInfo;
+
+class ILayerSupport
+{
+protected:
+    ILayerSupport() {}
+    virtual ~ILayerSupport() {}
+
+public:
+    virtual bool IsActivationSupported(const TensorInfo& input,
+                                       const TensorInfo& output,
+                                       const ActivationDescriptor& descriptor,
+                                       char* reasonIfUnsupported = nullptr,
+                                       size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsAdditionSupported(const TensorInfo& input0,
+                                     const TensorInfo& input1,
+                                     const TensorInfo& output,
+                                     char* reasonIfUnsupported = nullptr,
+                                     size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsBatchNormalizationSupported(const TensorInfo& input,
+                                               const TensorInfo& output,
+                                               const TensorInfo& mean,
+                                               const TensorInfo& var,
+                                               const TensorInfo& beta,
+                                               const TensorInfo& gamma,
+                                               const BatchNormalizationDescriptor& descriptor,
+                                               char* reasonIfUnsupported = nullptr,
+                                               size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsConstantSupported(const TensorInfo& output,
+                                     char* reasonIfUnsupported = nullptr,
+                                     size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsConvertFp16ToFp32Supported(const TensorInfo& input,
+                                              const TensorInfo& output,
+                                              char* reasonIfUnsupported = nullptr,
+                                              size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsConvertFp32ToFp16Supported(const TensorInfo& input,
+                                              const TensorInfo& output,
+                                              char* reasonIfUnsupported = nullptr,
+                                              size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsConvolution2dSupported(const TensorInfo& input,
+                                          const TensorInfo& output,
+                                          const Convolution2dDescriptor& descriptor,
+                                          const TensorInfo& weights,
+                                          const boost::optional<TensorInfo>& biases,
+                                          char* reasonIfUnsupported = nullptr,
+                                          size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsDepthwiseConvolutionSupported(const TensorInfo& input,
+                                                 const TensorInfo& output,
+                                                 const DepthwiseConvolution2dDescriptor& descriptor,
+ 
const TensorInfo& weights,
+                                                 const boost::optional<TensorInfo>& biases,
+                                                 char* reasonIfUnsupported = nullptr,
+                                                 size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsDivisionSupported(const TensorInfo& input0,
+                                     const TensorInfo& input1,
+                                     const TensorInfo& output,
+                                     char* reasonIfUnsupported = nullptr,
+                                     size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsSubtractionSupported(const TensorInfo& input0,
+                                        const TensorInfo& input1,
+                                        const TensorInfo& output,
+                                        char* reasonIfUnsupported = nullptr,
+                                        size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsInputSupported(const TensorInfo& input,
+                                  char* reasonIfUnsupported = nullptr,
+                                  size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsFullyConnectedSupported(const TensorInfo& input,
+                                           const TensorInfo& output,
+                                           const TensorInfo& weights,
+                                           const TensorInfo& biases,
+                                           const FullyConnectedDescriptor& descriptor,
+                                           char* reasonIfUnsupported = nullptr,
+                                           size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsL2NormalizationSupported(const TensorInfo& input,
+                                            const TensorInfo& output,
+                                            const L2NormalizationDescriptor& descriptor,
+                                            char* reasonIfUnsupported = nullptr,
+                                            size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsLstmSupported(const TensorInfo& input,
+                                 const TensorInfo& outputStateIn,
+                                 const TensorInfo& cellStateIn,
+                                 const TensorInfo& scratchBuffer,
+                                 const TensorInfo& outputStateOut,
+                                 const TensorInfo& cellStateOut,
+                                 const TensorInfo& output,
+                                 const LstmDescriptor& descriptor,
+                                 const TensorInfo& inputToForgetWeights,
+                                 const TensorInfo& inputToCellWeights,
+                                 const TensorInfo& inputToOutputWeights,
+                                 const TensorInfo& recurrentToForgetWeights,
+                                 const TensorInfo& recurrentToCellWeights,
+                                 const TensorInfo& recurrentToOutputWeights,
+                                 const TensorInfo& forgetGateBias,
+                                 const TensorInfo& cellBias,
+                                 const TensorInfo& outputGateBias,
+                                 const TensorInfo* inputToInputWeights,
+                                 const TensorInfo* recurrentToInputWeights,
+ 
const TensorInfo* cellToInputWeights,
+                                 const TensorInfo* inputGateBias,
+                                 const TensorInfo* projectionWeights,
+                                 const TensorInfo* projectionBias,
+                                 const TensorInfo* cellToForgetWeights,
+                                 const TensorInfo* cellToOutputWeights,
+                                 char* reasonIfUnsupported = nullptr,
+                                 size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
+                                   const OriginsDescriptor& descriptor,
+                                   char* reasonIfUnsupported = nullptr,
+                                   size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsMultiplicationSupported(const TensorInfo& input0,
+                                           const TensorInfo& input1,
+                                           const TensorInfo& output,
+                                           char* reasonIfUnsupported = nullptr,
+                                           size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsNormalizationSupported(const TensorInfo& input,
+                                          const TensorInfo& output,
+                                          const NormalizationDescriptor& descriptor,
+                                          char* reasonIfUnsupported = nullptr,
+                                          size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsOutputSupported(const TensorInfo& output,
+                                   char* reasonIfUnsupported = nullptr,
+                                   size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsPermuteSupported(const TensorInfo& input,
+                                    const TensorInfo& output,
+                                    const PermuteDescriptor& descriptor,
+                                    char* reasonIfUnsupported = nullptr,
+                                    size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsPooling2dSupported(const TensorInfo& input,
+                                      const TensorInfo& output,
+                                      const Pooling2dDescriptor& descriptor,
+                                      char* reasonIfUnsupported = nullptr,
+                                      size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsResizeBilinearSupported(const TensorInfo& input,
+                                           char* reasonIfUnsupported = nullptr,
+                                           size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsSoftmaxSupported(const TensorInfo& input,
+                                    const TensorInfo& output,
+                                    const SoftmaxDescriptor& descriptor,
+                                    char* reasonIfUnsupported = nullptr,
+                                    size_t reasonIfUnsupportedMaxLength = 1024) const;
+
+    virtual bool IsSplitterSupported(const TensorInfo& 
input, + const ViewsDescriptor& descriptor, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024) const; + + virtual bool IsFakeQuantizationSupported(const TensorInfo& input, + const FakeQuantizationDescriptor& descriptor, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024) const; + + virtual bool IsReshapeSupported(const TensorInfo& input, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024) const; + + virtual bool IsFloorSupported(const TensorInfo& input, + const TensorInfo& output, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024) const; + + virtual bool IsMeanSupported(const TensorInfo& input, + const TensorInfo& output, + const MeanDescriptor& descriptor, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024) const; + + virtual bool IsPadSupported(const TensorInfo& input, + const TensorInfo& output, + const PadDescriptor& descriptor, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024) const; + +}; // class ILayerSupport + +} // namespace armnn diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp index d0a0174ecd..12ecda0c39 100644 --- a/include/armnn/Types.hpp +++ b/include/armnn/Types.hpp @@ -6,6 +6,7 @@ #include #include +#include "ILayerSupport.hpp" namespace armnn { @@ -113,6 +114,7 @@ protected: public: virtual const std::string& GetId() const = 0; + virtual const ILayerSupport& GetLayerSupport() const = 0; }; using IBackendPtr = std::shared_ptr; diff --git a/src/backends/CMakeLists.txt b/src/backends/CMakeLists.txt index c9c5cc1a7e..ea5ad7814c 100644 --- a/src/backends/CMakeLists.txt +++ b/src/backends/CMakeLists.txt @@ -7,6 +7,8 @@ list(APPEND armnnBackendsCommon_sources CpuTensorHandle.cpp CpuTensorHandleFwd.hpp CpuTensorHandle.hpp + IBackendInternal.hpp + ILayerSupport.cpp ITensorHandle.hpp MakeWorkloadHelper.hpp MemCopyWorkload.cpp diff --git 
a/src/backends/IBackendInternal.hpp b/src/backends/IBackendInternal.hpp
new file mode 100644
index 0000000000..1ccf88ece6
--- /dev/null
+++ b/src/backends/IBackendInternal.hpp
@@ -0,0 +1,23 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/Types.hpp>
+#include <memory>
+
+namespace armnn
+{
+
+class IBackendInternal : public IBackend
+{
+protected:
+    IBackendInternal() {}
+    virtual ~IBackendInternal() {}
+
+public:
+    virtual std::unique_ptr<IWorkloadFactory> CreateWorkloadFactory() const = 0;
+};
+
+} // namespace armnn
diff --git a/src/backends/ILayerSupport.cpp b/src/backends/ILayerSupport.cpp
new file mode 100644
index 0000000000..c0446e93ba
--- /dev/null
+++ b/src/backends/ILayerSupport.cpp
@@ -0,0 +1,302 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn/ILayerSupport.hpp>
+
+namespace armnn
+{
+
+namespace
+{
+
+bool DefaultLayerSupport(const char* func,
+                         const char* file,
+                         unsigned int line,
+                         char* reasonIfUnsupported,
+                         size_t reasonIfUnsupportedMaxLength)
+{
+    if (reasonIfUnsupported != nullptr && reasonIfUnsupportedMaxLength > 0)
+    {
+        snprintf(reasonIfUnsupported,
+                 reasonIfUnsupportedMaxLength,
+                 "%s is not supported [%s:%d]",
+                 func,
+                 file,
+                 line);
+    }
+    return false;
+}
+
+}
+
+bool ILayerSupport::IsActivationSupported(const TensorInfo& input,
+                                          const TensorInfo& output,
+                                          const ActivationDescriptor& descriptor,
+                                          char* reasonIfUnsupported,
+                                          size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsAdditionSupported(const TensorInfo& input0,
+                                        const TensorInfo& input1,
+                                        const TensorInfo& output,
+                                        char* reasonIfUnsupported,
+                                        size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsBatchNormalizationSupported(const 
TensorInfo& input,
+                                                  const TensorInfo& output,
+                                                  const TensorInfo& mean,
+                                                  const TensorInfo& var,
+                                                  const TensorInfo& beta,
+                                                  const TensorInfo& gamma,
+                                                  const BatchNormalizationDescriptor& descriptor,
+                                                  char* reasonIfUnsupported,
+                                                  size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsConstantSupported(const TensorInfo& output,
+                                        char* reasonIfUnsupported,
+                                        size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
+                                                 const TensorInfo& output,
+                                                 char* reasonIfUnsupported,
+                                                 size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
+                                                 const TensorInfo& output,
+                                                 char* reasonIfUnsupported,
+                                                 size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsConvolution2dSupported(const TensorInfo& input,
+                                             const TensorInfo& output,
+                                             const Convolution2dDescriptor& descriptor,
+                                             const TensorInfo& weights,
+                                             const boost::optional<TensorInfo>& biases,
+                                             char* reasonIfUnsupported,
+                                             size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
+                                                    const TensorInfo& output,
+                                                    const DepthwiseConvolution2dDescriptor& descriptor,
+                                                    const TensorInfo& weights,
+                                                    const boost::optional<TensorInfo>& biases,
+                                                    char* reasonIfUnsupported,
+                                                    size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, 
__LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength); +} + +bool ILayerSupport::IsDivisionSupported(const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) const +{ + return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength); +} + +bool ILayerSupport::IsSubtractionSupported(const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) const +{ + return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength); +} + +bool ILayerSupport::IsInputSupported(const TensorInfo& input, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) const +{ + return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength); +} + +bool ILayerSupport::IsFullyConnectedSupported(const TensorInfo& input, + const TensorInfo& output, + const TensorInfo& weights, + const TensorInfo& biases, + const FullyConnectedDescriptor& descriptor, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) const +{ + return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength); +} + +bool ILayerSupport::IsL2NormalizationSupported(const TensorInfo& input, + const TensorInfo& output, + const L2NormalizationDescriptor& descriptor, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) const +{ + return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength); +} + +bool ILayerSupport::IsLstmSupported(const TensorInfo& input, + const TensorInfo& outputStateIn, + const TensorInfo& cellStateIn, + const TensorInfo& scratchBuffer, + const TensorInfo& outputStateOut, + const TensorInfo& cellStateOut, + const TensorInfo& output, + const LstmDescriptor& descriptor, + 
const TensorInfo& inputToForgetWeights,
+                                    const TensorInfo& inputToCellWeights,
+                                    const TensorInfo& inputToOutputWeights,
+                                    const TensorInfo& recurrentToForgetWeights,
+                                    const TensorInfo& recurrentToCellWeights,
+                                    const TensorInfo& recurrentToOutputWeights,
+                                    const TensorInfo& forgetGateBias,
+                                    const TensorInfo& cellBias,
+                                    const TensorInfo& outputGateBias,
+                                    const TensorInfo* inputToInputWeights,
+                                    const TensorInfo* recurrentToInputWeights,
+                                    const TensorInfo* cellToInputWeights,
+                                    const TensorInfo* inputGateBias,
+                                    const TensorInfo* projectionWeights,
+                                    const TensorInfo* projectionBias,
+                                    const TensorInfo* cellToForgetWeights,
+                                    const TensorInfo* cellToOutputWeights,
+                                    char* reasonIfUnsupported,
+                                    size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
+                                      const OriginsDescriptor& descriptor,
+                                      char* reasonIfUnsupported,
+                                      size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
+                                              const TensorInfo& input1,
+                                              const TensorInfo& output,
+                                              char* reasonIfUnsupported,
+                                              size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsNormalizationSupported(const TensorInfo& input,
+                                             const TensorInfo& output,
+                                             const NormalizationDescriptor& descriptor,
+                                             char* reasonIfUnsupported,
+                                             size_t reasonIfUnsupportedMaxLength) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+}
+
+bool ILayerSupport::IsOutputSupported(const TensorInfo& output,
+                                      char* reasonIfUnsupported,
+                                      size_t reasonIfUnsupportedMaxLength) const
+{
+    return 
DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength); +} + +bool ILayerSupport::IsPermuteSupported(const TensorInfo& input, + const TensorInfo& output, + const PermuteDescriptor& descriptor, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) const +{ + return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength); +} + +bool ILayerSupport::IsPooling2dSupported(const TensorInfo& input, + const TensorInfo& output, + const Pooling2dDescriptor& descriptor, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) const +{ + return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength); +} + +bool ILayerSupport::IsResizeBilinearSupported(const TensorInfo& input, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) const +{ + return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength); +} + +bool ILayerSupport::IsSoftmaxSupported(const TensorInfo& input, + const TensorInfo& output, + const SoftmaxDescriptor& descriptor, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) const +{ + return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength); +} + +bool ILayerSupport::IsSplitterSupported(const TensorInfo& input, + const ViewsDescriptor& descriptor, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) const +{ + return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength); +} + +bool ILayerSupport::IsFakeQuantizationSupported(const TensorInfo& input, + const FakeQuantizationDescriptor& descriptor, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) const +{ + return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength); +} + +bool 
ILayerSupport::IsReshapeSupported(const TensorInfo& input, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) const +{ + return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength); +} + +bool ILayerSupport::IsFloorSupported(const TensorInfo& input, + const TensorInfo& output, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) const +{ + return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength); +} + +bool ILayerSupport::IsMeanSupported(const TensorInfo& input, + const TensorInfo& output, + const MeanDescriptor& descriptor, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) const +{ + return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength); +} + +bool ILayerSupport::IsPadSupported(const TensorInfo& input, + const TensorInfo& output, + const PadDescriptor& descriptor, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) const +{ + return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported, reasonIfUnsupportedMaxLength); +} + +} -- cgit v1.2.1