diff options
author | Matteo Martincigh <matteo.martincigh@arm.com> | 2019-01-10 17:34:20 +0000 |
---|---|---|
committer | Matteo Martincigh <matteo.martincigh@arm.com> | 2019-01-11 12:20:47 +0000 |
commit | 992d6dc57d8463729910b688f0fb5825d0d3ccf2 (patch) | |
tree | 87b504d174848169550240f300f359dd57aaa1fd /include/armnn | |
parent | 1f0ff35236c1dd05954735f7fed9c2807770479e (diff) | |
download | armnn-992d6dc57d8463729910b688f0fb5825d0d3ccf2.tar.gz |
IVGCVSW-2454 Refactor ArmNN to support pluggable backends from a separate
code base
* Made the virtual functions in ILayerSupport.hpp pure
* Created a LayerSupportBase class with the default implementation of
the interface
* Made the backend layer support classes inherit from the base
class, instead of directly from the interface
* Refactored the profiler and the profiling event classes to use
the BackendId instead of the Compute
* Implemented a proper MemCopy support method
* Changed Compute to BackendId in the profiling API and objects
* Removed static references to pluggable backends
!android-nn-driver:492
Change-Id: Id6332b5f48c980819e0a09adc818d1effd057296
Diffstat (limited to 'include/armnn')
-rw-r--r-- | include/armnn/BackendId.hpp | 23 |
-rw-r--r-- | include/armnn/ILayerSupport.hpp | 90 |
-rw-r--r-- | include/armnn/LayerSupport.hpp | 152 |
3 files changed, 139 insertions(+), 126 deletions(-)
diff --git a/include/armnn/BackendId.hpp b/include/armnn/BackendId.hpp index 8de985ec2f..87206073be 100644 --- a/include/armnn/BackendId.hpp +++ b/include/armnn/BackendId.hpp @@ -20,13 +20,13 @@ namespace armnn // enum class Compute { + Undefined = 0, /// CPU Execution: Reference C++ kernels - CpuRef = 0, + CpuRef = 1, /// CPU Execution: NEON: ArmCompute - CpuAcc = 1, + CpuAcc = 2, /// GPU Execution: OpenCL: ArmCompute - GpuAcc = 2, - Undefined = 5 + GpuAcc = 3 }; /// Deprecated function that will be removed together with @@ -46,7 +46,8 @@ constexpr char const* GetComputeDeviceAsCString(Compute compute) /// the Compute enum inline std::ostream& operator<<(std::ostream& os, const std::vector<Compute>& compute) { - for (const Compute& comp : compute) { + for (const Compute& comp : compute) + { os << GetComputeDeviceAsCString(comp) << " "; } return os; @@ -56,7 +57,8 @@ inline std::ostream& operator<<(std::ostream& os, const std::vector<Compute>& co /// the Compute enum inline std::ostream& operator<<(std::ostream& os, const std::set<Compute>& compute) { - for (const Compute& comp : compute) { + for (const Compute& comp : compute) + { os << GetComputeDeviceAsCString(comp) << " "; } return os; @@ -70,13 +72,10 @@ inline std::ostream& operator<<(std::ostream& os, const Compute& compute) return os; } -struct UninitializedBackendId {}; - class BackendId final { public: - BackendId() { GetComputeDeviceAsCString(Compute::Undefined); } - BackendId(UninitializedBackendId) { GetComputeDeviceAsCString(Compute::Undefined); } + BackendId() : m_Id(GetComputeDeviceAsCString(Compute::Undefined)) {} BackendId(const std::string& id) : m_Id{id} {} BackendId(const char* id) : m_Id{id} {} @@ -132,7 +131,7 @@ private: std::string m_Id; }; -} +} // namespace armnn namespace std { @@ -163,7 +162,7 @@ inline std::ostream& operator<<(std::ostream& os, const BackendId& id) template <template <typename...> class TContainer, typename... 
TContainerTemplateArgs> std::ostream& operator<<(std::ostream& os, - const TContainer<BackendId, TContainerTemplateArgs...>& ids) + const TContainer<BackendId, TContainerTemplateArgs...>& ids) { os << '['; for (const auto& id : ids) { os << id << " "; } diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp index af38b07f39..bba344975a 100644 --- a/include/armnn/ILayerSupport.hpp +++ b/include/armnn/ILayerSupport.hpp @@ -25,12 +25,12 @@ public: virtual bool IsActivationSupported(const TensorInfo& input, const TensorInfo& output, const ActivationDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsAdditionSupported(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsBatchNormalizationSupported(const TensorInfo& input, const TensorInfo& output, @@ -39,75 +39,76 @@ public: const TensorInfo& beta, const TensorInfo& gamma, const BatchNormalizationDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsBatchToSpaceNdSupported(const TensorInfo& input, const TensorInfo& output, const BatchToSpaceNdDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsConstantSupported(const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsConvertFp16ToFp32Supported(const TensorInfo& input, const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = 
EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsConvertFp32ToFp16Supported(const TensorInfo& input, const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsConvolution2dSupported(const TensorInfo& input, const TensorInfo& output, const Convolution2dDescriptor& descriptor, const TensorInfo& weights, const Optional<TensorInfo>& biases, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsDebugSupported(const TensorInfo& input, const TensorInfo& output, const DebugDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; - virtual bool IsDepthwiseConvolutionSupported(const TensorInfo& input, - const TensorInfo& output, - const DepthwiseConvolution2dDescriptor& descriptor, - const TensorInfo& weights, - const Optional<TensorInfo>& biases, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + virtual bool IsDepthwiseConvolutionSupported( + const TensorInfo& input, + const TensorInfo& output, + const DepthwiseConvolution2dDescriptor& descriptor, + const TensorInfo& weights, + const Optional<TensorInfo>& biases, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsDivisionSupported(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsEqualSupported(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> 
reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsFakeQuantizationSupported(const TensorInfo& input, const FakeQuantizationDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsFloorSupported(const TensorInfo& input, const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsFullyConnectedSupported(const TensorInfo& input, const TensorInfo& output, const TensorInfo& weights, const TensorInfo& biases, const FullyConnectedDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsInputSupported(const TensorInfo& input, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsL2NormalizationSupported(const TensorInfo& input, const TensorInfo& output, const L2NormalizationDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsLstmSupported(const TensorInfo& input, const TensorInfo& outputStateIn, @@ -134,94 +135,99 @@ public: const TensorInfo* projectionBias, const TensorInfo* cellToForgetWeights, const TensorInfo* cellToOutputWeights, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsMaximumSupported(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool 
IsMeanSupported(const TensorInfo& input, const TensorInfo& output, const MeanDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; + + virtual bool IsMemCopySupported(const TensorInfo& input, + const TensorInfo& output, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsMergerSupported(const std::vector<const TensorInfo*> inputs, const TensorInfo& output, const OriginsDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsMinimumSupported(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& ouput, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsMultiplicationSupported(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsNormalizationSupported(const TensorInfo& input, const TensorInfo& output, const NormalizationDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsOutputSupported(const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsPadSupported(const TensorInfo& input, const TensorInfo& output, const PadDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsPermuteSupported(const 
TensorInfo& input, const TensorInfo& output, const PermuteDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsPooling2dSupported(const TensorInfo& input, const TensorInfo& output, const Pooling2dDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsReshapeSupported(const TensorInfo& input, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + const ReshapeDescriptor& descriptor, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsSpaceToBatchNdSupported(const TensorInfo& input, const TensorInfo& output, const SpaceToBatchNdDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsResizeBilinearSupported(const TensorInfo& input, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsRsqrtSupported(const TensorInfo& input, const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsSoftmaxSupported(const TensorInfo& input, const TensorInfo& output, const SoftmaxDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsSplitterSupported(const TensorInfo& input, const ViewsDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool 
IsStridedSliceSupported(const TensorInfo& input, const TensorInfo& output, const StridedSliceDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsSubtractionSupported(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; virtual bool IsGreaterSupported(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& ouput, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const; + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0; }; // class ILayerSupport using ILayerSupportSharedPtr = std::shared_ptr<ILayerSupport>; diff --git a/include/armnn/LayerSupport.hpp b/include/armnn/LayerSupport.hpp index 446dd705b0..8286ec6109 100644 --- a/include/armnn/LayerSupport.hpp +++ b/include/armnn/LayerSupport.hpp @@ -105,16 +105,24 @@ bool IsDivisionSupported(const BackendId& backend, size_t reasonIfUnsupportedMaxLength = 1024); /// Deprecated in favor of IBackend and ILayerSupport interfaces -bool IsSubtractionSupported(const BackendId& backend, - const TensorInfo& input0, - const TensorInfo& input1, - const TensorInfo& output, - char* reasonIfUnsupported = nullptr, - size_t reasonIfUnsupportedMaxLength = 1024); +bool IsEqualSupported(const BackendId& backend, + const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); /// Deprecated in favor of IBackend and ILayerSupport interfaces -bool IsInputSupported(const BackendId& backend, +bool IsFakeQuantizationSupported(const BackendId& backend, + const TensorInfo& input, + const FakeQuantizationDescriptor& descriptor, + char* reasonIfUnsupported = nullptr, + size_t 
reasonIfUnsupportedMaxLength = 1024); + +/// Deprecated in favor of IBackend and ILayerSupport interfaces +bool IsFloorSupported(const BackendId& backend, const TensorInfo& input, + const TensorInfo& output, char* reasonIfUnsupported = nullptr, size_t reasonIfUnsupportedMaxLength = 1024); @@ -129,6 +137,20 @@ bool IsFullyConnectedSupported(const BackendId& backend, size_t reasonIfUnsupportedMaxLength = 1024); /// Deprecated in favor of IBackend and ILayerSupport interfaces +bool IsGreaterSupported(const BackendId& backend, + const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); + +/// Deprecated in favor of IBackend and ILayerSupport interfaces +bool IsInputSupported(const BackendId& backend, + const TensorInfo& input, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); + +/// Deprecated in favor of IBackend and ILayerSupport interfaces bool IsL2NormalizationSupported(const BackendId& backend, const TensorInfo& input, const TensorInfo& output, @@ -161,6 +183,21 @@ bool IsMaximumSupported(const BackendId& backend, size_t reasonIfUnSupportedMaxLength = 0); /// Deprecated in favor of IBackend and ILayerSupport interfaces +bool IsMeanSupported(const BackendId& backend, + const TensorInfo& input, + const TensorInfo& output, + const MeanDescriptor& descriptor, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); + +/// Deprecated in favor of IBackend and ILayerSupport interfaces +bool IsMemCopySupported(const BackendId& backend, + const TensorInfo& input, + const TensorInfo& output, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); + +/// Deprecated in favor of IBackend and ILayerSupport interfaces bool IsMergerSupported(const BackendId& backend, const std::vector<const TensorInfo*> inputs, const TensorInfo& output, @@ -169,6 +206,14 @@ bool 
IsMergerSupported(const BackendId& backend, size_t reasonIfUnsupportedMaxLength = 1024); /// Deprecated in favor of IBackend and ILayerSupport interfaces +bool IsMinimumSupported(const BackendId& backend, + const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); + +/// Deprecated in favor of IBackend and ILayerSupport interfaces bool IsMultiplicationSupported(const BackendId& backend, const TensorInfo& input0, const TensorInfo& input1, @@ -191,6 +236,14 @@ bool IsOutputSupported(const BackendId& backend, size_t reasonIfUnsupportedMaxLength = 1024); /// Deprecated in favor of IBackend and ILayerSupport interfaces +bool IsPadSupported(const BackendId& backend, + const TensorInfo& input, + const TensorInfo& output, + const PadDescriptor& descriptor, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); + +/// Deprecated in favor of IBackend and ILayerSupport interfaces bool IsPermuteSupported(const BackendId& backend, const TensorInfo& input, const TensorInfo& output, @@ -207,12 +260,26 @@ bool IsPooling2dSupported(const BackendId& backend, size_t reasonIfUnsupportedMaxLength = 1024); /// Deprecated in favor of IBackend and ILayerSupport interfaces +bool IsReshapeSupported(const BackendId& backend, + const TensorInfo& input, + const ReshapeDescriptor& descriptor, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); + +/// Deprecated in favor of IBackend and ILayerSupport interfaces bool IsResizeBilinearSupported(const BackendId& backend, const TensorInfo& input, char* reasonIfUnsupported = nullptr, size_t reasonIfUnsupportedMaxLength = 1024); /// Deprecated in favor of IBackend and ILayerSupport interfaces +bool IsRsqrtSupported(const BackendId& backend, + const TensorInfo& input, + const TensorInfo& output, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); + 
+/// Deprecated in favor of IBackend and ILayerSupport interfaces bool IsSoftmaxSupported(const BackendId& backend, const TensorInfo& input, const TensorInfo& output, @@ -236,49 +303,6 @@ bool IsSplitterSupported(const BackendId& backend, size_t reasonIfUnsupportedMaxLength = 1024); /// Deprecated in favor of IBackend and ILayerSupport interfaces -bool IsFakeQuantizationSupported(const BackendId& backend, - const TensorInfo& input, - const FakeQuantizationDescriptor& descriptor, - char* reasonIfUnsupported = nullptr, - size_t reasonIfUnsupportedMaxLength = 1024); - -/// Deprecated in favor of IBackend and ILayerSupport interfaces -bool IsReshapeSupported(const BackendId& backend, - const TensorInfo& input, - char* reasonIfUnsupported = nullptr, - size_t reasonIfUnsupportedMaxLength = 1024); - -/// Deprecated in favor of IBackend and ILayerSupport interfaces -bool IsRsqrtSupported(const BackendId& backend, - const TensorInfo& input, - const TensorInfo& output, - char* reasonIfUnsupported = nullptr, - size_t reasonIfUnsupportedMaxLength = 1024); - -/// Deprecated in favor of IBackend and ILayerSupport interfaces -bool IsFloorSupported(const BackendId& backend, - const TensorInfo& input, - const TensorInfo& output, - char* reasonIfUnsupported = nullptr, - size_t reasonIfUnsupportedMaxLength = 1024); - -/// Deprecated in favor of IBackend and ILayerSupport interfaces -bool IsMeanSupported(const BackendId& backend, - const TensorInfo& input, - const TensorInfo& output, - const MeanDescriptor& descriptor, - char* reasonIfUnsupported = nullptr, - size_t reasonIfUnsupportedMaxLength = 1024); - -/// Deprecated in favor of IBackend and ILayerSupport interfaces -bool IsPadSupported(const BackendId& backend, - const TensorInfo& input, - const TensorInfo& output, - const PadDescriptor& descriptor, - char* reasonIfUnsupported = nullptr, - size_t reasonIfUnsupportedMaxLength = 1024); - -/// Deprecated in favor of IBackend and ILayerSupport interfaces bool 
IsStridedSliceSupported(const BackendId& backend, const TensorInfo& input, const TensorInfo& output, @@ -287,26 +311,10 @@ bool IsStridedSliceSupported(const BackendId& backend, size_t reasonIfUnsupportedMaxLength = 1024); /// Deprecated in favor of IBackend and ILayerSupport interfaces -bool IsMinimumSupported(const BackendId& backend, - const TensorInfo& input0, - const TensorInfo& input1, - const TensorInfo& output, - char* reasonIfUnsupported = nullptr, - size_t reasonIfUnsupportedMaxLength = 1024); - -/// Deprecated in favor of IBackend and ILayerSupport interfaces -bool IsGreaterSupported(const BackendId& backend, - const TensorInfo& input0, - const TensorInfo& input1, - const TensorInfo& output, - char* reasonIfUnsupported = nullptr, - size_t reasonIfUnsupportedMaxLength = 1024); - -/// Deprecated in favor of IBackend and ILayerSupport interfaces -bool IsEqualSupported(const BackendId& backend, - const TensorInfo& input0, - const TensorInfo& input1, - const TensorInfo& output, - char* reasonIfUnsupported = nullptr, - size_t reasonIfUnsupportedMaxLength = 1024); +bool IsSubtractionSupported(const BackendId& backend, + const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); } |