diff options
author | Matteo Martincigh <matteo.martincigh@arm.com> | 2019-01-10 17:34:20 +0000 |
---|---|---|
committer | Matteo Martincigh <matteo.martincigh@arm.com> | 2019-01-11 12:20:47 +0000 |
commit | 992d6dc57d8463729910b688f0fb5825d0d3ccf2 (patch) | |
tree | 87b504d174848169550240f300f359dd57aaa1fd /src/armnn/LayerSupport.cpp | |
parent | 1f0ff35236c1dd05954735f7fed9c2807770479e (diff) | |
download | armnn-992d6dc57d8463729910b688f0fb5825d0d3ccf2.tar.gz |
IVGCVSW-2454 Refactor ArmNN to support pluggable backends from a separate
code base
* Made the virtual functions in ILayerSupport.hpp pure
* Created a LayerSupportBase class with the default implementation of
the interface
* Made the backend layer support classes inherit from the base
class, instead of directly from the interface
* Refactored the profiler and the profiling event classes to use
the BackendId instead of the Compute
* Implemented a proper MemCopy support method
* Changed Compute to BackendId in the profiling API and objects
* Removed static references to pluggable backends
!android-nn-driver:492
Change-Id: Id6332b5f48c980819e0a09adc818d1effd057296
Diffstat (limited to 'src/armnn/LayerSupport.cpp')
-rw-r--r-- | src/armnn/LayerSupport.cpp | 239 |
1 file changed, 125 insertions, 114 deletions
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp index cf6ce27dda..b0b3eccb02 100644 --- a/src/armnn/LayerSupport.cpp +++ b/src/armnn/LayerSupport.cpp @@ -16,11 +16,9 @@ #include <unordered_map> #include <armnn/ArmNN.hpp> -namespace armnn -{ - namespace { + /// Helper function to copy a full string to a truncated version. void CopyErrorMessage(char* truncatedString, const char* fullString, size_t maxLength) { @@ -33,10 +31,13 @@ void CopyErrorMessage(char* truncatedString, const char* fullString, size_t maxL } } -} +} // anonymous namespace + +namespace armnn +{ // Helper macro to avoid code duplication. -// Forwards function func to funcRef, funcNeon or funcCl, depending on the value of compute. +// Forwards function func to funcRef, funcNeon or funcCl, depending on the value of backendId. #define FORWARD_LAYER_SUPPORT_FUNC(backendId, func, ...) \ std::string reasonIfUnsupportedFull; \ bool isSupported; \ @@ -177,6 +178,18 @@ bool IsDebugSupported(const BackendId& backend, FORWARD_LAYER_SUPPORT_FUNC(backend, IsDebugSupported, input, output, descriptor); } +bool IsDepthwiseConvolutionSupported(const BackendId& backend, + const TensorInfo& input, + const TensorInfo& output, + const DepthwiseConvolution2dDescriptor& descriptor, + const TensorInfo& weights, + const Optional<TensorInfo>& biases, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + FORWARD_LAYER_SUPPORT_FUNC(backend, IsDepthwiseConvolutionSupported, input, output, descriptor, weights, biases); +} + bool IsDivisionSupported(const BackendId& backend, const TensorInfo& input0, const TensorInfo& input1, @@ -187,36 +200,39 @@ bool IsDivisionSupported(const BackendId& backend, FORWARD_LAYER_SUPPORT_FUNC(backend, IsDivisionSupported, input0, input1, output); } -bool IsSubtractionSupported(const BackendId& backend, - const TensorInfo& input0, - const TensorInfo& input1, - const TensorInfo& output, - char* reasonIfUnsupported, - size_t reasonIfUnsupportedMaxLength) +bool 
IsEqualSupported(const BackendId& backend, + const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) { - FORWARD_LAYER_SUPPORT_FUNC(backend, IsSubtractionSupported, input0, input1, output); + FORWARD_LAYER_SUPPORT_FUNC(backend, IsEqualSupported, input0, input1, output); } -bool IsDepthwiseConvolutionSupported(const BackendId& backend, - const TensorInfo& input, - const TensorInfo& output, - const DepthwiseConvolution2dDescriptor& descriptor, - const TensorInfo& weights, - const Optional<TensorInfo>& biases, - char* reasonIfUnsupported, - size_t reasonIfUnsupportedMaxLength) +bool IsFakeQuantizationSupported(const BackendId& backend, + const TensorInfo& input, + const FakeQuantizationDescriptor& descriptor, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) { - FORWARD_LAYER_SUPPORT_FUNC(backend, IsDepthwiseConvolutionSupported, input, output, descriptor, weights, biases); + FORWARD_LAYER_SUPPORT_FUNC(backend, IsFakeQuantizationSupported, input, descriptor); } -bool IsInputSupported(const BackendId& backend, +bool IsFloorSupported(const BackendId& backend, const TensorInfo& input, + const TensorInfo& output, char* reasonIfUnsupported, size_t reasonIfUnsupportedMaxLength) { - FORWARD_LAYER_SUPPORT_FUNC(backend, IsInputSupported, input); -} + // By definition (that is, regardless of compute device), shapes and data type must match. 
+ if (input.GetShape() != output.GetShape() || input.GetDataType() != output.GetDataType()) + { + return false; + } + FORWARD_LAYER_SUPPORT_FUNC(backend, IsFloorSupported, input, output); +} bool IsFullyConnectedSupported(const BackendId& backend, const TensorInfo& input, const TensorInfo& output, @@ -229,6 +245,25 @@ bool IsFullyConnectedSupported(const BackendId& backend, FORWARD_LAYER_SUPPORT_FUNC(backend, IsFullyConnectedSupported, input, output, weights, biases, descriptor); } +bool IsGreaterSupported(const BackendId& backend, + const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + FORWARD_LAYER_SUPPORT_FUNC(backend, IsGreaterSupported, input0, input1, output); +} + +bool IsInputSupported(const BackendId& backend, + const TensorInfo& input, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + FORWARD_LAYER_SUPPORT_FUNC(backend, IsInputSupported, input); +} + + bool IsL2NormalizationSupported(const BackendId& backend, const TensorInfo& input, const TensorInfo& output, @@ -276,6 +311,25 @@ bool IsMaximumSupported(const BackendId& backend, FORWARD_LAYER_SUPPORT_FUNC(backend, IsMaximumSupported, input0, input1, output); } +bool IsMeanSupported(const BackendId& backend, + const TensorInfo& input, + const TensorInfo& output, + const MeanDescriptor& descriptor, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + FORWARD_LAYER_SUPPORT_FUNC(backend, IsMeanSupported, input, output, descriptor); +} + +bool IsMemCopySupported(const BackendId &backend, + const TensorInfo &input, + const TensorInfo &output, + char *reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + FORWARD_LAYER_SUPPORT_FUNC(backend, IsMemCopySupported, input, output); +} + bool IsMergerSupported(const BackendId& backend, std::vector<const TensorInfo*> inputs, const TensorInfo& output, @@ -287,6 +341,16 @@ bool IsMergerSupported(const BackendId& backend, 
FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergerSupported, inputs, output, descriptor); } +bool IsMinimumSupported(const BackendId& backend, + const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + FORWARD_LAYER_SUPPORT_FUNC(backend, IsMinimumSupported, input0, input1, output); +} + bool IsMultiplicationSupported(const BackendId& backend, const TensorInfo& input0, const TensorInfo& input1, @@ -315,6 +379,17 @@ bool IsOutputSupported(const BackendId& backend, FORWARD_LAYER_SUPPORT_FUNC(backend, IsOutputSupported, output); } +bool IsPadSupported(const BackendId& backend, + const TensorInfo& input, + const TensorInfo& output, + const PadDescriptor& descriptor, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + + FORWARD_LAYER_SUPPORT_FUNC(backend, IsPadSupported, input, output, descriptor); +} + bool IsPermuteSupported(const BackendId& backend, const TensorInfo& input, const TensorInfo& output, @@ -335,6 +410,15 @@ bool IsPooling2dSupported(const BackendId& backend, FORWARD_LAYER_SUPPORT_FUNC(backend, IsPooling2dSupported, input, output, descriptor); } +bool IsReshapeSupported(const BackendId& backend, + const TensorInfo& input, + const ReshapeDescriptor& descriptor, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + FORWARD_LAYER_SUPPORT_FUNC(backend, IsReshapeSupported, input, descriptor); +} + bool IsResizeBilinearSupported(const BackendId& backend, const TensorInfo& input, char* reasonIfUnsupported, @@ -343,6 +427,15 @@ bool IsResizeBilinearSupported(const BackendId& backend, FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeBilinearSupported, input); } +bool IsRsqrtSupported(const BackendId& backend, + const TensorInfo& input, + const TensorInfo& output, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + FORWARD_LAYER_SUPPORT_FUNC(backend, IsRsqrtSupported, input, output); +} + bool IsSoftmaxSupported(const 
BackendId& backend, const TensorInfo& input, const TensorInfo& output, @@ -372,68 +465,6 @@ bool IsSplitterSupported(const BackendId& backend, FORWARD_LAYER_SUPPORT_FUNC(backend, IsSplitterSupported, input, descriptor); } -bool IsFakeQuantizationSupported(const BackendId& backend, - const TensorInfo& input, - const FakeQuantizationDescriptor& descriptor, - char* reasonIfUnsupported, - size_t reasonIfUnsupportedMaxLength) -{ - FORWARD_LAYER_SUPPORT_FUNC(backend, IsFakeQuantizationSupported, input, descriptor); -} - -bool IsReshapeSupported(const BackendId& backend, - const TensorInfo& input, - char* reasonIfUnsupported, - size_t reasonIfUnsupportedMaxLength) -{ - FORWARD_LAYER_SUPPORT_FUNC(backend, IsReshapeSupported, input); -} - -bool IsRsqrtSupported(const BackendId& backend, - const TensorInfo& input, - const TensorInfo& output, - char* reasonIfUnsupported, - size_t reasonIfUnsupportedMaxLength) -{ - FORWARD_LAYER_SUPPORT_FUNC(backend, IsRsqrtSupported, input, output); -} - -bool IsFloorSupported(const BackendId& backend, - const TensorInfo& input, - const TensorInfo& output, - char* reasonIfUnsupported, - size_t reasonIfUnsupportedMaxLength) -{ - // By definition (that is, regardless of compute device), shapes and data type must match. 
- if (input.GetShape() != output.GetShape() || input.GetDataType() != output.GetDataType()) - { - return false; - } - - FORWARD_LAYER_SUPPORT_FUNC(backend, IsFloorSupported, input, output); -} - -bool IsMeanSupported(const BackendId& backend, - const TensorInfo& input, - const TensorInfo& output, - const MeanDescriptor& descriptor, - char* reasonIfUnsupported, - size_t reasonIfUnsupportedMaxLength) -{ - FORWARD_LAYER_SUPPORT_FUNC(backend, IsMeanSupported, input, output, descriptor); -} - -bool IsPadSupported(const BackendId& backend, - const TensorInfo& input, - const TensorInfo& output, - const PadDescriptor& descriptor, - char* reasonIfUnsupported, - size_t reasonIfUnsupportedMaxLength) -{ - - FORWARD_LAYER_SUPPORT_FUNC(backend, IsPadSupported, input, output, descriptor); -} - bool IsStridedSliceSupported(const BackendId& backend, const TensorInfo& input, const TensorInfo& output, @@ -444,34 +475,14 @@ bool IsStridedSliceSupported(const BackendId& backend, FORWARD_LAYER_SUPPORT_FUNC(backend, IsStridedSliceSupported, input, output, descriptor); } -bool IsMinimumSupported(const BackendId& backend, - const TensorInfo& input0, - const TensorInfo& input1, - const TensorInfo& output, - char* reasonIfUnsupported, - size_t reasonIfUnsupportedMaxLength) -{ - FORWARD_LAYER_SUPPORT_FUNC(backend, IsMinimumSupported, input0, input1, output); -} - -bool IsGreaterSupported(const BackendId& backend, - const TensorInfo& input0, - const TensorInfo& input1, - const TensorInfo& output, - char* reasonIfUnsupported, - size_t reasonIfUnsupportedMaxLength) -{ - FORWARD_LAYER_SUPPORT_FUNC(backend, IsGreaterSupported, input0, input1, output); -} - -bool IsEqualSupported(const BackendId& backend, - const TensorInfo& input0, - const TensorInfo& input1, - const TensorInfo& output, - char* reasonIfUnsupported, - size_t reasonIfUnsupportedMaxLength) +bool IsSubtractionSupported(const BackendId& backend, + const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + char* 
reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) { - FORWARD_LAYER_SUPPORT_FUNC(backend, IsEqualSupported, input0, input1, output); + FORWARD_LAYER_SUPPORT_FUNC(backend, IsSubtractionSupported, input0, input1, output); } -} +} // namespace armnn |