From 4a3c61091037e7e86e8b03bb060d8c1ab82731a9 Mon Sep 17 00:00:00 2001 From: josh minor Date: Mon, 6 Jan 2020 16:40:46 -0600 Subject: IVGCVSW-4259 Add frontend and reference workload for UnaryOperationLayer * Added new layer named ElementwiseUnary * Deprecated existing Abs/Rsqrt layer functions * Updated existing Abs/Rsqrt test infrastructure to use new layer * Added boilerplate for new Exp,Neg,Sqrt elemwise op layers * AbsQuantize test removed pending future commit * Serialization support added !android-nn-driver:2550 Change-Id: Ic595c645925e17b45db568187fd05646daf2e87f Signed-off-by: josh minor --- Android.mk | 3 +- CMakeLists.txt | 6 +- include/armnn/Descriptors.hpp | 20 +++ include/armnn/DescriptorsFwd.hpp | 1 + include/armnn/ILayerSupport.hpp | 7 + include/armnn/ILayerVisitor.hpp | 10 ++ include/armnn/INetwork.hpp | 9 ++ include/armnn/LayerVisitorBase.hpp | 4 + include/armnn/Types.hpp | 9 ++ include/armnn/TypesUtils.hpp | 13 ++ src/armnn/InternalTypes.cpp | 3 +- src/armnn/InternalTypes.hpp | 5 +- src/armnn/LayerSupport.cpp | 6 +- src/armnn/LayersFwd.hpp | 6 +- src/armnn/Network.cpp | 10 +- src/armnn/Network.hpp | 5 + src/armnn/QuantizerVisitor.cpp | 20 ++- src/armnn/QuantizerVisitor.hpp | 7 +- src/armnn/layers/ElementwiseUnaryLayer.cpp | 62 ++++++++ src/armnn/layers/ElementwiseUnaryLayer.hpp | 48 ++++++ src/armnn/test/CreateWorkload.hpp | 37 +---- src/armnn/test/QuantizerTest.cpp | 55 ------- .../test/TestNameAndDescriptorLayerVisitor.cpp | 7 + .../test/TestNameAndDescriptorLayerVisitor.hpp | 1 + src/armnn/test/TestNameOnlyLayerVisitor.cpp | 2 - src/armnn/test/TestNameOnlyLayerVisitor.hpp | 2 - src/armnnDeserializer/Deserializer.cpp | 53 ++++++- src/armnnDeserializer/Deserializer.hpp | 1 + src/armnnSerializer/ArmnnSchema.fbs | 25 ++- src/armnnSerializer/Serializer.cpp | 15 ++ src/armnnSerializer/Serializer.hpp | 6 + src/armnnSerializer/SerializerSupport.md | 7 +- src/armnnSerializer/SerializerUtils.cpp | 19 +++ src/armnnSerializer/SerializerUtils.hpp | 2 + 
src/armnnSerializer/test/SerializerTests.cpp | 51 ------- src/armnnTfParser/TfParser.cpp | 3 +- src/backends/backendsCommon/LayerSupportBase.cpp | 23 +++ src/backends/backendsCommon/LayerSupportBase.hpp | 7 + src/backends/backendsCommon/WorkloadData.cpp | 24 +++ src/backends/backendsCommon/WorkloadData.hpp | 5 + src/backends/backendsCommon/WorkloadFactory.cpp | 37 ++--- src/backends/backendsCommon/WorkloadFactory.hpp | 5 + .../backendsCommon/WorkloadFactoryBase.hpp | 16 ++ src/backends/backendsCommon/common.mk | 1 + .../backendsCommon/test/AbsEndToEndTestImpl.hpp | 65 -------- src/backends/backendsCommon/test/CMakeLists.txt | 4 +- .../test/ElementwiseUnaryEndToEndTestImpl.hpp | 77 ++++++++++ .../test/IsLayerSupportedTestImpl.hpp | 6 +- src/backends/backendsCommon/test/LayerTests.hpp | 1 + .../backendsCommon/test/layerTests/AbsTestImpl.cpp | 164 ++++---------------- .../test/layerTests/ElementwiseTestImpl.hpp | 3 +- .../test/layerTests/ElementwiseUnaryTestImpl.cpp | 14 ++ .../test/layerTests/ElementwiseUnaryTestImpl.hpp | 113 ++++++++++++++ .../test/layerTests/RsqrtTestImpl.cpp | 167 +++++---------------- src/backends/cl/ClLayerSupport.cpp | 32 +++- src/backends/cl/ClLayerSupport.hpp | 7 + src/backends/cl/ClWorkloadFactory.cpp | 36 ++++- src/backends/cl/ClWorkloadFactory.hpp | 5 + src/backends/cl/test/ClCreateWorkloadTests.cpp | 14 +- src/backends/cl/test/ClEndToEndTests.cpp | 12 +- src/backends/neon/NeonLayerSupport.cpp | 32 +++- src/backends/neon/NeonLayerSupport.hpp | 7 + src/backends/neon/NeonWorkloadFactory.cpp | 37 ++++- src/backends/neon/NeonWorkloadFactory.hpp | 5 + src/backends/neon/test/NeonCreateWorkloadTests.cpp | 30 ---- src/backends/neon/test/NeonEndToEndTests.cpp | 12 +- src/backends/reference/RefLayerSupport.cpp | 85 +++++------ src/backends/reference/RefLayerSupport.hpp | 9 +- src/backends/reference/RefWorkloadFactory.cpp | 18 ++- src/backends/reference/RefWorkloadFactory.hpp | 5 + src/backends/reference/backend.mk | 5 +- 
.../reference/test/RefCreateWorkloadTests.cpp | 35 ----- src/backends/reference/test/RefEndToEndTests.cpp | 34 ++++- src/backends/reference/workloads/Abs.cpp | 23 --- src/backends/reference/workloads/Abs.hpp | 23 +-- src/backends/reference/workloads/Broadcast.cpp | 21 ++- src/backends/reference/workloads/Broadcast.hpp | 35 ++++- src/backends/reference/workloads/CMakeLists.txt | 10 +- .../reference/workloads/ElementwiseFunction.cpp | 58 ++++--- .../reference/workloads/ElementwiseFunction.hpp | 26 +++- src/backends/reference/workloads/Exp.hpp | 22 +++ .../reference/workloads/RefAbsWorkload.cpp | 37 ----- .../reference/workloads/RefAbsWorkload.hpp | 21 --- .../reference/workloads/RefComparisonWorkload.cpp | 12 +- .../workloads/RefElementwiseUnaryWorkload.cpp | 95 ++++++++++++ .../workloads/RefElementwiseUnaryWorkload.hpp | 33 ++++ .../reference/workloads/RefElementwiseWorkload.cpp | 12 +- .../reference/workloads/RefElementwiseWorkload.hpp | 4 +- .../reference/workloads/RefRsqrtWorkload.cpp | 37 ----- .../reference/workloads/RefRsqrtWorkload.hpp | 21 --- src/backends/reference/workloads/RefWorkloads.hpp | 4 +- src/backends/reference/workloads/Rsqrt.cpp | 25 --- src/backends/reference/workloads/Rsqrt.hpp | 23 +-- src/backends/reference/workloads/Sqrt.hpp | 22 +++ 94 files changed, 1343 insertions(+), 913 deletions(-) create mode 100644 src/armnn/layers/ElementwiseUnaryLayer.cpp create mode 100644 src/armnn/layers/ElementwiseUnaryLayer.hpp delete mode 100644 src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp create mode 100644 src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp create mode 100644 src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.cpp create mode 100644 src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp delete mode 100644 src/backends/reference/workloads/Abs.cpp create mode 100644 src/backends/reference/workloads/Exp.hpp delete mode 100644 src/backends/reference/workloads/RefAbsWorkload.cpp 
delete mode 100644 src/backends/reference/workloads/RefAbsWorkload.hpp create mode 100644 src/backends/reference/workloads/RefElementwiseUnaryWorkload.cpp create mode 100644 src/backends/reference/workloads/RefElementwiseUnaryWorkload.hpp delete mode 100644 src/backends/reference/workloads/RefRsqrtWorkload.cpp delete mode 100644 src/backends/reference/workloads/RefRsqrtWorkload.hpp delete mode 100644 src/backends/reference/workloads/Rsqrt.cpp create mode 100644 src/backends/reference/workloads/Sqrt.hpp diff --git a/Android.mk b/Android.mk index 60d1f7ba58..86f1602f15 100644 --- a/Android.mk +++ b/Android.mk @@ -120,7 +120,6 @@ LOCAL_SRC_FILES := \ src/armnnUtils/NetworkSockets.cpp \ src/armnnUtils/Filesystem.cpp \ src/armnnUtils/Processes.cpp \ - src/armnn/layers/AbsLayer.cpp \ src/armnn/layers/ActivationLayer.cpp \ src/armnn/layers/AdditionLayer.cpp \ src/armnn/layers/ArgMinMaxLayer.cpp \ @@ -139,6 +138,7 @@ LOCAL_SRC_FILES := \ src/armnn/layers/DetectionPostProcessLayer.cpp \ src/armnn/layers/DivisionLayer.cpp \ src/armnn/layers/ElementwiseBaseLayer.cpp \ + src/armnn/layers/ElementwiseUnaryLayer.cpp \ src/armnn/layers/FakeQuantizationLayer.cpp \ src/armnn/layers/FloorLayer.cpp \ src/armnn/layers/FullyConnectedLayer.cpp \ @@ -166,7 +166,6 @@ LOCAL_SRC_FILES := \ src/armnn/layers/QuantizedLstmLayer.cpp \ src/armnn/layers/ReshapeLayer.cpp \ src/armnn/layers/ResizeLayer.cpp \ - src/armnn/layers/RsqrtLayer.cpp \ src/armnn/layers/SliceLayer.cpp \ src/armnn/layers/SoftmaxLayer.cpp \ src/armnn/layers/SpaceToBatchNdLayer.cpp \ diff --git a/CMakeLists.txt b/CMakeLists.txt index 14c2c0c90f..91b9909a08 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -244,8 +244,6 @@ list(APPEND armnn_sources include/armnn/Version.hpp src/armnn/layers/LayerCloneBase.hpp src/armnn/layers/LayerWithParameters.hpp - src/armnn/layers/AbsLayer.hpp - src/armnn/layers/AbsLayer.cpp src/armnn/layers/ActivationLayer.hpp src/armnn/layers/ActivationLayer.cpp src/armnn/layers/AdditionLayer.hpp @@ 
-280,6 +278,8 @@ list(APPEND armnn_sources src/armnn/layers/DetectionPostProcessLayer.cpp src/armnn/layers/ElementwiseBaseLayer.hpp src/armnn/layers/ElementwiseBaseLayer.cpp + src/armnn/layers/ElementwiseUnaryLayer.hpp + src/armnn/layers/ElementwiseUnaryLayer.cpp src/armnn/layers/FakeQuantizationLayer.hpp src/armnn/layers/FakeQuantizationLayer.cpp src/armnn/layers/FloorLayer.hpp @@ -336,8 +336,6 @@ list(APPEND armnn_sources src/armnn/layers/ReshapeLayer.cpp src/armnn/layers/ResizeLayer.hpp src/armnn/layers/ResizeLayer.cpp - src/armnn/layers/RsqrtLayer.cpp - src/armnn/layers/RsqrtLayer.hpp src/armnn/layers/SliceLayer.cpp src/armnn/layers/SliceLayer.hpp src/armnn/layers/SoftmaxLayer.hpp diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp index ba9a56ad38..45c0f421f3 100644 --- a/include/armnn/Descriptors.hpp +++ b/include/armnn/Descriptors.hpp @@ -78,6 +78,26 @@ struct ComparisonDescriptor ComparisonOperation m_Operation; }; +/// A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer +struct ElementwiseUnaryDescriptor +{ + ElementwiseUnaryDescriptor() + : ElementwiseUnaryDescriptor(UnaryOperation::Abs) + {} + + ElementwiseUnaryDescriptor(UnaryOperation operation) + : m_Operation(operation) + {} + + bool operator ==(const ElementwiseUnaryDescriptor &rhs) const + { + return m_Operation == rhs.m_Operation; + } + + /// Specifies the elementwiseUnary operation to execute + UnaryOperation m_Operation; +}; + /// A PermuteDescriptor for the PermuteLayer. 
struct PermuteDescriptor { diff --git a/include/armnn/DescriptorsFwd.hpp b/include/armnn/DescriptorsFwd.hpp index cfdef8a030..d03c61d452 100644 --- a/include/armnn/DescriptorsFwd.hpp +++ b/include/armnn/DescriptorsFwd.hpp @@ -16,6 +16,7 @@ struct ComparisonDescriptor; struct Convolution2dDescriptor; struct DepthwiseConvolution2dDescriptor; struct DetectionPostProcessDescriptor; +struct ElementwiseUnaryDescriptor; struct FakeQuantizationDescriptor; struct FullyConnectedDescriptor; struct InstanceNormalizationDescriptor; diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp index 452200291e..1615d3e24e 100644 --- a/include/armnn/ILayerSupport.hpp +++ b/include/armnn/ILayerSupport.hpp @@ -27,6 +27,7 @@ protected: virtual ~ILayerSupport() {} public: + ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead") virtual bool IsAbsSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const = 0; @@ -133,6 +134,11 @@ public: const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const = 0; + virtual bool IsElementwiseUnarySupported(const TensorInfo& input, + const TensorInfo& output, + const ElementwiseUnaryDescriptor& descriptor, + Optional reasonIfUnsupported = EmptyOptional()) const = 0; + ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead") virtual bool IsEqualSupported(const TensorInfo& input0, const TensorInfo& input1, @@ -292,6 +298,7 @@ public: const ResizeDescriptor& descriptor, Optional reasonIfUnsupported = EmptyOptional()) const = 0; + ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead") virtual bool IsRsqrtSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const = 0; diff --git a/include/armnn/ILayerVisitor.hpp b/include/armnn/ILayerVisitor.hpp index 9669b3a7cb..46f9e5698f 100644 --- a/include/armnn/ILayerVisitor.hpp +++ b/include/armnn/ILayerVisitor.hpp @@ -24,6 +24,7 @@ public: /// 
function is invoked. /// @param layer - pointer to the layer which is calling back to this visit function. /// @param name - Optional name for the layer. + ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead") virtual void VisitAbsLayer(const IConnectableLayer* layer, const char* name = nullptr) = 0; @@ -168,6 +169,14 @@ public: virtual void VisitDivisionLayer(const IConnectableLayer* layer, const char* name = nullptr) = 0; + /// Function a ElementwiseUnary layer should call back to when its Accept(ILayerVisitor&) function is invoked. + /// @param layer - pointer to the layer which is calling back to this visit function. + /// @param elementwiseUnaryDescriptor - Description of the layer. + /// @param name - Optional name for the layer. + virtual void VisitElementwiseUnaryLayer(const IConnectableLayer* layer, + const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor, + const char* name = nullptr) = 0; + /// Function an Equal layer should call back to when its Accept(ILayerVisitor&) function is invoked. /// @param layer - pointer to the layer which is calling back to this visit function. /// @param name - Optional name for the layer. @@ -388,6 +397,7 @@ public: /// function is invoked. /// @param layer - pointer to the layer which is calling back to this visit function. /// @param name - Optional name for the layer. + ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead") virtual void VisitRsqrtLayer(const IConnectableLayer* layer, const char* name = nullptr) = 0; diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp index 647f072804..1b1c874f8c 100644 --- a/include/armnn/INetwork.hpp +++ b/include/armnn/INetwork.hpp @@ -196,6 +196,13 @@ public: const ConstTensor& anchors, const char* name = nullptr) = 0; + /// Add an ElementwiseUnary layer to the network. + /// @param name - Optional name for the layer. + /// @param desc - Descriptor for the elementwiseUnary operation. + /// @ return - Interface for configuring the layer. 
+ virtual IConnectableLayer* AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor, + const char* name = nullptr) = 0; + /// Adds a fully connected layer to the network. /// @param fullyConnectedDescriptor - Description of the fully connected layer. /// @param weights - Tensor for the weights data. @@ -297,6 +304,7 @@ public: /// Add absolute layer to the network. /// @param name - Optional name for the layer. /// @ return - Interface for configuring the layer. + ARMNN_DEPRECATED_MSG("Use AddElementwiseUnaryLayer instead") virtual IConnectableLayer* AddAbsLayer(const char* name = nullptr) = 0; /// Adds an addition layer to the network. @@ -474,6 +482,7 @@ public: /// Add Reciprocal of square root layer to the network. /// @param name - Optional name for the layer. /// @ return - Interface for configuring the layer. + ARMNN_DEPRECATED_MSG("Use AddElementwiseUnaryLayer instead") virtual IConnectableLayer* AddRsqrtLayer(const char* name = nullptr) = 0; /// Add Gather layer to the network. 
diff --git a/include/armnn/LayerVisitorBase.hpp b/include/armnn/LayerVisitorBase.hpp index 388fc6f922..6fd9a66c76 100644 --- a/include/armnn/LayerVisitorBase.hpp +++ b/include/armnn/LayerVisitorBase.hpp @@ -94,6 +94,10 @@ public: void VisitDivisionLayer(const IConnectableLayer*, const char*) override { DefaultPolicy::Apply(__func__); } + void VisitElementwiseUnaryLayer(const IConnectableLayer*, + const ElementwiseUnaryDescriptor&, + const char*) override { DefaultPolicy::Apply(__func__); } + void VisitEqualLayer(const IConnectableLayer*, const char*) override { DefaultPolicy::Apply(__func__); } diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp index 5ea214e1dc..1ab5660109 100644 --- a/include/armnn/Types.hpp +++ b/include/armnn/Types.hpp @@ -80,6 +80,15 @@ enum class ComparisonOperation NotEqual = 5 }; +enum class UnaryOperation +{ + Abs = 0, + Exp = 1, + Sqrt = 2, + Rsqrt = 3, + Neg = 4 +}; + enum class PoolingAlgorithm { Max = 0, diff --git a/include/armnn/TypesUtils.hpp b/include/armnn/TypesUtils.hpp index 8157d4f043..790f57a432 100644 --- a/include/armnn/TypesUtils.hpp +++ b/include/armnn/TypesUtils.hpp @@ -66,6 +66,19 @@ constexpr char const* GetComparisonOperationAsCString(ComparisonOperation operat } } +constexpr char const* GetUnaryOperationAsCString(UnaryOperation operation) +{ + switch (operation) + { + case UnaryOperation::Abs: return "Abs"; + case UnaryOperation::Exp: return "Exp"; + case UnaryOperation::Sqrt: return "Sqrt"; + case UnaryOperation::Rsqrt: return "Rsqrt"; + case UnaryOperation::Neg: return "Neg"; + default: return "Unknown"; + } +} + constexpr char const* GetPoolingAlgorithmAsCString(PoolingAlgorithm pooling) { switch (pooling) diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp index 8c2a0f77e0..10e7f501b7 100644 --- a/src/armnn/InternalTypes.cpp +++ b/src/armnn/InternalTypes.cpp @@ -14,7 +14,6 @@ char const* GetLayerTypeAsCString(LayerType type) { switch (type) { - case LayerType::Abs: return "Abs"; case 
LayerType::Activation: return "Activation"; case LayerType::Addition: return "Addition"; case LayerType::ArgMinMax: return "ArgMinMax"; @@ -32,6 +31,7 @@ char const* GetLayerTypeAsCString(LayerType type) case LayerType::Dequantize: return "Dequantize"; case LayerType::DetectionPostProcess: return "DetectionPostProcess"; case LayerType::Division: return "Division"; + case LayerType::ElementwiseUnary: return "ElementwiseUnary"; case LayerType::FakeQuantization: return "FakeQuantization"; case LayerType::Floor: return "Floor"; case LayerType::FullyConnected: return "FullyConnected"; @@ -58,7 +58,6 @@ char const* GetLayerTypeAsCString(LayerType type) case LayerType::Quantize: return "Quantize"; case LayerType::QuantizedLstm: return "QuantizedLstm"; case LayerType::Reshape: return "Reshape"; - case LayerType::Rsqrt: return "Rsqrt"; case LayerType::Resize: return "Resize"; case LayerType::Slice: return "Slice"; case LayerType::Softmax: return "Softmax"; diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp index 36e7280e96..2d7be3cac6 100644 --- a/src/armnn/InternalTypes.hpp +++ b/src/armnn/InternalTypes.hpp @@ -14,8 +14,7 @@ namespace armnn enum class LayerType { FirstLayer, - Abs = FirstLayer, - Activation, + Activation = FirstLayer, Addition, ArgMinMax, BatchNormalization, @@ -32,6 +31,7 @@ enum class LayerType Dequantize, DetectionPostProcess, Division, + ElementwiseUnary, FakeQuantization, Floor, FullyConnected, @@ -59,7 +59,6 @@ enum class LayerType QuantizedLstm, Reshape, Resize, - Rsqrt, Slice, Softmax, SpaceToBatchNd, diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp index dac88385b1..08d91fc20b 100644 --- a/src/armnn/LayerSupport.cpp +++ b/src/armnn/LayerSupport.cpp @@ -570,7 +570,11 @@ bool IsRsqrtSupported(const BackendId& backend, char* reasonIfUnsupported, size_t reasonIfUnsupportedMaxLength) { - FORWARD_LAYER_SUPPORT_FUNC(backend, IsRsqrtSupported, input, output); + FORWARD_LAYER_SUPPORT_FUNC(backend, + 
IsElementwiseUnarySupported, + input, + output, + ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt)); } bool IsSoftmaxSupported(const BackendId& backend, diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp index 13bf900dca..2d486f48a6 100644 --- a/src/armnn/LayersFwd.hpp +++ b/src/armnn/LayersFwd.hpp @@ -6,7 +6,6 @@ #include "InternalTypes.hpp" -#include "layers/AbsLayer.hpp" #include "layers/ActivationLayer.hpp" #include "layers/AdditionLayer.hpp" #include "layers/ArgMinMaxLayer.hpp" @@ -24,6 +23,7 @@ #include "layers/DequantizeLayer.hpp" #include "layers/DetectionPostProcessLayer.hpp" #include "layers/DivisionLayer.hpp" +#include "layers/ElementwiseUnaryLayer.hpp" #include "layers/FakeQuantizationLayer.hpp" #include "layers/FloorLayer.hpp" #include "layers/FullyConnectedLayer.hpp" @@ -51,7 +51,6 @@ #include "layers/QuantizedLstmLayer.hpp" #include "layers/ReshapeLayer.hpp" #include "layers/ResizeLayer.hpp" -#include "layers/RsqrtLayer.hpp" #include "layers/SliceLayer.hpp" #include "layers/SoftmaxLayer.hpp" #include "layers/SpaceToBatchNdLayer.hpp" @@ -91,7 +90,6 @@ constexpr LayerType LayerEnumOf(const T* = nullptr); #define DECLARE_LAYER(LayerName) DECLARE_LAYER_IMPL(, LayerName) -DECLARE_LAYER(Abs) DECLARE_LAYER(Activation) DECLARE_LAYER(Addition) DECLARE_LAYER(ArgMinMax) @@ -109,6 +107,7 @@ DECLARE_LAYER(DepthwiseConvolution2d) DECLARE_LAYER(Dequantize) DECLARE_LAYER(DetectionPostProcess) DECLARE_LAYER(Division) +DECLARE_LAYER(ElementwiseUnary) DECLARE_LAYER(FakeQuantization) DECLARE_LAYER(Floor) DECLARE_LAYER(FullyConnected) @@ -136,7 +135,6 @@ DECLARE_LAYER(Quantize) DECLARE_LAYER(QuantizedLstm) DECLARE_LAYER(Reshape) DECLARE_LAYER(Resize) -DECLARE_LAYER(Rsqrt) DECLARE_LAYER(Slice) DECLARE_LAYER(Softmax) DECLARE_LAYER(SpaceToBatchNd) diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp index 43c79c8479..7edc6240a1 100644 --- a/src/armnn/Network.cpp +++ b/src/armnn/Network.cpp @@ -967,6 +967,12 @@ IConnectableLayer* 
Network::AddComparisonLayer(const ComparisonDescriptor& compa return m_Graph->AddLayer(comparisonDescriptor, name); } +IConnectableLayer* Network::AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor, + const char* name) +{ + return m_Graph->AddLayer(elementwiseUnaryDescriptor, name); +} + IConnectableLayer* Network::AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor, const ConstTensor& weights, const Optional& biases, @@ -1200,7 +1206,7 @@ IConnectableLayer* Network::AddMergerLayer(const MergerDescriptor& mergerDescrip IConnectableLayer* Network::AddAbsLayer(const char * name) { - return m_Graph->AddLayer(name); + return AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Abs), name); } IConnectableLayer* Network::AddAdditionLayer(const char* name) @@ -1475,7 +1481,7 @@ IConnectableLayer* Network::AddEqualLayer(const char* name) IConnectableLayer* Network::AddRsqrtLayer(const char * name) { - return m_Graph->AddLayer(name); + return AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), name); } IConnectableLayer* Network::AddGatherLayer(const char* name) diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp index 0a11941340..23a8e47093 100644 --- a/src/armnn/Network.hpp +++ b/src/armnn/Network.hpp @@ -95,6 +95,9 @@ public: const ConstTensor& anchors, const char* name = nullptr) override; + IConnectableLayer* AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor, + const char* name = nullptr) override; + IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor, const ConstTensor& weights, const Optional& biases, @@ -137,6 +140,7 @@ public: IConnectableLayer* AddMergerLayer(const MergerDescriptor& mergerDescriptor, const char* name = nullptr) override; + ARMNN_DEPRECATED_MSG("Use AddElementwiseUnaryLayer instead") IConnectableLayer* AddAbsLayer(const char* name = nullptr) override; 
IConnectableLayer* AddAdditionLayer(const char* name = nullptr) override; @@ -208,6 +212,7 @@ public: ARMNN_DEPRECATED_MSG("Use AddComparisonLayer instead") IConnectableLayer* AddEqualLayer(const char* name = nullptr) override; + ARMNN_DEPRECATED_MSG("Use AddElementwiseUnaryLayer instead") IConnectableLayer* AddRsqrtLayer(const char* name = nullptr) override; IConnectableLayer* AddMergeLayer(const char* name = nullptr) override; diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp index 4b80b02e34..51818ebddd 100644 --- a/src/armnn/QuantizerVisitor.cpp +++ b/src/armnn/QuantizerVisitor.cpp @@ -115,9 +115,7 @@ void QuantizerVisitor::RecordLayer(const IConnectableLayer* srcLayer, IConnectab void QuantizerVisitor::VisitAbsLayer(const IConnectableLayer* layer, const char* name) { - IConnectableLayer* newLayer = m_QuantizedNetwork->AddAbsLayer(name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); + VisitElementwiseUnaryLayer(layer, ElementwiseUnaryDescriptor(UnaryOperation::Abs), name); } void QuantizerVisitor::VisitActivationLayer(const IConnectableLayer* layer, @@ -275,6 +273,15 @@ void QuantizerVisitor::VisitDepthwiseConvolution2dLayer(const IConnectableLayer* SetQuantizedInputConnections(layer, newLayer); } +void QuantizerVisitor::VisitElementwiseUnaryLayer(const IConnectableLayer* layer, + const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor, + const char* name) +{ + IConnectableLayer* newLayer = m_QuantizedNetwork->AddElementwiseUnaryLayer(elementwiseUnaryDescriptor, name); + RecordLayer(layer, newLayer); + SetQuantizedInputConnections(layer, newLayer); +} + void QuantizerVisitor::VisitFullyConnectedLayer(const IConnectableLayer *layer, const FullyConnectedDescriptor& desc, const ConstTensor& weights, @@ -450,12 +457,9 @@ void QuantizerVisitor::VisitResizeLayer(const IConnectableLayer* layer, SetQuantizedInputConnections(layer, newLayer); } -void QuantizerVisitor::VisitRsqrtLayer(const 
IConnectableLayer* layer, - const char* name) +void QuantizerVisitor::VisitRsqrtLayer(const IConnectableLayer* layer, const char* name) { - IConnectableLayer* newLayer = m_QuantizedNetwork->AddRsqrtLayer(name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); + VisitElementwiseUnaryLayer(layer, ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), name); } void QuantizerVisitor::VisitSliceLayer(const IConnectableLayer* layer, diff --git a/src/armnn/QuantizerVisitor.hpp b/src/armnn/QuantizerVisitor.hpp index db0134d7a4..4013033697 100644 --- a/src/armnn/QuantizerVisitor.hpp +++ b/src/armnn/QuantizerVisitor.hpp @@ -32,6 +32,7 @@ public: ~QuantizerVisitor() = default; /// Functions to quantize the individual layers, overridden from ILayerVisitor + ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead") void VisitAbsLayer(const IConnectableLayer* layer, const char* name = nullptr) override; void VisitActivationLayer(const IConnectableLayer* layer, @@ -78,13 +79,16 @@ public: const DepthToSpaceDescriptor& depthToSpaceDescriptor, const char* name = nullptr) override; - void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer, const DepthwiseConvolution2dDescriptor& desc, const ConstTensor& weights, const Optional& biases, const char* name = nullptr) override; + void VisitElementwiseUnaryLayer(const IConnectableLayer* layer, + const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor, + const char* name = nullptr) override; + void VisitFullyConnectedLayer(const IConnectableLayer *layer, const FullyConnectedDescriptor& desc, const ConstTensor& weights, @@ -142,6 +146,7 @@ public: const ResizeBilinearDescriptor& resizeDesc, const char* name = nullptr) override; + ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead") void VisitRsqrtLayer(const IConnectableLayer*, const char* name = nullptr) override; diff --git a/src/armnn/layers/ElementwiseUnaryLayer.cpp b/src/armnn/layers/ElementwiseUnaryLayer.cpp new file mode 
100644 index 0000000000..d3843da060 --- /dev/null +++ b/src/armnn/layers/ElementwiseUnaryLayer.cpp @@ -0,0 +1,62 @@ +// +// Copyright © 2019 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "ElementwiseUnaryLayer.hpp" + +#include "LayerCloneBase.hpp" + +#include +#include + +#include + +namespace armnn +{ + +ElementwiseUnaryLayer::ElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& param, const char* name) + : LayerWithParameters(1, 1, LayerType::ElementwiseUnary, param, name) +{ +} + +std::unique_ptr ElementwiseUnaryLayer::CreateWorkload(const IWorkloadFactory& factory) const +{ + ElementwiseUnaryQueueDescriptor descriptor; + return factory.CreateElementwiseUnary(descriptor, PrepInfoAndDesc(descriptor)); +} + +ElementwiseUnaryLayer* ElementwiseUnaryLayer::Clone(Graph& graph) const +{ + return CloneBase(graph, m_Param, GetName()); +} + +std::vector ElementwiseUnaryLayer::InferOutputShapes(const std::vector& inputShapes) const +{ + // Should return the shape of the input tensor + BOOST_ASSERT(inputShapes.size() == 1); + const TensorShape& input = inputShapes[0]; + + return std::vector({ input }); +} + +void ElementwiseUnaryLayer::ValidateTensorShapesFromInputs() +{ + VerifyLayerConnections(1, CHECK_LOCATION()); + + std::vector inferredShapes = InferOutputShapes({ + GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()}); + BOOST_ASSERT(inferredShapes.size() == 1); + + ConditionalThrowIfNotEqual( + "ElementwiseUnaryLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", + GetOutputSlot(0).GetTensorInfo().GetShape(), + inferredShapes[0]); +} + +void ElementwiseUnaryLayer::Accept(ILayerVisitor& visitor) const +{ + visitor.VisitElementwiseUnaryLayer(this, GetParameters(), GetName()); +} + +} // namespace armnn diff --git a/src/armnn/layers/ElementwiseUnaryLayer.hpp b/src/armnn/layers/ElementwiseUnaryLayer.hpp new file mode 100644 index 0000000000..850a814b6e --- /dev/null +++ 
b/src/armnn/layers/ElementwiseUnaryLayer.hpp @@ -0,0 +1,48 @@ +// +// Copyright © 2019 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include "LayerWithParameters.hpp" + +namespace armnn +{ + +/// This layer represents an elementwiseUnary operation. +class ElementwiseUnaryLayer : public LayerWithParameters +{ +public: + /// Makes a workload for the elementwiseUnary type + /// @param [in] graph The graph where this layer can be found + /// @param [in] factory The workload factory which will create the workload + /// @return A pointer to the created workload, or nullptr if not created + virtual std::unique_ptr CreateWorkload(const IWorkloadFactory& factory) const override; + + /// Creates a dynamically-allocated copy of this layer + /// @param [in] graph The graph into which this layer is being cloned + ElementwiseUnaryLayer* Clone(Graph& graph) const override; + + /// Returns inputShapes by default. + /// @param [in] inputShapes The input shapes layer has. + /// @return A vector containing the inferred output shape. 
+ std::vector InferOutputShapes(const std::vector& inputShapes) const override; + + /// Check if the input tensor shape(s) will lead to a valid configuration + /// of @ref ElementwiseUnaryLayer + void ValidateTensorShapesFromInputs() override; + + void Accept(ILayerVisitor& visitor) const override; + +protected: + /// Constructor to create a ElementwiseUnaryLayer + /// @param [in] param ElementwiseUnaryDescriptor to configure the ElementwiseUnaryLayer + /// @param [in] name Optional name for the layer + ElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& param, const char* name); + + /// Default destructor + ~ElementwiseUnaryLayer() = default; +}; + +} // namespace armnn diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp index 02ce12a304..4782c432a2 100644 --- a/src/armnn/test/CreateWorkload.hpp +++ b/src/armnn/test/CreateWorkload.hpp @@ -131,14 +131,15 @@ std::unique_ptr CreateElementwiseWorkloadTest(armnn::IWorkloadFact return workload; } -template std::unique_ptr CreateElementwiseUnaryWorkloadTest(armnn::IWorkloadFactory & factory, - armnn::Graph & graph) + armnn::Graph & graph, + armnn::UnaryOperation op) { - Layer* const layer = graph.AddLayer("layer"); + ElementwiseUnaryDescriptor desc = ElementwiseUnaryDescriptor(op); + Layer* const layer = graph.AddLayer(desc, "layer"); Layer* const input = graph.AddLayer(0, "input"); Layer* const output = graph.AddLayer(0, "output"); @@ -1059,34 +1060,6 @@ std::unique_ptr CreateResizeBilinearWorkloadTest(armnn::IWorkloa return workload; } -template -std::unique_ptr CreateRsqrtWorkloadTest(armnn::IWorkloadFactory& factory, - armnn::Graph& graph) -{ - Layer* const layer = graph.AddLayer("rsqrt"); - - // Creates extra layers. - Layer* const input = graph.AddLayer(0, "input"); - Layer* const output = graph.AddLayer(0, "output"); - - // Connects up. 
- armnn::TensorInfo tensorInfo({1, 1}, DataType); - - Connect(input, layer, tensorInfo); - Connect(layer, output, tensorInfo); - - CreateTensorHandles(graph, factory); - - // Makes the workload and checks it. - auto workload = MakeAndCheckWorkload(*layer, factory); - - RsqrtQueueDescriptor queueDescriptor = workload->GetData(); - BOOST_TEST(queueDescriptor.m_Inputs.size() == 1); - BOOST_TEST(queueDescriptor.m_Outputs.size() == 1); - - return workload; -} - template std::unique_ptr CreateBatchToSpaceNdWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph) diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp index 52beb630f9..d568b2cbc0 100644 --- a/src/armnn/test/QuantizerTest.cpp +++ b/src/armnn/test/QuantizerTest.cpp @@ -1672,61 +1672,6 @@ BOOST_AUTO_TEST_CASE(QuantizeConstant) VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); } -BOOST_AUTO_TEST_CASE(QuantizeAbs) -{ - class TestAbsQuantization : public TestLeakyReLuActivationQuantization - { - public: - TestAbsQuantization(const TensorShape& inputShape, const TensorShape& outputShape) : - TestLeakyReLuActivationQuantization(inputShape, outputShape) - {} - - TestAbsQuantization(const QuantizerOptions& options, - const TensorShape& inputShape, - const TensorShape& outputShape) : - TestLeakyReLuActivationQuantization(options, inputShape, outputShape) - {} - - void VisitAbsLayer(const IConnectableLayer *layer, - const char *name = nullptr) override - { - boost::ignore_unused(name); - TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo(); - - TestQuantizationParams(outputInfo, - { 30.0f / g_Asymm8QuantizationBase, 128 }, - { 15.0f / g_Symm8QuantizationBase, 0}, - { 15.0f / g_Symm16QuantizationBase, 0 }); - } - }; - - INetworkPtr network = INetwork::Create(); - - //Add the layer being tested - IConnectableLayer* absLayer = network->AddAbsLayer(); - - const TensorShape shape{1U}; - TensorInfo info(shape, DataType::Float32); - - IConnectableLayer* 
activation = CreateStartOfLeakyReluNetwork(network.get(), info); - - CompleteLeakyReluNetwork(network.get(), activation, absLayer, info); - - INetworkPtr quantizedNetworkQAsymm8 = INetworkQuantizer::Create(network.get())->ExportNetwork(); - TestAbsQuantization validatorQAsymm8(shape, shape); - VisitLayersTopologically(quantizedNetworkQAsymm8.get(), validatorQAsymm8); - - const QuantizerOptions qSymm8Options(DataType::QSymmS8); - INetworkPtr quantizedNetworkQSymm8 = INetworkQuantizer::Create(network.get(), qSymm8Options)->ExportNetwork(); - TestAbsQuantization validatorQSymm8(qSymm8Options, shape, shape); - VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8); - - const QuantizerOptions qSymm16options(DataType::QSymmS16); - INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(); - TestAbsQuantization validatorQSymm16(qSymm16options, shape, shape); - VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); -} - BOOST_AUTO_TEST_CASE(QuantizeArgMinMax) { class TestArgMinMaxQuantization : public TestQuantization diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp index 36bbd36792..efe50a5b58 100644 --- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp +++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp @@ -85,6 +85,12 @@ armnn::ConcatDescriptor GetDescriptor() return descriptor; } +template<> +armnn::ElementwiseUnaryDescriptor GetDescriptor() +{ + return armnn::ElementwiseUnaryDescriptor(armnn::UnaryOperation::Abs); +} + template<> armnn::InstanceNormalizationDescriptor GetDescriptor() { @@ -251,6 +257,7 @@ TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(DepthToSpace) TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(BatchToSpaceNd) TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Comparison) TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Concat) +TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(ElementwiseUnary) 
TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(InstanceNormalization) TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(L2Normalization) TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(LogSoftmax) diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp index 221057cbdc..f792bc3554 100644 --- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp +++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp @@ -48,6 +48,7 @@ DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(BatchToSpaceNd) DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Comparison) DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Concat) DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(DepthToSpace) +DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(ElementwiseUnary) DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(InstanceNormalization) DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(L2Normalization) DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(LogSoftmax) diff --git a/src/armnn/test/TestNameOnlyLayerVisitor.cpp b/src/armnn/test/TestNameOnlyLayerVisitor.cpp index 32de94e7ef..0653b39e58 100644 --- a/src/armnn/test/TestNameOnlyLayerVisitor.cpp +++ b/src/armnn/test/TestNameOnlyLayerVisitor.cpp @@ -38,7 +38,6 @@ TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(name) BOOST_AUTO_TEST_SUITE(TestNameOnlyLayerVisitor) -TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Abs) TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Addition) TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Dequantize) TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Division) @@ -50,7 +49,6 @@ TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Minimum) TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Multiplication) TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Prelu) TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Quantize) -TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Rsqrt) TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Subtraction) TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Switch) diff --git a/src/armnn/test/TestNameOnlyLayerVisitor.hpp b/src/armnn/test/TestNameOnlyLayerVisitor.hpp index 
c770b5e9e0..84dfdd6539 100644 --- a/src/armnn/test/TestNameOnlyLayerVisitor.hpp +++ b/src/armnn/test/TestNameOnlyLayerVisitor.hpp @@ -25,7 +25,6 @@ public: \ } // anonymous namespace -DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Abs) DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Addition) DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Dequantize) DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Division) @@ -37,6 +36,5 @@ DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Minimum) DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Multiplication) DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Prelu) DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Quantize) -DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Rsqrt) DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Subtraction) DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Switch) diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp index 6077d057c4..99ee0b5b2d 100644 --- a/src/armnnDeserializer/Deserializer.cpp +++ b/src/armnnDeserializer/Deserializer.cpp @@ -203,6 +203,7 @@ m_ParserFunctions(Layer_MAX+1, &Deserializer::ParseUnsupportedLayer) m_ParserFunctions[Layer_DequantizeLayer] = &Deserializer::ParseDequantize; m_ParserFunctions[Layer_DetectionPostProcessLayer] = &Deserializer::ParseDetectionPostProcess; m_ParserFunctions[Layer_DivisionLayer] = &Deserializer::ParseDivision; + m_ParserFunctions[Layer_ElementwiseUnaryLayer] = &Deserializer::ParseElementwiseUnary; m_ParserFunctions[Layer_EqualLayer] = &Deserializer::ParseEqual; m_ParserFunctions[Layer_FullyConnectedLayer] = &Deserializer::ParseFullyConnected; m_ParserFunctions[Layer_FloorLayer] = &Deserializer::ParseFloor; @@ -457,6 +458,25 @@ armnn::ComparisonOperation ToComparisonOperation(armnnSerializer::ComparisonOper } } +armnn::UnaryOperation ToUnaryOperation(armnnSerializer::UnaryOperation operation) +{ + switch (operation) + { + case armnnSerializer::UnaryOperation::UnaryOperation_Abs: + return armnn::UnaryOperation::Abs; + case 
armnnSerializer::UnaryOperation::UnaryOperation_Rsqrt: + return armnn::UnaryOperation::Rsqrt; + case armnnSerializer::UnaryOperation::UnaryOperation_Sqrt: + return armnn::UnaryOperation::Sqrt; + case armnnSerializer::UnaryOperation::UnaryOperation_Exp: + return armnn::UnaryOperation::Exp; + case armnnSerializer::UnaryOperation::UnaryOperation_Neg: + return armnn::UnaryOperation::Neg; + default: + throw armnn::InvalidArgumentException("Unary operation unknown"); + } +} + armnn::ResizeMethod ToResizeMethod(armnnSerializer::ResizeMethod method) { switch (method) @@ -926,7 +946,8 @@ void Deserializer::ParseAbs(armnnDeserializer::Deserializer::GraphPtr graph, uns auto layerName = GetLayerName(graph, layerIndex); - IConnectableLayer* layer = m_Network->AddAbsLayer(layerName.c_str()); + armnn::ElementwiseUnaryDescriptor descriptor(armnn::UnaryOperation::Abs); + IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(descriptor, layerName.c_str()); armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]); layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); @@ -1496,6 +1517,33 @@ void Deserializer::ParseComparison(GraphPtr graph, unsigned int layerIndex) RegisterOutputSlots(graph, layerIndex, layer); } +void Deserializer::ParseElementwiseUnary(GraphPtr graph, unsigned int layerIndex) +{ + CHECK_LAYERS(graph, 0, layerIndex); + CHECK_LOCATION(); + + auto inputs = GetInputs(graph, layerIndex); + CHECK_VALID_SIZE(inputs.size(), 1); + + auto outputs = GetOutputs(graph, layerIndex); + CHECK_VALID_SIZE(outputs.size(), 1); + + auto fbLayer = graph->layers()->Get(layerIndex)->layer_as_ElementwiseUnaryLayer(); + auto fbDescriptor = fbLayer->descriptor(); + + armnn::ElementwiseUnaryDescriptor descriptor; + descriptor.m_Operation = ToUnaryOperation(fbDescriptor->operation()); + + const std::string& layerName = GetLayerName(graph, layerIndex); + IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(descriptor, layerName.c_str()); + + armnn::TensorInfo 
outputTensorInfo = ToTensorInfo(outputs[0]); + layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); + + RegisterInputSlots(graph, layerIndex, layer); + RegisterOutputSlots(graph, layerIndex, layer); +} + void Deserializer::ParseConcat(GraphPtr graph, unsigned int layerIndex) { CHECK_LAYERS(graph, 0, layerIndex); @@ -2135,8 +2183,9 @@ void Deserializer::ParseRsqrt(GraphPtr graph, unsigned int layerIndex) CHECK_VALID_SIZE(outputs.size(), 1); auto layerName = GetLayerName(graph, layerIndex); - IConnectableLayer* layer = m_Network->AddRsqrtLayer(layerName.c_str()); + armnn::ElementwiseUnaryDescriptor descriptor(armnn::UnaryOperation::Rsqrt); + IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(descriptor, layerName.c_str()); armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]); layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); diff --git a/src/armnnDeserializer/Deserializer.hpp b/src/armnnDeserializer/Deserializer.hpp index babb56e70e..ae8be6e932 100644 --- a/src/armnnDeserializer/Deserializer.hpp +++ b/src/armnnDeserializer/Deserializer.hpp @@ -92,6 +92,7 @@ private: void ParseDequantize(GraphPtr graph, unsigned int layerIndex); void ParseDetectionPostProcess(GraphPtr graph, unsigned int layerIndex); void ParseDivision(GraphPtr graph, unsigned int layerIndex); + void ParseElementwiseUnary(GraphPtr graph, unsigned int layerIndex); void ParseEqual(GraphPtr graph, unsigned int layerIndex); void ParseFloor(GraphPtr graph, unsigned int layerIndex); void ParseFullyConnected(GraphPtr graph, unsigned int layerIndex); diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs index 0d30d96452..0f8a816093 100644 --- a/src/armnnSerializer/ArmnnSchema.fbs +++ b/src/armnnSerializer/ArmnnSchema.fbs @@ -147,7 +147,8 @@ enum LayerType : uint { InstanceNormalization = 50, LogSoftmax = 51, Comparison = 52, - StandIn = 53 + StandIn = 53, + ElementwiseUnary = 54 } // Base layer table to be used as part of other layers @@ -166,6 
+167,7 @@ table BindableLayerBase { // Table for each layer defined below +/// @deprecated Use ElementwiseUnaryLayer instead table AbsLayer { base:LayerBase; } @@ -252,6 +254,23 @@ table DivisionLayer { base:LayerBase; } +enum UnaryOperation : byte { + Abs = 0, + Rsqrt = 1, + Sqrt = 2, + Exp = 3, + Neg = 4 +} + +table ElementwiseUnaryDescriptor { + operation:UnaryOperation; +} + +table ElementwiseUnaryLayer { + base:LayerBase; + descriptor:ElementwiseUnaryDescriptor; +} + /// @deprecated Use ComparisonLayer instead table EqualLayer { base:LayerBase; @@ -502,6 +521,7 @@ table PadDescriptor { padValue:float = 0; } +/// @deprecated Use ElementwiseUnaryLayer instead table RsqrtLayer { base:LayerBase; } @@ -798,7 +818,8 @@ union Layer { InstanceNormalizationLayer, LogSoftmaxLayer, ComparisonLayer, - StandInLayer + StandInLayer, + ElementwiseUnaryLayer } table AnyLayer { diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp index 91b62413d8..13ea0f04a9 100644 --- a/src/armnnSerializer/Serializer.cpp +++ b/src/armnnSerializer/Serializer.cpp @@ -439,6 +439,21 @@ void SerializerVisitor::VisitDivisionLayer(const armnn::IConnectableLayer* layer CreateAnyLayer(fbDivisionLayer.o, serializer::Layer::Layer_DivisionLayer); } +void SerializerVisitor::VisitElementwiseUnaryLayer(const armnn::IConnectableLayer* layer, + const armnn::ElementwiseUnaryDescriptor& descriptor, + const char* name) +{ + boost::ignore_unused(name); + + auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ElementwiseUnary); + auto fbDescriptor = serializer::CreateElementwiseUnaryDescriptor( + m_flatBufferBuilder, + GetFlatBufferUnaryOperation(descriptor.m_Operation)); + + auto fbLayer = serializer::CreateElementwiseUnaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor); + CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_ElementwiseUnaryLayer); +} + void SerializerVisitor::VisitEqualLayer(const armnn::IConnectableLayer* layer, const char* name) { 
boost::ignore_unused(name); diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp index 7dfd534081..d92c93d46c 100644 --- a/src/armnnSerializer/Serializer.hpp +++ b/src/armnnSerializer/Serializer.hpp @@ -44,6 +44,7 @@ public: return m_serializedLayers; } + ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead") void VisitAbsLayer(const armnn::IConnectableLayer* layer, const char* name = nullptr) override; @@ -109,6 +110,10 @@ public: void VisitDivisionLayer(const armnn::IConnectableLayer* layer, const char* name = nullptr) override; + void VisitElementwiseUnaryLayer(const armnn::IConnectableLayer* layer, + const armnn::ElementwiseUnaryDescriptor& descriptor, + const char* name = nullptr) override; + ARMNN_DEPRECATED_MSG("Use VisitComparisonLayer instead") void VisitEqualLayer(const armnn::IConnectableLayer* layer, const char* name = nullptr) override; @@ -210,6 +215,7 @@ public: const armnn::ResizeBilinearDescriptor& resizeDescriptor, const char* name = nullptr) override; + ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead") void VisitRsqrtLayer(const armnn::IConnectableLayer* layer, const char* name = nullptr) override; diff --git a/src/armnnSerializer/SerializerSupport.md b/src/armnnSerializer/SerializerSupport.md index 4fc880a856..2f77a8e778 100644 --- a/src/armnnSerializer/SerializerSupport.md +++ b/src/armnnSerializer/SerializerSupport.md @@ -6,12 +6,12 @@ This reference guide provides a list of layers which can be serialized currently The Arm NN SDK Serializer currently supports the following layers: -* Abs * Activation * Addition * ArgMinMax * BatchToSpaceNd * BatchNormalization +* Comparison * Concat * Constant * Convolution2d @@ -20,6 +20,7 @@ The Arm NN SDK Serializer currently supports the following layers: * Dequantize * DetectionPostProcess * Division +* ElementwiseUnary * Floor * FullyConnected * Gather @@ -43,7 +44,6 @@ The Arm NN SDK Serializer currently supports the following layers: * QuantizedLstm 
* Reshape * Resize -* Rsqrt * Slice * Softmax * SpaceToBatchNd @@ -66,3 +66,6 @@ Some layers have been deprecated and replaced by others layers. In order to main * Merger will deserialize as Concat * Greater will deserialize as Comparison * ResizeBilinear will deserialize as Resize +* Abs will deserialize as ElementwiseUnary +* Rsqrt will deserialize as ElementwiseUnary + diff --git a/src/armnnSerializer/SerializerUtils.cpp b/src/armnnSerializer/SerializerUtils.cpp index df1ef285de..02a5ed3872 100644 --- a/src/armnnSerializer/SerializerUtils.cpp +++ b/src/armnnSerializer/SerializerUtils.cpp @@ -79,6 +79,25 @@ armnnSerializer::DataLayout GetFlatBufferDataLayout(armnn::DataLayout dataLayout } } +armnnSerializer::UnaryOperation GetFlatBufferUnaryOperation(armnn::UnaryOperation unaryOperation) +{ + switch (unaryOperation) + { + case armnn::UnaryOperation::Abs: + return armnnSerializer::UnaryOperation::UnaryOperation_Abs; + case armnn::UnaryOperation::Rsqrt: + return armnnSerializer::UnaryOperation::UnaryOperation_Rsqrt; + case armnn::UnaryOperation::Sqrt: + return armnnSerializer::UnaryOperation::UnaryOperation_Sqrt; + case armnn::UnaryOperation::Exp: + return armnnSerializer::UnaryOperation::UnaryOperation_Exp; + case armnn::UnaryOperation::Neg: + return armnnSerializer::UnaryOperation::UnaryOperation_Neg; + default: + throw armnn::InvalidArgumentException("Unary operation unknown"); + } +} + armnnSerializer::PoolingAlgorithm GetFlatBufferPoolingAlgorithm(armnn::PoolingAlgorithm poolingAlgorithm) { switch (poolingAlgorithm) diff --git a/src/armnnSerializer/SerializerUtils.hpp b/src/armnnSerializer/SerializerUtils.hpp index 48553334ba..edd48a5e25 100644 --- a/src/armnnSerializer/SerializerUtils.hpp +++ b/src/armnnSerializer/SerializerUtils.hpp @@ -18,6 +18,8 @@ armnnSerializer::DataType GetFlatBufferDataType(armnn::DataType dataType); armnnSerializer::DataLayout GetFlatBufferDataLayout(armnn::DataLayout dataLayout); +armnnSerializer::UnaryOperation 
GetFlatBufferUnaryOperation(armnn::UnaryOperation unaryOperation); + armnnSerializer::PoolingAlgorithm GetFlatBufferPoolingAlgorithm(armnn::PoolingAlgorithm poolingAlgorithm); armnnSerializer::OutputShapeRounding GetFlatBufferOutputShapeRounding( diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp index 2b13109725..47804fe328 100644 --- a/src/armnnSerializer/test/SerializerTests.cpp +++ b/src/armnnSerializer/test/SerializerTests.cpp @@ -268,32 +268,6 @@ static std::vector GenerateRandomData(size_t size) BOOST_AUTO_TEST_SUITE(SerializerTests) -BOOST_AUTO_TEST_CASE(SerializeAbs) -{ - DECLARE_LAYER_VERIFIER_CLASS(Abs) - - const std::string layerName("abs"); - const armnn::TensorInfo tensorInfo({1, 2, 3}, armnn::DataType::Float32); - - armnn::INetworkPtr network = armnn::INetwork::Create(); - armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0); - - armnn::IConnectableLayer* const absLayer = network->AddAbsLayer(layerName.c_str()); - armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0); - - inputLayer->GetOutputSlot(0).Connect(absLayer->GetInputSlot(0)); - absLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); - - inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo); - absLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo); - - armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network)); - BOOST_CHECK(deserializedNetwork); - - AbsLayerVerifier verifier(layerName, {tensorInfo}, {tensorInfo}); - deserializedNetwork->Accept(verifier); -} - BOOST_AUTO_TEST_CASE(SerializeAddition) { DECLARE_LAYER_VERIFIER_CLASS(Addition) @@ -2176,31 +2150,6 @@ BOOST_AUTO_TEST_CASE(EnsureResizeBilinearBackwardCompatibility) deserializedNetwork->Accept(verifier); } -BOOST_AUTO_TEST_CASE(SerializeRsqrt) -{ - DECLARE_LAYER_VERIFIER_CLASS(Rsqrt) - - const std::string layerName("rsqrt"); - const armnn::TensorInfo tensorInfo({ 3, 1, 2 }, armnn::DataType::Float32); - - 
armnn::INetworkPtr network = armnn::INetwork::Create(); - armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0); - armnn::IConnectableLayer* const rsqrtLayer = network->AddRsqrtLayer(layerName.c_str()); - armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0); - - inputLayer->GetOutputSlot(0).Connect(rsqrtLayer->GetInputSlot(0)); - rsqrtLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); - - inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo); - rsqrtLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo); - - armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network)); - BOOST_CHECK(deserializedNetwork); - - RsqrtLayerVerifier verifier(layerName, {tensorInfo}, {tensorInfo}); - deserializedNetwork->Accept(verifier); -} - BOOST_AUTO_TEST_CASE(SerializeSlice) { DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Slice) diff --git a/src/armnnTfParser/TfParser.cpp b/src/armnnTfParser/TfParser.cpp index ca98f463b5..af86619249 100755 --- a/src/armnnTfParser/TfParser.cpp +++ b/src/armnnTfParser/TfParser.cpp @@ -2627,7 +2627,8 @@ ParsedTfOperationPtr TfParser::ParseRsqrt(const tensorflow::NodeDef &nodeDef, std::vector inputs = GetInputParsedTfOperationsChecked(nodeDef, 1); - IConnectableLayer* const layer = m_Network->AddRsqrtLayer(nodeDef.name().c_str()); + ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt); + IConnectableLayer* const layer = m_Network->AddElementwiseUnaryLayer(descriptor, nodeDef.name().c_str()); IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index); prevLayerOutputSlot.Connect(layer->GetInputSlot(0)); diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp index 8332774202..b19356f955 100644 --- a/src/backends/backendsCommon/LayerSupportBase.cpp +++ b/src/backends/backendsCommon/LayerSupportBase.cpp @@ -3,7 +3,10 @@ // SPDX-License-Identifier: MIT // +#include +#include 
#include +#include #include @@ -195,6 +198,26 @@ bool LayerSupportBase::IsDivisionSupported(const TensorInfo& /*input0*/, return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); } +bool LayerSupportBase::IsElementwiseUnarySupported(const TensorInfo& input, + const TensorInfo& output, + const ElementwiseUnaryDescriptor& descriptor, + Optional reasonIfUnsupported) const +{ + if (descriptor.m_Operation == UnaryOperation::Abs) + { + ARMNN_NO_DEPRECATE_WARN_BEGIN + return IsAbsSupported(input, output, reasonIfUnsupported); + ARMNN_NO_DEPRECATE_WARN_END + } + else if (descriptor.m_Operation == UnaryOperation::Rsqrt) + { + ARMNN_NO_DEPRECATE_WARN_BEGIN + return IsRsqrtSupported(input, output, reasonIfUnsupported); + ARMNN_NO_DEPRECATE_WARN_END + } + return false; +} + bool LayerSupportBase::IsEqualSupported(const armnn::TensorInfo& /*input0*/, const armnn::TensorInfo& /*input1*/, const armnn::TensorInfo& /*output*/, diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp index 60f94d0c4d..7a65eb55ed 100644 --- a/src/backends/backendsCommon/LayerSupportBase.hpp +++ b/src/backends/backendsCommon/LayerSupportBase.hpp @@ -13,6 +13,7 @@ namespace armnn class LayerSupportBase : public ILayerSupport { public: + ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead") bool IsAbsSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const override; @@ -119,6 +120,11 @@ public: const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const override; + bool IsElementwiseUnarySupported(const TensorInfo& input, + const TensorInfo& output, + const ElementwiseUnaryDescriptor& descriptor, + Optional reasonIfUnsupported = EmptyOptional()) const override; + ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead") bool IsEqualSupported(const TensorInfo& input0, const TensorInfo& input1, @@ -278,6 +284,7 @@ public: const TensorInfo& 
output, Optional reasonIfUnsupported = EmptyOptional()) const override; + ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead") bool IsRsqrtSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const override; diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp index fa5c6fe38e..d2ab41ef40 100644 --- a/src/backends/backendsCommon/WorkloadData.cpp +++ b/src/backends/backendsCommon/WorkloadData.cpp @@ -2969,4 +2969,28 @@ void ComparisonQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const } } +void ElementwiseUnaryQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +{ + const std::string descriptorName{"ElementwiseUnaryQueueDescriptor"}; + + ValidateNumInputs(workloadInfo, descriptorName, 1); + ValidateNumOutputs(workloadInfo, descriptorName, 1); + + const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0]; + const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0]; + + ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output"); + + std::vector supportedTypes = + { + DataType::Float16, + DataType::Float32, + DataType::QAsymmU8, + DataType::QSymmS16 + }; + + ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName); + ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output"); +} + } // namespace armnn diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp index 43be3cd6e1..c5fcf15c3b 100644 --- a/src/backends/backendsCommon/WorkloadData.hpp +++ b/src/backends/backendsCommon/WorkloadData.hpp @@ -560,4 +560,9 @@ struct ComparisonQueueDescriptor : QueueDescriptorWithParameters +{ + void Validate(const WorkloadInfo& workloadInfo) const; +}; + } // namespace armnn diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp 
b/src/backends/backendsCommon/WorkloadFactory.cpp index 54ae585a82..acb73b589d 100644 --- a/src/backends/backendsCommon/WorkloadFactory.cpp +++ b/src/backends/backendsCommon/WorkloadFactory.cpp @@ -68,15 +68,6 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, switch(layer.GetType()) { - case LayerType::Abs: - { - const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); - const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsAbsSupported(OverrideDataType(input, dataType), - OverrideDataType(output, dataType), - reason); - break; - } case LayerType::Activation: { auto cLayer = boost::polymorphic_downcast(&layer); @@ -294,6 +285,19 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, reason); break; } + case LayerType::ElementwiseUnary: + { + auto cLayer = boost::polymorphic_downcast(&layer); + + const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); + const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); + + result = layerSupportObject->IsElementwiseUnarySupported(OverrideDataType(input, dataType), + OverrideDataType(output, dataType), + cLayer->GetParameters(), + reason); + break; + } case LayerType::FakeQuantization: { auto cLayer = boost::polymorphic_downcast(&layer); @@ -807,15 +811,6 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, reason); break; } - case LayerType::Rsqrt: - { - const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); - const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsRsqrtSupported(OverrideDataType(input, dataType), - OverrideDataType(output, dataType), - reason); - break; - } case LayerType::Slice: { auto cLayer = boost::polymorphic_downcast(&layer); @@ -1182,6 +1177,12 @@ std::unique_ptr IWorkloadFactory::CreateDivision(const DivisionQueueD return std::unique_ptr(); } +std::unique_ptr 
IWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& /*desc*/, + const WorkloadInfo& /*info*/) const +{ + return std::unique_ptr(); +} + std::unique_ptr IWorkloadFactory::CreateEqual(const EqualQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*Info*/) const { diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp index 6e6478fd6a..e1cdff6abe 100644 --- a/src/backends/backendsCommon/WorkloadFactory.hpp +++ b/src/backends/backendsCommon/WorkloadFactory.hpp @@ -51,6 +51,7 @@ public: DataLayout dataLayout, const bool IsMemoryManaged = true) const = 0; + ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead") virtual std::unique_ptr CreateAbs(const AbsQueueDescriptor& descriptor, const WorkloadInfo& info) const; @@ -105,6 +106,9 @@ public: virtual std::unique_ptr CreateDivision(const DivisionQueueDescriptor& descriptor, const WorkloadInfo& info) const; + virtual std::unique_ptr CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor, + const WorkloadInfo& Info) const; + ARMNN_DEPRECATED_MSG("Use CreateComparison instead") virtual std::unique_ptr CreateEqual(const EqualQueueDescriptor& descriptor, const WorkloadInfo& Info) const; @@ -200,6 +204,7 @@ public: virtual std::unique_ptr CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor, const WorkloadInfo& info) const; + ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead") virtual std::unique_ptr CreateRsqrt(const RsqrtQueueDescriptor& descriptor, const WorkloadInfo& info) const; diff --git a/src/backends/backendsCommon/WorkloadFactoryBase.hpp b/src/backends/backendsCommon/WorkloadFactoryBase.hpp index 1947c6935b..9602cc3b6c 100644 --- a/src/backends/backendsCommon/WorkloadFactoryBase.hpp +++ b/src/backends/backendsCommon/WorkloadFactoryBase.hpp @@ -106,6 +106,22 @@ public: const WorkloadInfo& /*info*/) const override { return nullptr; } + std::unique_ptr CreateElementwiseUnary(const 
ElementwiseUnaryQueueDescriptor& descriptor, + const WorkloadInfo& info) const override + { + if (descriptor.m_Parameters.m_Operation == UnaryOperation::Abs) + { + AbsQueueDescriptor absDescriptor; + return CreateAbs(absDescriptor, info); + } + else if (descriptor.m_Parameters.m_Operation == UnaryOperation::Rsqrt) + { + RsqrtQueueDescriptor rsqrtDescriptor; + return CreateRsqrt(rsqrtDescriptor, info); + } + return nullptr; + } + std::unique_ptr CreateFakeQuantization(const FakeQuantizationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const override { return nullptr; } diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk index 4461cd68b1..56a21b386c 100644 --- a/src/backends/backendsCommon/common.mk +++ b/src/backends/backendsCommon/common.mk @@ -50,6 +50,7 @@ COMMON_TEST_SOURCES := \ test/layerTests/DepthToSpaceTestImpl.cpp \ test/layerTests/DequantizeTestImpl.cpp \ test/layerTests/DivisionTestImpl.cpp \ + test/layerTests/ElementwiseUnaryTestImpl.cpp \ test/layerTests/FakeQuantizationTestImpl.cpp \ test/layerTests/FloorTestImpl.cpp \ test/layerTests/FullyConnectedTestImpl.cpp \ diff --git a/src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp deleted file mode 100644 index 602ccd6a19..0000000000 --- a/src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp +++ /dev/null @@ -1,65 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. 
-// SPDX-License-Identifier: MIT -// - -#pragma once - -#include "CommonTestUtils.hpp" - -#include -#include - - -namespace -{ - -armnn::INetworkPtr CreateAbsNetwork(const armnn::TensorInfo& tensorInfo) -{ - armnn::INetworkPtr network(armnn::INetwork::Create()); - - armnn::IConnectableLayer* inputLayer = network->AddInputLayer(0, "input"); - armnn::IConnectableLayer* absLayer = network->AddAbsLayer("abs"); - armnn::IConnectableLayer* outputLayer = network->AddOutputLayer(0, "output"); - - Connect(inputLayer, absLayer, tensorInfo, 0, 0); - Connect(absLayer, outputLayer, tensorInfo, 0, 0); - - return network; -} - -} // anonymous namespace - -template> -void AbsEndToEnd(const std::vector& backends) -{ - using namespace armnn; - - const float qScale = IsQuantizedType() ? 0.25f : 1.0f; - const int32_t qOffset = IsQuantizedType() ? 50 : 0; - - TensorInfo tensorInfo({ 1, 1, 2, 3 }, ArmnnType, qScale, qOffset); - - std::vector inputData = - { - -1.f, 2.f, -3.f, - 4.f, -5.f, 6.f - }; - - std::vector expectedOutputData = - { - 1.f, 2.f, 3.f, - 4.f, 5.f, 6.f - }; - - // quantize data - std::vector qInputData = armnnUtils::QuantizedVector(inputData, qScale, qOffset); - std::vector qExpectedOutputData = armnnUtils::QuantizedVector(expectedOutputData, qScale, qOffset); - - INetworkPtr network = CreateAbsNetwork(tensorInfo); - - EndToEndLayerTestImpl(std::move(network), - { { 0, qInputData } }, - { { 0, qExpectedOutputData } }, - backends); -} diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt index 82df782317..4716bd47e4 100644 --- a/src/backends/backendsCommon/test/CMakeLists.txt +++ b/src/backends/backendsCommon/test/CMakeLists.txt @@ -4,7 +4,6 @@ # list(APPEND armnnBackendsCommonUnitTests_sources - AbsEndToEndTestImpl.hpp ActivationFixture.hpp ArgMinMaxEndToEndTestImpl.hpp BackendIdTests.cpp @@ -19,6 +18,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources DetectionPostProcessEndToEndTestImpl.hpp 
DynamicBackendTests.cpp DynamicBackendTests.hpp + ElementwiseUnaryEndToEndTestImpl.hpp EndToEndTestImpl.hpp GatherEndToEndTestImpl.hpp InstanceNormalizationEndToEndTestImpl.cpp @@ -81,6 +81,8 @@ list(APPEND armnnBackendsCommonUnitTests_sources layerTests/DivisionTestImpl.cpp layerTests/DivisionTestImpl.hpp layerTests/ElementwiseTestImpl.hpp + layerTests/ElementwiseUnaryTestImpl.cpp + layerTests/ElementwiseUnaryTestImpl.hpp layerTests/FakeQuantizationTestImpl.cpp layerTests/FakeQuantizationTestImpl.hpp layerTests/FloorTestImpl.cpp diff --git a/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp new file mode 100644 index 0000000000..4c93735bc8 --- /dev/null +++ b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp @@ -0,0 +1,77 @@ +// +// Copyright © 2019 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// +#pragma once + +#include "CommonTestUtils.hpp" + +#include + +#include + +#include + +#include + +namespace +{ + +template +INetworkPtr CreateElementwiseUnaryNetwork(const TensorShape& inputShape, + const TensorShape& outputShape, + UnaryOperation operation, + const float qScale = 1.0f, + const int32_t qOffset = 0) +{ + using namespace armnn; + + INetworkPtr net(INetwork::Create()); + + ElementwiseUnaryDescriptor descriptor(operation); + IConnectableLayer* elementwiseUnaryLayer = net->AddElementwiseUnaryLayer(descriptor, "elementwiseUnary"); + + TensorInfo inputTensorInfo(inputShape, ArmnnTypeInput, qScale, qOffset); + IConnectableLayer* input = net->AddInputLayer(boost::numeric_cast(0)); + Connect(input, elementwiseUnaryLayer, inputTensorInfo, 0, 0); + + TensorInfo outputTensorInfo(outputShape, ArmnnTypeInput, qScale, qOffset); + IConnectableLayer* output = net->AddOutputLayer(0, "output"); + Connect(elementwiseUnaryLayer, output, outputTensorInfo, 0, 0); + + return net; +} + +template> +void ElementwiseUnarySimpleEndToEnd(const std::vector& 
backends, + UnaryOperation operation, + const std::vector expectedOutput) +{ + using namespace armnn; + + const float qScale = IsQuantizedType() ? 0.25f : 1.0f; + const int32_t qOffset = IsQuantizedType() ? 50 : 0; + + const TensorShape& inputShape = { 2, 2, 2, 2 }; + const TensorShape& outputShape = { 2, 2, 2, 2 }; + + // Builds up the structure of the network + INetworkPtr net = CreateElementwiseUnaryNetwork(inputShape, outputShape, operation, qScale, qOffset); + + BOOST_TEST_CHECKPOINT("create a network"); + + const std::vector input({ 1, -1, 1, 1, 5, -5, 5, 5, + -3, 3, 3, 3, 4, 4, -4, 4 }); + + // quantize data + std::vector qInputData = armnnUtils::QuantizedVector(input, qScale, qOffset); + std::vector qExpectedOutput = armnnUtils::QuantizedVector(expectedOutput, qScale, qOffset); + + std::map> inputTensorData = {{ 0, qInputData }}; + std::map> expectedOutputData = {{ 0, qExpectedOutput }}; + + EndToEndLayerTestImpl(move(net), inputTensorData, expectedOutputData, backends); +} + +} // anonymous namespace diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp index 031210f1fc..e4ce7407bf 100644 --- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp +++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp @@ -427,8 +427,6 @@ template struct LayerTypePolicy; // Every entry in the armnn::LayerType enum must be accounted for below. 
-DECLARE_LAYER_POLICY_1_PARAM(Abs) - DECLARE_LAYER_POLICY_2_PARAM(Activation) DECLARE_LAYER_POLICY_1_PARAM(Addition) @@ -465,6 +463,8 @@ DECLARE_LAYER_POLICY_1_PARAM(Dequantize) DECLARE_LAYER_POLICY_2_PARAM(DetectionPostProcess) +DECLARE_LAYER_POLICY_2_PARAM(ElementwiseUnary) + DECLARE_LAYER_POLICY_2_PARAM(FakeQuantization) DECLARE_LAYER_POLICY_1_PARAM(Floor) @@ -517,8 +517,6 @@ DECLARE_LAYER_POLICY_2_PARAM(Resize) DECLARE_LAYER_POLICY_2_PARAM(Reshape) -DECLARE_LAYER_POLICY_1_PARAM(Rsqrt) - DECLARE_LAYER_POLICY_2_PARAM(Slice) DECLARE_LAYER_POLICY_2_PARAM(Softmax) diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp index 05c307ead2..eba7944cc3 100644 --- a/src/backends/backendsCommon/test/LayerTests.hpp +++ b/src/backends/backendsCommon/test/LayerTests.hpp @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include diff --git a/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp index cc57893439..7706809e8b 100644 --- a/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp @@ -4,85 +4,15 @@ // #include "AbsTestImpl.hpp" +#include "ElementwiseUnaryTestImpl.hpp" -#include -#include -#include - -#include - -namespace -{ - -template> -LayerTestResult Abs2dTestCommon( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::TensorInfo inputTensorInfo, - const armnn::TensorInfo outputTensorInfo, - const std::vector& inputValues, - const std::vector& expectedOutputValues) -{ - boost::ignore_unused(memoryManager); - auto inputTensor = MakeTensor(inputTensorInfo, ConvertToDataType(inputValues,inputTensorInfo)); - - LayerTestResult result(outputTensorInfo); - - result.outputExpected = MakeTensor(outputTensorInfo, - ConvertToDataType(expectedOutputValues,outputTensorInfo)); - - 
std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); - std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); - - armnn::AbsQueueDescriptor descriptor; - - armnn::WorkloadInfo info; - - AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); - AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); - - std::unique_ptr workload = workloadFactory.CreateAbs(descriptor, info); - - inputHandle->Allocate(); - outputHandle->Allocate(); - - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]); - - workload->PostAllocationConfigure(); - workload->Execute(); - - CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get()); - - return result; -} - -} // anonymous namespace - template LayerTestResult Abs2dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - const armnn::TensorShape inputShape{ 2, 2 }; - const armnn::TensorShape outputShape{ 2, 2 }; - - float qScale = 0.0625f; - int32_t qOffset = 64; - - if (ArmnnType == armnn::DataType::QSymmS16) - { - qScale = 0.1f; - qOffset = 0; - } - - armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType); - inputTensorInfo.SetQuantizationScale(qScale); - inputTensorInfo.SetQuantizationOffset(qOffset); - - armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType); - outputTensorInfo.SetQuantizationScale(qScale); - outputTensorInfo.SetQuantizationOffset(qOffset); + const unsigned int inputShape[] = { 2, 2 }; std::vector inputValues { @@ -98,9 +28,14 @@ LayerTestResult Abs2dTest( std::vector expectedOutputValues(inputValues.size()); std::transform(inputValues.begin(), inputValues.end(), expectedOutputValues.begin(), f); - return Abs2dTestCommon(workloadFactory, memoryManager, - inputTensorInfo, outputTensorInfo, - inputValues, expectedOutputValues); + return ElementwiseUnaryTestHelper<2, ArmnnType>( + workloadFactory, + memoryManager, + 
armnn::UnaryOperation::Abs, + inputShape, + inputValues, + inputShape, + expectedOutputValues); } template @@ -108,27 +43,7 @@ LayerTestResult Abs3dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - boost::ignore_unused(memoryManager); - - const armnn::TensorShape inputShape{ 3, 1, 2 }; - const armnn::TensorShape outputShape{ 3, 1, 2 }; - - float qScale = 0.0625f; - int32_t qOffset = 64; - - if (ArmnnType == armnn::DataType::QSymmS16) - { - qScale = 0.1f; - qOffset = 0; - } - - armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType); - inputTensorInfo.SetQuantizationScale(qScale); - inputTensorInfo.SetQuantizationOffset(qOffset); - - armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType); - outputTensorInfo.SetQuantizationScale(qScale); - outputTensorInfo.SetQuantizationOffset(qOffset); + const unsigned int inputShape[] = { 3, 1, 2 }; std::vector inputValues { @@ -143,35 +58,14 @@ LayerTestResult Abs3dTest( std::vectorexpectedOutputValues(inputValues.size()); std::transform(inputValues.begin(), inputValues.end(), expectedOutputValues.begin(), f); - auto inputTensor = MakeTensor(inputTensorInfo, ConvertToDataType(inputValues,inputTensorInfo)); - - LayerTestResult result(outputTensorInfo); - result.outputExpected = MakeTensor(outputTensorInfo, - ConvertToDataType(expectedOutputValues,outputTensorInfo)); - - std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); - std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); - - armnn::AbsQueueDescriptor descriptor; - - armnn::WorkloadInfo info; - - AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); - AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); - - std::unique_ptr workload = workloadFactory.CreateAbs(descriptor, info); - - inputHandle->Allocate(); - outputHandle->Allocate(); - - CopyDataToITensorHandle(inputHandle.get(), 
&inputTensor[0][0][0]); - - workload->PostAllocationConfigure(); - workload->Execute(); - - CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get()); - - return result; + return ElementwiseUnaryTestHelper<3, ArmnnType>( + workloadFactory, + memoryManager, + armnn::UnaryOperation::Abs, + inputShape, + inputValues, + inputShape, + expectedOutputValues); } template @@ -179,14 +73,7 @@ LayerTestResult AbsZeroTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - const armnn::TensorShape inputShape{ 1, 2 }; - const armnn::TensorShape outputShape{ 1, 2 }; - - armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType); - inputTensorInfo.SetQuantizationScale(0.1f); - - armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType); - outputTensorInfo.SetQuantizationScale(0.1f); + const unsigned int inputShape[] = { 1, 2 }; std::vector inputValues { @@ -198,9 +85,14 @@ LayerTestResult AbsZeroTest( 0.f, 0.f }; - return Abs2dTestCommon(workloadFactory, memoryManager, - inputTensorInfo, outputTensorInfo, - inputValues, expectedOutputValues); + return ElementwiseUnaryTestHelper<2, ArmnnType>( + workloadFactory, + memoryManager, + armnn::UnaryOperation::Abs, + inputShape, + inputValues, + inputShape, + expectedOutputValues); } // diff --git a/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp index c0a779c0e6..cbbe140843 100644 --- a/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp @@ -15,6 +15,7 @@ #include #include +#include #include #include @@ -202,4 +203,4 @@ LayerTestResult ElementwiseTestHelper( outValues, quantScale, quantOffset); -} +} \ No newline at end of file diff --git a/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.cpp 
b/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.cpp new file mode 100644 index 0000000000..a2c88a62e7 --- /dev/null +++ b/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.cpp @@ -0,0 +1,14 @@ +// +// Copyright © 2019 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "ElementwiseUnaryTestImpl.hpp" + +std::unique_ptr CreateWorkload( + const armnn::IWorkloadFactory& workloadFactory, + const armnn::WorkloadInfo& info, + const armnn::ElementwiseUnaryQueueDescriptor& descriptor) +{ + return workloadFactory.CreateElementwiseUnary(descriptor, info); +} \ No newline at end of file diff --git a/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp new file mode 100644 index 0000000000..bea4ec205e --- /dev/null +++ b/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp @@ -0,0 +1,113 @@ +// +// Copyright © 2019 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#pragma once + +#include "LayerTestResult.hpp" + +#include + +#include + +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include + +std::unique_ptr CreateWorkload( + const armnn::IWorkloadFactory& workloadFactory, + const armnn::WorkloadInfo& info, + const armnn::ElementwiseUnaryQueueDescriptor& descriptor); + +template > +LayerTestResult ElementwiseUnaryTestHelper( + armnn::IWorkloadFactory & workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager, + armnn::UnaryOperation op, + const unsigned int shape[NumDims], + std::vector values, + float quantScale, + int quantOffset, + const unsigned int outShape[NumDims], + std::vector outValues, + float outQuantScale, + int outQuantOffset) +{ + armnn::TensorInfo inputTensorInfo{NumDims, shape, ArmnnType}; + armnn::TensorInfo outputTensorInfo{NumDims, outShape, ArmnnType}; + + inputTensorInfo.SetQuantizationScale(quantScale); + inputTensorInfo.SetQuantizationOffset(quantOffset); + + outputTensorInfo.SetQuantizationScale(outQuantScale); + outputTensorInfo.SetQuantizationOffset(outQuantOffset); + + auto input = MakeTensor(inputTensorInfo, ConvertToDataType(values, inputTensorInfo)); + + LayerTestResult ret(outputTensorInfo); + + std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::ElementwiseUnaryDescriptor desc(op); + armnn::ElementwiseUnaryQueueDescriptor qDesc; + qDesc.m_Parameters = desc; + armnn::WorkloadInfo info; + AddInputToWorkload(qDesc, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(qDesc, info, outputTensorInfo, outputHandle.get()); + auto workload = CreateWorkload(workloadFactory, info, qDesc); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), input.origin()); + + workload->PostAllocationConfigure(); + 
ExecuteWorkload(*workload, memoryManager); + + CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get()); + + ret.outputExpected = MakeTensor(outputTensorInfo, ConvertToDataType(outValues, + inputTensorInfo)); + return ret; +} + +template > +LayerTestResult ElementwiseUnaryTestHelper( + armnn::IWorkloadFactory & workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager, + armnn::UnaryOperation op, + const unsigned int shape[NumDims], + std::vector values, + const unsigned int outShape[NumDims], + std::vector outValues, + float quantScale = 1.0f, + int quantOffset = 0) +{ + return ElementwiseUnaryTestHelper( + workloadFactory, + memoryManager, + op, + shape, + values, + quantScale, + quantOffset, + outShape, + outValues, + quantScale, + quantOffset); +} \ No newline at end of file diff --git a/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp index db928cf2e0..ca423835dc 100644 --- a/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp @@ -4,76 +4,15 @@ // #include "ReshapeTestImpl.hpp" +#include "ElementwiseUnaryTestImpl.hpp" -#include -#include -#include - -#include - -namespace -{ - -template> -LayerTestResult Rsqrt2dTestCommon( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::TensorInfo inputTensorInfo, - const armnn::TensorInfo outputTensorInfo, - const std::vector& inputValues, - const std::vector& expectedOutputValues) -{ - boost::ignore_unused(memoryManager); - auto inputTensor = MakeTensor(inputTensorInfo, ConvertToDataType(inputValues,inputTensorInfo)); - - LayerTestResult result(outputTensorInfo); - - result.outputExpected = MakeTensor(outputTensorInfo, - ConvertToDataType(expectedOutputValues,outputTensorInfo)); - - std::unique_ptr inputHandle = 
workloadFactory.CreateTensorHandle(inputTensorInfo); - std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); - - armnn::RsqrtQueueDescriptor descriptor; - - armnn::WorkloadInfo info; - - AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); - AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); - - std::unique_ptr workload = workloadFactory.CreateRsqrt(descriptor, info); - - inputHandle->Allocate(); - outputHandle->Allocate(); - - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]); - - workload->PostAllocationConfigure(); - workload->Execute(); - - CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get()); - - return result; -} - -} // anonymous namespace - template LayerTestResult Rsqrt2dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - const armnn::TensorShape inputShape{ 2, 2 }; - const armnn::TensorShape outputShape{ 2, 2 }; - - armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType); - inputTensorInfo.SetQuantizationScale(0.1f); - inputTensorInfo.SetQuantizationOffset(0); - - armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType); - outputTensorInfo.SetQuantizationScale(0.1f); - outputTensorInfo.SetQuantizationOffset(0); + const unsigned int inputShape[] = { 2, 2 }; std::vector inputValues { @@ -87,9 +26,14 @@ LayerTestResult Rsqrt2dTest( 0.25f, 0.2f }; - return Rsqrt2dTestCommon(workloadFactory, memoryManager, - inputTensorInfo, outputTensorInfo, - inputValues, expectedOutputValues); + return ElementwiseUnaryTestHelper<2, ArmnnType>( + workloadFactory, + memoryManager, + armnn::UnaryOperation::Rsqrt, + inputShape, + inputValues, + inputShape, + expectedOutputValues); } template @@ -97,17 +41,7 @@ LayerTestResult Rsqrt3dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - boost::ignore_unused(memoryManager); - const 
armnn::TensorShape inputShape{ 3, 1, 2 }; - const armnn::TensorShape outputShape{ 3, 1, 2 }; - - armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType); - inputTensorInfo.SetQuantizationScale(0.1f); - inputTensorInfo.SetQuantizationOffset(0); - - armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType); - outputTensorInfo.SetQuantizationScale(0.1f); - outputTensorInfo.SetQuantizationOffset(0); + const unsigned int inputShape[] = { 3, 1, 2 }; std::vector inputValues { @@ -121,35 +55,14 @@ LayerTestResult Rsqrt3dTest( 0.2f, 0.125f, 0.1f }; - auto inputTensor = MakeTensor(inputTensorInfo, ConvertToDataType(inputValues,inputTensorInfo)); - - LayerTestResult result(outputTensorInfo); - result.outputExpected = MakeTensor(outputTensorInfo, - ConvertToDataType(expectedOutputValues,outputTensorInfo)); - - std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); - std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); - - armnn::RsqrtQueueDescriptor descriptor; - - armnn::WorkloadInfo info; - - AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); - AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); - - std::unique_ptr workload = workloadFactory.CreateRsqrt(descriptor, info); - - inputHandle->Allocate(); - outputHandle->Allocate(); - - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]); - - workload->PostAllocationConfigure(); - workload->Execute(); - - CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get()); - - return result; + return ElementwiseUnaryTestHelper<3, ArmnnType>( + workloadFactory, + memoryManager, + armnn::UnaryOperation::Rsqrt, + inputShape, + inputValues, + inputShape, + expectedOutputValues); } template @@ -157,14 +70,7 @@ LayerTestResult RsqrtZeroTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - const armnn::TensorShape inputShape{ 1, 2 }; - const 
armnn::TensorShape outputShape{ 1, 2 }; - - armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType); - inputTensorInfo.SetQuantizationScale(0.1f); - - armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType); - outputTensorInfo.SetQuantizationScale(0.1f); + const unsigned int inputShape[] = { 1, 2 }; std::vector inputValues { @@ -176,9 +82,14 @@ LayerTestResult RsqrtZeroTest( INFINITY, -INFINITY }; - return Rsqrt2dTestCommon(workloadFactory, memoryManager, - inputTensorInfo, outputTensorInfo, - inputValues, expectedOutputValues); + return ElementwiseUnaryTestHelper<2, ArmnnType>( + workloadFactory, + memoryManager, + armnn::UnaryOperation::Rsqrt, + inputShape, + inputValues, + inputShape, + expectedOutputValues); } template @@ -186,16 +97,7 @@ LayerTestResult RsqrtNegativeTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - const armnn::TensorShape inputShape{ 1, 2 }; - const armnn::TensorShape outputShape{ 1, 2 }; - - armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType); - inputTensorInfo.SetQuantizationScale(0.1f); - inputTensorInfo.SetQuantizationOffset(0); - - armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType); - outputTensorInfo.SetQuantizationScale(0.1f); - outputTensorInfo.SetQuantizationOffset(0); + const unsigned int inputShape[] = { 1, 2 }; std::vector inputValues { @@ -207,9 +109,14 @@ LayerTestResult RsqrtNegativeTest( -NAN, -NAN }; - return Rsqrt2dTestCommon(workloadFactory, memoryManager, - inputTensorInfo, outputTensorInfo, - inputValues, expectedOutputValues); + return ElementwiseUnaryTestHelper<2, ArmnnType>( + workloadFactory, + memoryManager, + armnn::UnaryOperation::Rsqrt, + inputShape, + inputValues, + inputShape, + expectedOutputValues); } // diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp index f7129d6035..f8cc5074b3 100644 --- a/src/backends/cl/ClLayerSupport.cpp +++ b/src/backends/cl/ClLayerSupport.cpp @@ -160,10 +160,8 
@@ bool ClLayerSupport::IsAbsSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported) const { - FORWARD_WORKLOAD_VALIDATE_FUNC(ClAbsWorkloadValidate, - reasonIfUnsupported, - input, - output); + ElementwiseUnaryDescriptor descriptor(UnaryOperation::Abs); + return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported); } bool ClLayerSupport::IsActivationSupported(const TensorInfo& input, @@ -425,6 +423,29 @@ bool ClLayerSupport::IsDivisionSupported(const TensorInfo& input0, output); } +bool ClLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input, + const TensorInfo& output, + const ElementwiseUnaryDescriptor& descriptor, + Optional reasonIfUnsupported) const +{ + if (descriptor.m_Operation == UnaryOperation::Abs) + { + FORWARD_WORKLOAD_VALIDATE_FUNC(ClAbsWorkloadValidate, + reasonIfUnsupported, + input, + output); + } + else if (descriptor.m_Operation == UnaryOperation::Rsqrt) + { + FORWARD_WORKLOAD_VALIDATE_FUNC(ClRsqrtWorkloadValidate, + reasonIfUnsupported, + input, + output); + } + + return false; +} + bool ClLayerSupport::IsFloorSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported) const @@ -685,7 +706,8 @@ bool ClLayerSupport::IsRsqrtSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported) const { - FORWARD_WORKLOAD_VALIDATE_FUNC(ClRsqrtWorkloadValidate, reasonIfUnsupported, input, output); + ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt); + return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported); } bool ClLayerSupport::IsSliceSupported(const TensorInfo& input, diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp index a21589d555..9371717013 100644 --- a/src/backends/cl/ClLayerSupport.hpp +++ b/src/backends/cl/ClLayerSupport.hpp @@ -12,6 +12,7 @@ namespace armnn class ClLayerSupport : public LayerSupportBase { public: + ARMNN_DEPRECATED_MSG("Use 
IsElementwiseUnarySupported instead") bool IsAbsSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const override; @@ -102,6 +103,11 @@ public: const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const override; + bool IsElementwiseUnarySupported(const TensorInfo& input, + const TensorInfo& output, + const ElementwiseUnaryDescriptor& descriptor, + Optional reasonIfUnsupported = EmptyOptional()) const override; + bool IsFloorSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const override; @@ -223,6 +229,7 @@ public: const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const override; + ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead") bool IsRsqrtSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const override; diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp index f9e6632b0c..4bb2e2a8ce 100644 --- a/src/backends/cl/ClWorkloadFactory.cpp +++ b/src/backends/cl/ClWorkloadFactory.cpp @@ -131,7 +131,12 @@ std::unique_ptr ClWorkloadFactory::CreateSubTensorHandle(ITensorH std::unique_ptr ClWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor, const WorkloadInfo& info) const { - return MakeWorkload(descriptor, info); + boost::ignore_unused(descriptor); + + ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor; + elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Abs); + + return CreateElementwiseUnary(elementwiseUnaryDescriptor, info); } std::unique_ptr ClWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor, @@ -249,6 +254,28 @@ std::unique_ptr ClWorkloadFactory::CreateDivision(const DivisionQueue return MakeWorkload(descriptor, info); } +std::unique_ptr ClWorkloadFactory::CreateElementwiseUnary(const 
ElementwiseUnaryQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + if (descriptor.m_Parameters.m_Operation == UnaryOperation::Abs) + { + AbsQueueDescriptor absQueueDescriptor; + absQueueDescriptor.m_Inputs = descriptor.m_Inputs; + absQueueDescriptor.m_Outputs = descriptor.m_Outputs; + + return MakeWorkload(absQueueDescriptor, info); + } + else if (descriptor.m_Parameters.m_Operation == UnaryOperation::Rsqrt) + { + RsqrtQueueDescriptor rsqrtQueueDescriptor; + rsqrtQueueDescriptor.m_Inputs = descriptor.m_Inputs; + rsqrtQueueDescriptor.m_Outputs = descriptor.m_Outputs; + + return MakeWorkload(rsqrtQueueDescriptor, info); + } + return MakeWorkload(descriptor, info); +} + std::unique_ptr ClWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor, const WorkloadInfo& info) const { @@ -450,7 +477,12 @@ std::unique_ptr ClWorkloadFactory::CreateResizeBilinear(const ResizeB std::unique_ptr ClWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor, const WorkloadInfo& info) const { - return MakeWorkload(descriptor, info); + boost::ignore_unused(descriptor); + + ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor; + elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt); + + return CreateElementwiseUnary(elementwiseUnaryDescriptor, info); } std::unique_ptr ClWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor, diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp index 8f377e959d..980be9192e 100644 --- a/src/backends/cl/ClWorkloadFactory.hpp +++ b/src/backends/cl/ClWorkloadFactory.hpp @@ -38,6 +38,7 @@ public: DataLayout dataLayout, const bool IsMemoryManaged = true) const override; + ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead") std::unique_ptr CreateAbs(const AbsQueueDescriptor& descriptor, const WorkloadInfo& info) const override; @@ -92,6 +93,9 @@ public: std::unique_ptr CreateDivision(const DivisionQueueDescriptor& 
descriptor, const WorkloadInfo& info) const override; + std::unique_ptr CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + std::unique_ptr CreateEqual(const EqualQueueDescriptor& descriptor, const WorkloadInfo& info) const override; @@ -178,6 +182,7 @@ public: std::unique_ptr CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor, const WorkloadInfo& info) const override; + ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead") std::unique_ptr CreateRsqrt(const RsqrtQueueDescriptor& descriptor, const WorkloadInfo& info) const override; diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp index d79745c420..92e771760f 100644 --- a/src/backends/cl/test/ClCreateWorkloadTests.cpp +++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp @@ -146,18 +146,16 @@ BOOST_AUTO_TEST_CASE(CreateDivisionFloat16WorkloadTest) armnn::DataType::Float16>(); } -template -static void ClCreateElementwiseUnaryWorkloadTest() +static void ClCreateElementwiseUnaryWorkloadTest(armnn::UnaryOperation op) { Graph graph; ClWorkloadFactory factory = ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager()); - auto workload = CreateElementwiseUnaryWorkloadTest - (factory, graph); + auto workload = CreateElementwiseUnaryWorkloadTest(factory, graph, op); DescriptorType queueDescriptor = workload->GetData(); @@ -170,10 +168,8 @@ static void ClCreateElementwiseUnaryWorkloadTest() BOOST_AUTO_TEST_CASE(CreateRsqrtFloat32WorkloadTest) { - ClCreateElementwiseUnaryWorkloadTest(); + ClCreateElementwiseUnaryWorkloadTest( + UnaryOperation::Rsqrt); } template diff --git a/src/backends/cl/test/ClEndToEndTests.cpp b/src/backends/cl/test/ClEndToEndTests.cpp index 260f8f68cd..eafdb7c3e5 100644 --- a/src/backends/cl/test/ClEndToEndTests.cpp +++ b/src/backends/cl/test/ClEndToEndTests.cpp @@ -5,12 +5,12 @@ #include -#include #include #include #include #include 
#include +#include #include #include #include @@ -27,7 +27,15 @@ std::vector defaultBackends = {armnn::Compute::GpuAcc}; // Abs BOOST_AUTO_TEST_CASE(ClAbsEndToEndTestFloat32) { - AbsEndToEnd(defaultBackends); + std::vector expectedOutput = + { + 1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f, + 3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f + }; + + ElementwiseUnarySimpleEndToEnd(defaultBackends, + UnaryOperation::Abs, + expectedOutput); } // Constant diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp index a73837b884..fe9bffbee9 100644 --- a/src/backends/neon/NeonLayerSupport.cpp +++ b/src/backends/neon/NeonLayerSupport.cpp @@ -129,10 +129,8 @@ bool NeonLayerSupport::IsAbsSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported) const { - FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAbsWorkloadValidate, - reasonIfUnsupported, - input, - output); + ElementwiseUnaryDescriptor descriptor(UnaryOperation::Abs); + return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported); } bool NeonLayerSupport::IsActivationSupported(const TensorInfo& input, @@ -386,6 +384,29 @@ bool NeonLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& biases); } +bool NeonLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input, + const TensorInfo& output, + const ElementwiseUnaryDescriptor& descriptor, + Optional reasonIfUnsupported) const +{ + if (descriptor.m_Operation == UnaryOperation::Abs) + { + FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAbsWorkloadValidate, + reasonIfUnsupported, + input, + output); + } + else if (descriptor.m_Operation == UnaryOperation::Rsqrt) + { + FORWARD_WORKLOAD_VALIDATE_FUNC(NeonRsqrtWorkloadValidate, + reasonIfUnsupported, + input, + output); + } + + return false; +} + bool NeonLayerSupport::IsFloorSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported) const @@ -656,7 +677,8 @@ bool NeonLayerSupport::IsRsqrtSupported(const TensorInfo& 
input, const TensorInfo& output, Optional reasonIfUnsupported) const { - FORWARD_WORKLOAD_VALIDATE_FUNC(NeonRsqrtWorkloadValidate, reasonIfUnsupported, input, output); + ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt); + return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported); } bool NeonLayerSupport::IsSliceSupported(const TensorInfo& input, diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp index 8e6cd6aded..d429aeceec 100644 --- a/src/backends/neon/NeonLayerSupport.hpp +++ b/src/backends/neon/NeonLayerSupport.hpp @@ -12,6 +12,7 @@ namespace armnn class NeonLayerSupport : public LayerSupportBase { public: + ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead") bool IsAbsSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const override; @@ -103,6 +104,11 @@ public: const Optional& biases, Optional reason = EmptyOptional()) const override; + bool IsElementwiseUnarySupported(const TensorInfo& input, + const TensorInfo& output, + const ElementwiseUnaryDescriptor& descriptor, + Optional reasonIfUnsupported = EmptyOptional()) const override; + bool IsFloorSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const override; @@ -224,6 +230,7 @@ public: const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const override; + ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead") bool IsRsqrtSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const override; diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp index 1cc9e50e0b..82f9bdb924 100644 --- a/src/backends/neon/NeonWorkloadFactory.cpp +++ b/src/backends/neon/NeonWorkloadFactory.cpp @@ -98,7 +98,12 @@ std::unique_ptr NeonWorkloadFactory::CreateTensorHandle(const Ten std::unique_ptr 
NeonWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor, const WorkloadInfo& info) const { - return std::make_unique(descriptor, info); + boost::ignore_unused(descriptor); + + ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor; + elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Abs); + + return CreateElementwiseUnary(elementwiseUnaryDescriptor, info); } std::unique_ptr NeonWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor, @@ -214,6 +219,29 @@ std::unique_ptr NeonWorkloadFactory::CreateDivision( return MakeWorkloadHelper(descriptor, info); } +std::unique_ptr NeonWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& + descriptor, + const WorkloadInfo& info) const +{ + if (descriptor.m_Parameters.m_Operation == UnaryOperation::Abs) + { + AbsQueueDescriptor absQueueDescriptor; + absQueueDescriptor.m_Inputs = descriptor.m_Inputs; + absQueueDescriptor.m_Outputs = descriptor.m_Outputs; + + return std::make_unique(absQueueDescriptor, info); + } + else if (descriptor.m_Parameters.m_Operation == UnaryOperation::Rsqrt) + { + RsqrtQueueDescriptor rsqrtQueueDescriptor; + rsqrtQueueDescriptor.m_Inputs = descriptor.m_Inputs; + rsqrtQueueDescriptor.m_Outputs = descriptor.m_Outputs; + + return std::make_unique(rsqrtQueueDescriptor, info); + } + return MakeWorkloadHelper(descriptor, info); +} + std::unique_ptr NeonWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor, const WorkloadInfo& info) const { @@ -418,7 +446,12 @@ std::unique_ptr NeonWorkloadFactory::CreateResizeBilinear( std::unique_ptr NeonWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor &descriptor, const WorkloadInfo &info) const { - return std::make_unique(descriptor, info); + boost::ignore_unused(descriptor); + + ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor; + elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt); + + return 
CreateElementwiseUnary(elementwiseUnaryDescriptor, info); } std::unique_ptr NeonWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor, diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp index b76a3a340a..44c0629ece 100644 --- a/src/backends/neon/NeonWorkloadFactory.hpp +++ b/src/backends/neon/NeonWorkloadFactory.hpp @@ -39,6 +39,7 @@ public: DataLayout dataLayout, const bool IsMemoryManaged = true) const override; + ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead") std::unique_ptr CreateAbs(const AbsQueueDescriptor& descriptor, const WorkloadInfo& info) const override; @@ -92,6 +93,9 @@ public: std::unique_ptr CreateDivision(const DivisionQueueDescriptor& descriptor, const WorkloadInfo& info) const override; + + std::unique_ptr CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor, + const WorkloadInfo& Info) const override; ARMNN_DEPRECATED_MSG("Use CreateComparison instead") std::unique_ptr CreateEqual(const EqualQueueDescriptor& descriptor, @@ -181,6 +185,7 @@ public: std::unique_ptr CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor, const WorkloadInfo& info) const override; + ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead") std::unique_ptr CreateRsqrt(const RsqrtQueueDescriptor& descriptor, const WorkloadInfo& info) const override; diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp index a08c8f7d2a..400a5a38e2 100644 --- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp +++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp @@ -181,36 +181,6 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload) DataType::QAsymmU8>(); } -template -static void NeonCreateElementwiseUnaryWorkloadTest() -{ - Graph graph; - NeonWorkloadFactory factory = - NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager()); - - auto workload = 
CreateElementwiseUnaryWorkloadTest - (factory, graph); - - DescriptorType queueDescriptor = workload->GetData(); - - auto inputHandle = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); - - BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({2, 3}, DataType))); - BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({2, 3}, DataType))); -} - -BOOST_AUTO_TEST_CASE(CreateRsqrtFloat32Workload) -{ - NeonCreateElementwiseUnaryWorkloadTest(); -} - template static void NeonCreateBatchNormalizationWorkloadTest(DataLayout dataLayout) { diff --git a/src/backends/neon/test/NeonEndToEndTests.cpp b/src/backends/neon/test/NeonEndToEndTests.cpp index e1c929b17b..4e9fe0f3c3 100644 --- a/src/backends/neon/test/NeonEndToEndTests.cpp +++ b/src/backends/neon/test/NeonEndToEndTests.cpp @@ -5,13 +5,13 @@ #include -#include #include #include #include #include #include #include +#include #include #include #include @@ -28,7 +28,15 @@ std::vector defaultBackends = {armnn::Compute::CpuAcc}; // Abs BOOST_AUTO_TEST_CASE(NeonAbsEndToEndTestFloat32) { - AbsEndToEnd(defaultBackends); + std::vector expectedOutput = + { + 1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f, + 3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f + }; + + ElementwiseUnarySimpleEndToEnd(defaultBackends, + UnaryOperation::Abs, + expectedOutput); } // Constant diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp index 26a61d45d5..491081dbac 100644 --- a/src/backends/reference/RefLayerSupport.cpp +++ b/src/backends/reference/RefLayerSupport.cpp @@ -70,28 +70,10 @@ std::string CreateIncorrectDimensionsErrorMsg(unsigned int expected, bool RefLayerSupport::IsAbsSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported) const { - bool supported = true; - std::array supportedTypes = - { - DataType::Float32, - DataType::Float16, - DataType::QAsymmU8, - DataType::QSymmS16 
- }; - - supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported, - "Reference abs: input type not supported"); - - supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported, - "Reference abs: output type not supported"); - - supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported, - "Reference abs: input and output types not matching"); - - supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported, - "Reference abs: input and output shapes have different number of total elements"); - - return supported; + return IsElementwiseUnarySupported(input, + output, + ElementwiseUnaryDescriptor(UnaryOperation::Abs), + reasonIfUnsupported); } bool RefLayerSupport::IsActivationSupported(const TensorInfo& input, @@ -714,6 +696,39 @@ bool RefLayerSupport::IsDivisionSupported(const TensorInfo& input0, return supported; } +bool RefLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input, + const TensorInfo& output, + const ElementwiseUnaryDescriptor& descriptor, + Optional reasonIfUnsupported) const +{ + boost::ignore_unused(descriptor); + + std::array supportedTypes = + { + DataType::Float32, + DataType::Float16, + DataType::QAsymmU8, + DataType::QSymmS16 + }; + + bool supported = true; + + supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported, + "Reference elementwise unary: input type not supported"); + + supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported, + "Reference elementwise unary: output type not supported"); + + supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported, + "Reference elementwise unary: input and output types not matching"); + + supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported, + "Reference elementwise unary: input and output shapes" + "have different number of total elements"); + + return supported; +} + bool 
RefLayerSupport::IsEqualSupported(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& output, @@ -1499,28 +1514,10 @@ bool RefLayerSupport::IsRsqrtSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported) const { - bool supported = true; - std::array supportedTypes = - { - DataType::Float32, - DataType::Float16, - DataType::QAsymmU8, - DataType::QSymmS16 - }; - - supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported, - "Reference rsqrt: input type not supported"); - - supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported, - "Reference rsqrt: output type not supported"); - - supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported, - "Reference rsqrt: input and output types not matching"); - - supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported, - "Reference Rsqrt: input and output shapes have different number of total elements"); - - return supported; + return IsElementwiseUnarySupported(input, + output, + ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), + reasonIfUnsupported); } bool RefLayerSupport::IsSliceSupported(const TensorInfo& input, diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp index a7d6303d86..123c2643df 100644 --- a/src/backends/reference/RefLayerSupport.hpp +++ b/src/backends/reference/RefLayerSupport.hpp @@ -12,6 +12,7 @@ namespace armnn class RefLayerSupport : public LayerSupportBase { public: + ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead") bool IsAbsSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const override; @@ -117,6 +118,11 @@ public: const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const override; + bool IsElementwiseUnarySupported(const TensorInfo& input, + const TensorInfo& output, + const 
ElementwiseUnaryDescriptor& descriptor, + Optional reasonIfUnsupported = EmptyOptional()) const override; + ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead") bool IsEqualSupported(const TensorInfo& input0, const TensorInfo& input1, @@ -247,7 +253,8 @@ public: const TensorInfo& output, const ResizeDescriptor& descriptor, Optional reasonIfUnsupported = EmptyOptional()) const override; - + + ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead") bool IsRsqrtSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const override; diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp index 2db47d35c2..e7a9c19fc7 100644 --- a/src/backends/reference/RefWorkloadFactory.cpp +++ b/src/backends/reference/RefWorkloadFactory.cpp @@ -98,7 +98,11 @@ std::unique_ptr RefWorkloadFactory::CreateTensorHandle(const Tens std::unique_ptr RefWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor, const WorkloadInfo& info) const { - return std::make_unique(descriptor, info); + boost::ignore_unused(descriptor); + ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor; + elementwiseUnaryDescriptor.m_Parameters.m_Operation = UnaryOperation::Abs; + + return CreateElementwiseUnary(elementwiseUnaryDescriptor, info); } std::unique_ptr RefWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor, @@ -221,6 +225,12 @@ std::unique_ptr RefWorkloadFactory::CreateDivision(const DivisionQueu return std::make_unique(descriptor, info); } +std::unique_ptr RefWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return std::make_unique(descriptor, info); +} + std::unique_ptr RefWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor, const WorkloadInfo& info) const { @@ -463,7 +473,11 @@ std::unique_ptr RefWorkloadFactory::CreateResizeBilinear(const Resize 
std::unique_ptr RefWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor, const WorkloadInfo& info) const { - return std::make_unique(descriptor, info); + boost::ignore_unused(descriptor); + ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor; + elementwiseUnaryDescriptor.m_Parameters.m_Operation = UnaryOperation::Rsqrt; + + return CreateElementwiseUnary(elementwiseUnaryDescriptor, info); } std::unique_ptr RefWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor, diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp index 80393c3f3a..b5b9b0faf0 100644 --- a/src/backends/reference/RefWorkloadFactory.hpp +++ b/src/backends/reference/RefWorkloadFactory.hpp @@ -59,6 +59,7 @@ public: DataLayout dataLayout, const bool IsMemoryManaged = true) const override; + ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead") std::unique_ptr CreateAbs(const AbsQueueDescriptor& descriptor, const WorkloadInfo& info) const override; @@ -113,6 +114,9 @@ public: std::unique_ptr CreateDivision(const DivisionQueueDescriptor& descriptor, const WorkloadInfo& info) const override; + std::unique_ptr CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + ARMNN_DEPRECATED_MSG("Use CreateComparison instead") std::unique_ptr CreateEqual(const EqualQueueDescriptor& descriptor, const WorkloadInfo& info) const override; @@ -204,6 +208,7 @@ public: std::unique_ptr CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor, const WorkloadInfo& info) const override; + ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead") std::unique_ptr CreateRsqrt(const RsqrtQueueDescriptor& descriptor, const WorkloadInfo& info) const override; diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk index 5f9af59e74..412dc9438c 100644 --- a/src/backends/reference/backend.mk +++ b/src/backends/reference/backend.mk @@ -21,7 
+21,6 @@ BACKEND_SOURCES := \ RefWorkloadFactory.cpp \ RefRegistryInitializer.cpp \ RefTensorHandleFactory.cpp \ - workloads/Abs.cpp \ workloads/Activation.cpp \ workloads/ArgMinMax.cpp \ workloads/BatchNormImpl.cpp \ @@ -43,7 +42,6 @@ BACKEND_SOURCES := \ workloads/Pad.cpp \ workloads/Pooling2d.cpp \ workloads/PreluImpl.cpp \ - workloads/RefAbsWorkload.cpp \ workloads/RefActivationWorkload.cpp \ workloads/RefArgMinMaxWorkload.cpp \ workloads/RefBatchNormalizationWorkload.cpp \ @@ -60,6 +58,7 @@ BACKEND_SOURCES := \ workloads/RefDequantizeWorkload.cpp \ workloads/RefDetectionPostProcessWorkload.cpp \ workloads/RefElementwiseWorkload.cpp \ + workloads/RefElementwiseUnaryWorkload.cpp \ workloads/RefFakeQuantizationFloat32Workload.cpp \ workloads/RefFloorWorkload.cpp \ workloads/RefFullyConnectedWorkload.cpp \ @@ -78,7 +77,6 @@ BACKEND_SOURCES := \ workloads/RefReshapeWorkload.cpp \ workloads/RefResizeBilinearWorkload.cpp \ workloads/RefResizeWorkload.cpp \ - workloads/RefRsqrtWorkload.cpp \ workloads/RefSliceWorkload.cpp \ workloads/RefSoftmaxWorkload.cpp \ workloads/RefSpaceToBatchNdWorkload.cpp \ @@ -88,7 +86,6 @@ BACKEND_SOURCES := \ workloads/RefSplitterWorkload.cpp \ workloads/RefTransposeConvolution2dWorkload.cpp \ workloads/Resize.cpp \ - workloads/Rsqrt.cpp \ workloads/Slice.cpp \ workloads/SpaceToBatchNd.cpp \ workloads/SpaceToDepth.cpp \ diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp index 23a8e9b9e9..b83d205970 100644 --- a/src/backends/reference/test/RefCreateWorkloadTests.cpp +++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp @@ -717,41 +717,6 @@ BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32Nhwc) RefCreateResizeBilinearTest(DataLayout::NHWC); } -template -static void RefCreateRsqrtTest() -{ - Graph graph; - RefWorkloadFactory factory = GetFactory(); - - auto workload = CreateRsqrtWorkloadTest(factory, graph); - - // Checks that outputs are as we expect them (see 
definition of CreateRsqrtWorkloadTest). - CheckInputOutput(std::move(workload), - TensorInfo({ 1, 1 }, DataType), - TensorInfo({ 1, 1 }, DataType)); - -} - -BOOST_AUTO_TEST_CASE(CreateRsqrtFloat32) -{ - RefCreateRsqrtTest(); -} - -BOOST_AUTO_TEST_CASE(CreateRsqrtFloat16) -{ - RefCreateRsqrtTest(); -} - -BOOST_AUTO_TEST_CASE(CreateRsqrtUint8) -{ - RefCreateRsqrtTest(); -} - -BOOST_AUTO_TEST_CASE(CreateRsqrtQsymm16) -{ - RefCreateRsqrtTest(); -} - template static void RefCreateBatchToSpaceNdTest() { diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp index 75eccdee88..54a68810f6 100644 --- a/src/backends/reference/test/RefEndToEndTests.cpp +++ b/src/backends/reference/test/RefEndToEndTests.cpp @@ -5,7 +5,6 @@ #include -#include #include #include #include @@ -13,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -32,17 +32,43 @@ std::vector defaultBackends = {armnn::Compute::CpuRef}; // Abs BOOST_AUTO_TEST_CASE(RefAbsEndToEndTestFloat32) { - AbsEndToEnd(defaultBackends); + std::vector expectedOutput = + { + 1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f, + 3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f + }; + + ElementwiseUnarySimpleEndToEnd(defaultBackends, + UnaryOperation::Abs, + expectedOutput); } BOOST_AUTO_TEST_CASE(RefAbsEndToEndTestUint8) { - AbsEndToEnd(defaultBackends); + // Note the expected output will be implicitly quantized by the below test function + std::vector expectedOutput = + { + 1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f, + 3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f + }; + + ElementwiseUnarySimpleEndToEnd(defaultBackends, + UnaryOperation::Abs, + expectedOutput); } BOOST_AUTO_TEST_CASE(RefAbsEndToEndTestInt16) { - AbsEndToEnd(defaultBackends); + // Note the expected output will be implicitly quantized by the below test function + std::vector expectedOutput = + { + 1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f, + 3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f + }; + + 
ElementwiseUnarySimpleEndToEnd(defaultBackends, + UnaryOperation::Abs, + expectedOutput); } // Constant diff --git a/src/backends/reference/workloads/Abs.cpp b/src/backends/reference/workloads/Abs.cpp deleted file mode 100644 index 6a6a79ca56..0000000000 --- a/src/backends/reference/workloads/Abs.cpp +++ /dev/null @@ -1,23 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#include "Abs.hpp" - -namespace armnn -{ - -void Abs(Decoder& in, - Encoder& out, - const TensorInfo& tensorInfo) -{ - for (unsigned int i = 0u; i < tensorInfo.GetNumElements(); ++i) - { - out[i]; - in[i]; - out.Set(std::abs(in.Get())); - } -} - -} //namespace armnn diff --git a/src/backends/reference/workloads/Abs.hpp b/src/backends/reference/workloads/Abs.hpp index b1165d2d93..b05f2e3367 100644 --- a/src/backends/reference/workloads/Abs.hpp +++ b/src/backends/reference/workloads/Abs.hpp @@ -1,19 +1,22 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2019 Arm Ltd. All rights reserved. // SPDX-License-Identifier: MIT // -#include "BaseIterator.hpp" -#include -#include +#pragma once + +#include namespace armnn { - -/// Performs the absolute function elementwise -/// on the inputs to give the outputs. -void Abs(Decoder& in, - Encoder& out, - const TensorInfo& tensorInfo); + template +struct abs : public std::unary_function + { + T + operator () (const T& inputData) const + { + return std::abs(inputData); + } + }; } //namespace armnn diff --git a/src/backends/reference/workloads/Broadcast.cpp b/src/backends/reference/workloads/Broadcast.cpp index 8421a0a7ed..24af0fc4b1 100644 --- a/src/backends/reference/workloads/Broadcast.cpp +++ b/src/backends/reference/workloads/Broadcast.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2019 Arm Ltd. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -30,4 +30,23 @@ BroadcastLoop::BroadcastLoop(const TensorShape& inShape0, const TensorShape& inS } } +BroadcastLoop::BroadcastLoop(const TensorShape& inShape, const TensorShape& outShape) +: m_DimData(outShape.GetNumDimensions()) +{ + const unsigned int numDims = GetNumDimensions(); + + unsigned int sIn = 1; + unsigned int sOut = 1; + + for (unsigned int j = numDims - 1, k = 0; k < numDims ; k++, j--) + { + m_DimData[j].m_DimSize = outShape[j]; + m_DimData[j].m_Stride1 = (inShape[j] > 1) ? sIn : 0; + m_DimData[j].m_StrideOut = sOut; + + sIn *= inShape[j]; + sOut *= outShape[j]; + } +} + } // namespace armnn diff --git a/src/backends/reference/workloads/Broadcast.hpp b/src/backends/reference/workloads/Broadcast.hpp index 5bf6be8939..a3d944ae75 100644 --- a/src/backends/reference/workloads/Broadcast.hpp +++ b/src/backends/reference/workloads/Broadcast.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2019 Arm Ltd. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -15,6 +15,8 @@ struct BroadcastLoop { BroadcastLoop(const TensorShape& inShape0, const TensorShape& inShape1, const TensorShape& outShape); + BroadcastLoop(const TensorShape& inShape, const TensorShape& outShape); + unsigned int GetNumDimensions() { return static_cast(m_DimData.size()); @@ -56,6 +58,37 @@ struct BroadcastLoop outData -= outDataMovement; } + template + void Unroll(Func operationFunc, + unsigned int dimension, + DecoderOp& inData, + EncoderOp& outData) + { + if (dimension >= GetNumDimensions()) + { + outData.Set(operationFunc(inData.Get())); + return; + } + + unsigned int inDataMovement = 0; + unsigned int outDataMovement = 0; + + for (unsigned int i = 0; i < m_DimData[dimension].m_DimSize; i++) + { + Unroll(operationFunc, dimension + 1, inData, outData); + + inData += m_DimData[dimension].m_Stride1; + outData += m_DimData[dimension].m_StrideOut; + + inDataMovement += m_DimData[dimension].m_Stride1; + outDataMovement += m_DimData[dimension].m_StrideOut; + } + + // move iterator back to the start + inData -= inDataMovement; + outData -= outDataMovement; + } + private: // Struct to hold the dimension data. 
struct BroadcastDimensionData diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt index dbbdd89fd4..6795204d59 100644 --- a/src/backends/reference/workloads/CMakeLists.txt +++ b/src/backends/reference/workloads/CMakeLists.txt @@ -4,7 +4,6 @@ # list(APPEND armnnRefBackendWorkloads_sources - Abs.cpp Abs.hpp ArgMinMax.cpp ArgMinMax.hpp @@ -33,6 +32,7 @@ list(APPEND armnnRefBackendWorkloads_sources ElementwiseFunction.cpp ElementwiseFunction.hpp Encoders.hpp + Exp.hpp FullyConnected.cpp FullyConnected.hpp Gather.cpp @@ -55,8 +55,6 @@ list(APPEND armnnRefBackendWorkloads_sources Pooling2d.hpp PreluImpl.cpp PreluImpl.hpp - RefAbsWorkload.cpp - RefAbsWorkload.hpp RefActivationWorkload.cpp RefActivationWorkload.hpp RefArgMinMaxWorkload.cpp @@ -89,6 +87,8 @@ list(APPEND armnnRefBackendWorkloads_sources RefDequantizeWorkload.hpp RefDetectionPostProcessWorkload.cpp RefDetectionPostProcessWorkload.hpp + RefElementwiseUnaryWorkload.cpp + RefElementwiseUnaryWorkload.hpp RefFakeQuantizationFloat32Workload.cpp RefFakeQuantizationFloat32Workload.hpp RefFloorWorkload.cpp @@ -125,8 +125,6 @@ list(APPEND armnnRefBackendWorkloads_sources RefResizeBilinearWorkload.hpp RefResizeWorkload.cpp RefResizeWorkload.hpp - RefRsqrtWorkload.cpp - RefRsqrtWorkload.hpp RefSliceWorkload.cpp RefSliceWorkload.hpp RefSoftmaxWorkload.cpp @@ -147,7 +145,6 @@ list(APPEND armnnRefBackendWorkloads_sources RefWorkloadUtils.hpp Resize.cpp Resize.hpp - Rsqrt.cpp Rsqrt.hpp Slice.cpp Slice.hpp @@ -159,6 +156,7 @@ list(APPEND armnnRefBackendWorkloads_sources SpaceToDepth.cpp Splitter.hpp Splitter.cpp + Sqrt.hpp Stack.cpp Stack.hpp StridedSlice.hpp diff --git a/src/backends/reference/workloads/ElementwiseFunction.cpp b/src/backends/reference/workloads/ElementwiseFunction.cpp index 888037f9a6..5687cf5861 100644 --- a/src/backends/reference/workloads/ElementwiseFunction.cpp +++ b/src/backends/reference/workloads/ElementwiseFunction.cpp @@ -7,36 +7,56 @@ 
#include "Broadcast.hpp" #include #include "Minimum.hpp" - #include "Maximum.hpp" +#include "Abs.hpp" +#include "Exp.hpp" +#include "Rsqrt.hpp" +#include "Sqrt.hpp" + namespace armnn { template -ElementwiseFunction::ElementwiseFunction(const TensorShape& inShape0, - const TensorShape& inShape1, - const TensorShape& outShape, - armnn::Decoder& inData0, - armnn::Decoder& inData1, - armnn::Encoder& outData) +ElementwiseBinaryFunction::ElementwiseBinaryFunction(const TensorShape& inShape0, + const TensorShape& inShape1, + const TensorShape& outShape, + Decoder& inData0, + Decoder& inData1, + Encoder& outData) { BroadcastLoop(inShape0, inShape1, outShape).Unroll(Functor(), 0, inData0, inData1, outData); } +template +ElementwiseUnaryFunction::ElementwiseUnaryFunction(const TensorShape& inShape, + const TensorShape& outShape, + Decoder& inData, + Encoder& outData) +{ + BroadcastLoop(inShape, outShape).Unroll(Functor(), 0, inData, outData); +} + } //namespace armnn -template struct armnn::ElementwiseFunction>; -template struct armnn::ElementwiseFunction>; -template struct armnn::ElementwiseFunction>; -template struct armnn::ElementwiseFunction>; -template struct armnn::ElementwiseFunction>; -template struct armnn::ElementwiseFunction>; +template struct armnn::ElementwiseBinaryFunction>; +template struct armnn::ElementwiseBinaryFunction>; +template struct armnn::ElementwiseBinaryFunction>; +template struct armnn::ElementwiseBinaryFunction>; +template struct armnn::ElementwiseBinaryFunction>; +template struct armnn::ElementwiseBinaryFunction>; // Comparison -template struct armnn::ElementwiseFunction>; -template struct armnn::ElementwiseFunction>; -template struct armnn::ElementwiseFunction>; -template struct armnn::ElementwiseFunction>; -template struct armnn::ElementwiseFunction>; -template struct armnn::ElementwiseFunction>; +template struct armnn::ElementwiseBinaryFunction>; +template struct armnn::ElementwiseBinaryFunction>; +template struct 
armnn::ElementwiseBinaryFunction>; +template struct armnn::ElementwiseBinaryFunction>; +template struct armnn::ElementwiseBinaryFunction>; +template struct armnn::ElementwiseBinaryFunction>; + +// Unary +template struct armnn::ElementwiseUnaryFunction>; +template struct armnn::ElementwiseUnaryFunction>; +template struct armnn::ElementwiseUnaryFunction>; +template struct armnn::ElementwiseUnaryFunction>; +template struct armnn::ElementwiseUnaryFunction>; diff --git a/src/backends/reference/workloads/ElementwiseFunction.hpp b/src/backends/reference/workloads/ElementwiseFunction.hpp index fd1fab0690..8259ba5ac7 100644 --- a/src/backends/reference/workloads/ElementwiseFunction.hpp +++ b/src/backends/reference/workloads/ElementwiseFunction.hpp @@ -12,17 +12,29 @@ namespace armnn { template -struct ElementwiseFunction +struct ElementwiseBinaryFunction { using OutType = typename Functor::result_type; using InType = typename Functor::first_argument_type; - ElementwiseFunction(const TensorShape& inShape0, - const TensorShape& inShape1, - const TensorShape& outShape, - armnn::Decoder& inData0, - armnn::Decoder& inData1, - armnn::Encoder& outData); + ElementwiseBinaryFunction(const TensorShape& inShape0, + const TensorShape& inShape1, + const TensorShape& outShape, + Decoder& inData0, + Decoder& inData1, + Encoder& outData); +}; + +template +struct ElementwiseUnaryFunction +{ + using OutType = typename Functor::result_type; + using InType = typename Functor::argument_type; + + ElementwiseUnaryFunction(const TensorShape& inShape, + const TensorShape& outShape, + Decoder& inData, + Encoder& outData); }; } //namespace armnn diff --git a/src/backends/reference/workloads/Exp.hpp b/src/backends/reference/workloads/Exp.hpp new file mode 100644 index 0000000000..1a046728ba --- /dev/null +++ b/src/backends/reference/workloads/Exp.hpp @@ -0,0 +1,22 @@ +// +// Copyright © 2019 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#pragma once + +#include + +namespace armnn +{ + template +struct exp : public std::unary_function + { + T + operator () (const T& inputData) const + { + return std::exp(inputData); + } + }; + +} //namespace armnn diff --git a/src/backends/reference/workloads/RefAbsWorkload.cpp b/src/backends/reference/workloads/RefAbsWorkload.cpp deleted file mode 100644 index 5c1f8c0c69..0000000000 --- a/src/backends/reference/workloads/RefAbsWorkload.cpp +++ /dev/null @@ -1,37 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#include "RefAbsWorkload.hpp" - -#include "Abs.hpp" -#include "Decoders.hpp" -#include "Encoders.hpp" -#include "RefWorkloadUtils.hpp" - -#include - -namespace armnn -{ - -void RefAbsWorkload::Execute() const -{ - ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefAbsWorkload_Execute"); - - const TensorInfo& inputTensorInfo = GetTensorInfo(m_Data.m_Inputs[0]); - - std::unique_ptr> decoderPtr = MakeDecoder(inputTensorInfo, m_Data.m_Inputs[0]->Map()); - Decoder& decoder = *decoderPtr; - - const TensorInfo& outputTensorInfo = GetTensorInfo(m_Data.m_Outputs[0]); - - std::unique_ptr> encoderPtr = MakeEncoder(outputTensorInfo, m_Data.m_Outputs[0]->Map()); - Encoder& encoder = *encoderPtr; - - Abs(decoder, - encoder, - inputTensorInfo); -} - -} //namespace armnn diff --git a/src/backends/reference/workloads/RefAbsWorkload.hpp b/src/backends/reference/workloads/RefAbsWorkload.hpp deleted file mode 100644 index 68105556d5..0000000000 --- a/src/backends/reference/workloads/RefAbsWorkload.hpp +++ /dev/null @@ -1,21 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. 
-// SPDX-License-Identifier: MIT -// - -#pragma once - -#include -#include - -namespace armnn -{ - -class RefAbsWorkload : public BaseWorkload -{ -public: - using BaseWorkload::BaseWorkload; - virtual void Execute() const override; -}; - -} //namespace armnn diff --git a/src/backends/reference/workloads/RefComparisonWorkload.cpp b/src/backends/reference/workloads/RefComparisonWorkload.cpp index 60446226be..52ad9a2879 100644 --- a/src/backends/reference/workloads/RefComparisonWorkload.cpp +++ b/src/backends/reference/workloads/RefComparisonWorkload.cpp @@ -52,12 +52,12 @@ void RefComparisonWorkload::Execute() const m_Input1->Reset(m_Data.m_Inputs[1]->Map()); m_Output->Reset(m_Data.m_Outputs[0]->Map()); - using EqualFunction = ElementwiseFunction>; - using GreaterFunction = ElementwiseFunction>; - using GreaterOrEqualFunction = ElementwiseFunction>; - using LessFunction = ElementwiseFunction>; - using LessOrEqualFunction = ElementwiseFunction>; - using NotEqualFunction = ElementwiseFunction>; + using EqualFunction = ElementwiseBinaryFunction>; + using GreaterFunction = ElementwiseBinaryFunction>; + using GreaterOrEqualFunction = ElementwiseBinaryFunction>; + using LessFunction = ElementwiseBinaryFunction>; + using LessOrEqualFunction = ElementwiseBinaryFunction>; + using NotEqualFunction = ElementwiseBinaryFunction>; switch (m_Data.m_Parameters.m_Operation) { diff --git a/src/backends/reference/workloads/RefElementwiseUnaryWorkload.cpp b/src/backends/reference/workloads/RefElementwiseUnaryWorkload.cpp new file mode 100644 index 0000000000..4fbb0d123f --- /dev/null +++ b/src/backends/reference/workloads/RefElementwiseUnaryWorkload.cpp @@ -0,0 +1,95 @@ +// +// Copyright © 2019 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include "RefElementwiseUnaryWorkload.hpp" + +#include "Decoders.hpp" +#include "ElementwiseFunction.hpp" +#include "Encoders.hpp" +#include "RefWorkloadUtils.hpp" +#include "Abs.hpp" +#include "Exp.hpp" +#include "Rsqrt.hpp" +#include "Sqrt.hpp" + +#include + +#include + +#include + +namespace armnn +{ + +RefElementwiseUnaryWorkload::RefElementwiseUnaryWorkload(const ElementwiseUnaryQueueDescriptor& desc, + const WorkloadInfo& info) + : BaseWorkload(desc, info) +{} + +void RefElementwiseUnaryWorkload::PostAllocationConfigure() +{ + const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]); + const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]); + + m_Input = MakeDecoder(inputInfo); + + m_Output = MakeEncoder(outputInfo); +} + +void RefElementwiseUnaryWorkload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefElementwiseUnaryWorkload_Execute"); + + const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]); + const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]); + + const TensorShape& inShape = inputInfo.GetShape(); + const TensorShape& outShape = outputInfo.GetShape(); + + m_Input->Reset(m_Data.m_Inputs[0]->Map()); + m_Output->Reset(m_Data.m_Outputs[0]->Map()); + + using AbsFunction = ElementwiseUnaryFunction>; + using ExpFunction = ElementwiseUnaryFunction>; + using NegFunction = ElementwiseUnaryFunction>; + using RsqrtFunction = ElementwiseUnaryFunction>; + using SqrtFunction = ElementwiseUnaryFunction>; + + switch (m_Data.m_Parameters.m_Operation) + { + case UnaryOperation::Abs: + { + AbsFunction(inShape, outShape, *m_Input, *m_Output); + break; + } + case UnaryOperation::Exp: + { + ExpFunction(inShape, outShape, *m_Input, *m_Output); + break; + } + case UnaryOperation::Neg: + { + NegFunction(inShape, outShape, *m_Input, *m_Output); + break; + } + case UnaryOperation::Rsqrt: + { + RsqrtFunction(inShape, outShape, *m_Input, *m_Output); + break; + } + case 
UnaryOperation::Sqrt: + { + SqrtFunction(inShape, outShape, *m_Input, *m_Output); + break; + } + default: + { + throw InvalidArgumentException(std::string("Unsupported unary operation ") + + GetUnaryOperationAsCString(m_Data.m_Parameters.m_Operation), CHECK_LOCATION()); + } + } +} + +} // namespace armnn diff --git a/src/backends/reference/workloads/RefElementwiseUnaryWorkload.hpp b/src/backends/reference/workloads/RefElementwiseUnaryWorkload.hpp new file mode 100644 index 0000000000..efb2865ebd --- /dev/null +++ b/src/backends/reference/workloads/RefElementwiseUnaryWorkload.hpp @@ -0,0 +1,33 @@ +// +// Copyright © 2019 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include "BaseIterator.hpp" + +#include +#include + +namespace armnn +{ + +class RefElementwiseUnaryWorkload : public BaseWorkload +{ +public: + using BaseWorkload::m_Data; + + RefElementwiseUnaryWorkload(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info); + void PostAllocationConfigure() override; + void Execute() const override; + +private: + using InType = float; + using OutType = float; + + std::unique_ptr> m_Input; + std::unique_ptr> m_Output; +}; + +} // namespace armnn diff --git a/src/backends/reference/workloads/RefElementwiseWorkload.cpp b/src/backends/reference/workloads/RefElementwiseWorkload.cpp index 7e02f032ef..18bf0a7ad9 100644 --- a/src/backends/reference/workloads/RefElementwiseWorkload.cpp +++ b/src/backends/reference/workloads/RefElementwiseWorkload.cpp @@ -53,12 +53,12 @@ void RefElementwiseWorkload::Execute() c m_Input1->Reset(m_Data.m_Inputs[1]->Map()); m_Output->Reset(m_Data.m_Outputs[0]->Map()); - ElementwiseFunction(inShape0, - inShape1, - outShape, - *m_Input0, - *m_Input1, - *m_Output); + ElementwiseBinaryFunction(inShape0, + inShape1, + outShape, + *m_Input0, + *m_Input1, + *m_Output); } } //namespace armnn diff --git a/src/backends/reference/workloads/RefElementwiseWorkload.hpp 
b/src/backends/reference/workloads/RefElementwiseWorkload.hpp index ee0d80b172..264ddce2de 100644 --- a/src/backends/reference/workloads/RefElementwiseWorkload.hpp +++ b/src/backends/reference/workloads/RefElementwiseWorkload.hpp @@ -21,8 +21,8 @@ template { public: - using InType = typename ElementwiseFunction::InType; - using OutType = typename ElementwiseFunction::OutType; + using InType = typename ElementwiseBinaryFunction::InType; + using OutType = typename ElementwiseBinaryFunction::OutType; using BaseWorkload::m_Data; RefElementwiseWorkload(const ParentDescriptor& descriptor, const WorkloadInfo& info); diff --git a/src/backends/reference/workloads/RefRsqrtWorkload.cpp b/src/backends/reference/workloads/RefRsqrtWorkload.cpp deleted file mode 100644 index fd6b9a3549..0000000000 --- a/src/backends/reference/workloads/RefRsqrtWorkload.cpp +++ /dev/null @@ -1,37 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#include "RefRsqrtWorkload.hpp" - -#include "Decoders.hpp" -#include "Encoders.hpp" -#include "RefWorkloadUtils.hpp" -#include "Rsqrt.hpp" - -#include - -namespace armnn -{ - -void RefRsqrtWorkload::Execute() const -{ - ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefRsqrtWorkload_Execute"); - - const TensorInfo& inputTensorInfo = GetTensorInfo(m_Data.m_Inputs[0]); - - std::unique_ptr> decoderPtr = MakeDecoder(inputTensorInfo, m_Data.m_Inputs[0]->Map()); - Decoder& decoder = *decoderPtr; - - const TensorInfo& outputTensorInfo = GetTensorInfo(m_Data.m_Outputs[0]); - - std::unique_ptr> encoderPtr = MakeEncoder(outputTensorInfo, m_Data.m_Outputs[0]->Map()); - Encoder& encoder = *encoderPtr; - - Rsqrt(decoder, - encoder, - GetTensorInfo(m_Data.m_Inputs[0])); -} - -} //namespace armnn diff --git a/src/backends/reference/workloads/RefRsqrtWorkload.hpp b/src/backends/reference/workloads/RefRsqrtWorkload.hpp deleted file mode 100644 index 6c8ad5bc60..0000000000 --- 
a/src/backends/reference/workloads/RefRsqrtWorkload.hpp +++ /dev/null @@ -1,21 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#pragma once - -#include -#include - -namespace armnn -{ - -class RefRsqrtWorkload : public BaseWorkload -{ -public: - using BaseWorkload::BaseWorkload; - virtual void Execute() const override; -}; - -} //namespace armnn diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp index 1f9ad4a19a..7034b67aa5 100644 --- a/src/backends/reference/workloads/RefWorkloads.hpp +++ b/src/backends/reference/workloads/RefWorkloads.hpp @@ -5,7 +5,6 @@ #pragma once -#include "Abs.hpp" #include "Activation.hpp" #include "ArgMinMax.hpp" #include "BatchNormImpl.hpp" @@ -15,7 +14,6 @@ #include "FullyConnected.hpp" #include "Gather.hpp" #include "Pooling2d.hpp" -#include "RefAbsWorkload.hpp" #include "RefActivationWorkload.hpp" #include "RefArgMinMaxWorkload.hpp" #include "RefBatchNormalizationWorkload.hpp" @@ -33,6 +31,7 @@ #include "RefDetectionPostProcessWorkload.hpp" #include "RefDequantizeWorkload.hpp" #include "RefElementwiseWorkload.hpp" +#include "RefElementwiseUnaryWorkload.hpp" #include "RefFullyConnectedWorkload.hpp" #include "RefFloorWorkload.hpp" #include "RefFakeQuantizationFloat32Workload.hpp" @@ -51,7 +50,6 @@ #include "RefReshapeWorkload.hpp" #include "RefResizeBilinearWorkload.hpp" #include "RefResizeWorkload.hpp" -#include "RefRsqrtWorkload.hpp" #include "RefSliceWorkload.hpp" #include "RefSplitterWorkload.hpp" #include "RefSoftmaxWorkload.hpp" diff --git a/src/backends/reference/workloads/Rsqrt.cpp b/src/backends/reference/workloads/Rsqrt.cpp deleted file mode 100644 index 5abc2c8f7b..0000000000 --- a/src/backends/reference/workloads/Rsqrt.cpp +++ /dev/null @@ -1,25 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. 
-// SPDX-License-Identifier: MIT -// - -#include "Rsqrt.hpp" - -#include - -namespace armnn -{ - -void Rsqrt(Decoder& in, - Encoder& out, - const TensorInfo& tensorInfo) -{ - for (unsigned int i = 0; i < tensorInfo.GetNumElements(); ++i) - { - out[i]; - in[i]; - out.Set(1.f / sqrtf(in.Get())); - } -} - -} //namespace armnn \ No newline at end of file diff --git a/src/backends/reference/workloads/Rsqrt.hpp b/src/backends/reference/workloads/Rsqrt.hpp index ffc6b18d13..47ebcf36f6 100644 --- a/src/backends/reference/workloads/Rsqrt.hpp +++ b/src/backends/reference/workloads/Rsqrt.hpp @@ -1,19 +1,22 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2019 Arm Ltd. All rights reserved. // SPDX-License-Identifier: MIT // -#include "BaseIterator.hpp" -#include -#include +#pragma once + +#include namespace armnn { - -/// Performs the reciprocal squareroot function elementwise -/// on the inputs to give the outputs. -void Rsqrt(Decoder& in, - Encoder& out, - const TensorInfo& tensorInfo); + template +struct rsqrt : public std::unary_function + { + T + operator () (const T& inputData) const + { + return 1 / std::sqrt(inputData); + } + }; } //namespace armnn diff --git a/src/backends/reference/workloads/Sqrt.hpp b/src/backends/reference/workloads/Sqrt.hpp new file mode 100644 index 0000000000..e4ff6a4829 --- /dev/null +++ b/src/backends/reference/workloads/Sqrt.hpp @@ -0,0 +1,22 @@ +// +// Copyright © 2019 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include + +namespace armnn +{ + template +struct sqrt : public std::unary_function + { + T + operator () (const T& inputData) const + { + return std::sqrt(inputData); + } + }; + +} //namespace armnn -- cgit v1.2.1