From aba90cd608eb65ab459cd71a6724511a1507763b Mon Sep 17 00:00:00 2001
From: James Conroy
Date: Fri, 6 Nov 2020 16:28:18 +0000
Subject: IVGCVSW-5091 Add Logical ops frontend and ref impl

* Add frontend and reference implementation for logical ops NOT, AND, OR.
* Unary NOT uses the existing ElementwiseUnary layer and ElementwiseUnary descriptor.
* Binary AND/OR use the new LogicalBinary layer and the new LogicalBinary descriptor.
* Add serialization/deserialization support and add missing ElementwiseUnary
  deserializer code.
* Add an additional Boolean decoder in BaseIterator.hpp.

Signed-off-by: James Conroy
Change-Id: Id343b01174053a166de1b98b6175e04a5065f720
---
 Android.mk                                         |   1 +
 CMakeLists.txt                                     |   2 +
 include/armnn/Descriptors.hpp                      |  20 +
 include/armnn/DescriptorsFwd.hpp                   |   1 +
 include/armnn/ILayerSupport.hpp                    |  11 +
 include/armnn/ILayerVisitor.hpp                    |   8 +
 include/armnn/INetwork.hpp                         |   7 +
 include/armnn/LayerVisitorBase.hpp                 |   4 +
 include/armnn/Types.hpp                            |  17 +-
 include/armnn/TypesUtils.hpp                       |  23 +-
 src/armnn/InternalTypes.hpp                        |   1 +
 src/armnn/LayersFwd.hpp                            |   2 +
 src/armnn/Network.cpp                              |   6 +
 src/armnn/Network.hpp                              |   3 +
 src/armnn/layers/LogicalBinaryLayer.cpp            |  80 +++
 src/armnn/layers/LogicalBinaryLayer.hpp            |  50 ++
 .../test/TestNameAndDescriptorLayerVisitor.cpp     |   7 +
 .../test/TestNameAndDescriptorLayerVisitor.hpp     |   1 +
 src/armnnDeserializer/Deserializer.cpp             |  47 ++
 src/armnnDeserializer/Deserializer.hpp             |   1 +
 src/armnnSerializer/ArmnnSchema.fbs                |  23 +-
 src/armnnSerializer/Serializer.cpp                 |  15 +
 src/armnnSerializer/Serializer.hpp                 |   4 +
 src/armnnSerializer/SerializerUtils.cpp            |  16 +
 src/armnnSerializer/SerializerUtils.hpp            |   3 +
 src/armnnSerializer/test/SerializerTests.cpp       |  68 +++
 src/backends/backendsCommon/LayerSupportBase.cpp   |  19 +-
 src/backends/backendsCommon/LayerSupportBase.hpp   |  11 +
 src/backends/backendsCommon/WorkloadData.cpp       |  50 +-
 src/backends/backendsCommon/WorkloadData.hpp       |   5 +
 src/backends/backendsCommon/WorkloadFactory.cpp    |  27 +
 src/backends/backendsCommon/WorkloadFactory.hpp    |   6 +
 src/backends/backendsCommon/common.mk              |   1 +
 src/backends/backendsCommon/test/CMakeLists.txt    |   2 +
 .../test/IsLayerSupportedTestImpl.hpp              |   2 +
 src/backends/backendsCommon/test/LayerTests.hpp    |   1 +
 .../test/layerTests/LogicalTestImpl.cpp            | 572 +++++++++++++++++++++
 .../test/layerTests/LogicalTestImpl.hpp            |  83 +++
 src/backends/reference/RefLayerSupport.cpp         |  47 ++
 src/backends/reference/RefLayerSupport.hpp         |  11 +
 src/backends/reference/RefWorkloadFactory.cpp      |  13 +
 src/backends/reference/RefWorkloadFactory.hpp      |   6 +
 src/backends/reference/backend.mk                  |   2 +
 src/backends/reference/test/RefLayerTests.cpp      |  13 +
 src/backends/reference/workloads/BaseIterator.hpp  |  34 ++
 src/backends/reference/workloads/CMakeLists.txt    |   4 +
 src/backends/reference/workloads/Decoders.hpp      |  18 +
 .../reference/workloads/ElementwiseFunction.cpp    |  25 +
 .../reference/workloads/ElementwiseFunction.hpp    |  26 +
 .../workloads/RefLogicalBinaryWorkload.cpp         |  75 +++
 .../workloads/RefLogicalBinaryWorkload.hpp         |  34 ++
 .../workloads/RefLogicalUnaryWorkload.cpp          |  64 +++
 .../workloads/RefLogicalUnaryWorkload.hpp          |  33 ++
 src/backends/reference/workloads/RefWorkloads.hpp  |   2 +
 54 files changed, 1591 insertions(+), 16 deletions(-)
 create mode 100644 src/armnn/layers/LogicalBinaryLayer.cpp
 create mode 100644 src/armnn/layers/LogicalBinaryLayer.hpp
 create mode 100644 src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp
 create mode 100644 src/backends/backendsCommon/test/layerTests/LogicalTestImpl.hpp
 create mode 100644 src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp
 create mode 100644 src/backends/reference/workloads/RefLogicalBinaryWorkload.hpp
 create mode 100644 src/backends/reference/workloads/RefLogicalUnaryWorkload.cpp
 create mode 100644 src/backends/reference/workloads/RefLogicalUnaryWorkload.hpp

diff --git a/Android.mk b/Android.mk
index d61d465e74..e8bf4b668e 100644
--- a/Android.mk
+++ b/Android.mk
@@ -170,6 +170,7 @@ LOCAL_SRC_FILES := \
         src/armnn/layers/InputLayer.cpp \
         src/armnn/layers/InstanceNormalizationLayer.cpp \
         src/armnn/layers/L2NormalizationLayer.cpp \
+        src/armnn/layers/LogicalBinaryLayer.cpp \
         src/armnn/layers/LogSoftmaxLayer.cpp \
         src/armnn/layers/LstmLayer.cpp \
         src/armnn/layers/MapLayer.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index c2e394dae7..240767f43b 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -300,6 +300,8 @@ list(APPEND armnn_sources
     src/armnn/layers/InstanceNormalizationLayer.cpp
     src/armnn/layers/L2NormalizationLayer.hpp
     src/armnn/layers/L2NormalizationLayer.cpp
+    src/armnn/layers/LogicalBinaryLayer.hpp
+    src/armnn/layers/LogicalBinaryLayer.cpp
     src/armnn/layers/LogSoftmaxLayer.hpp
     src/armnn/layers/LogSoftmaxLayer.cpp
     src/armnn/layers/LstmLayer.cpp
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index 2834336fb2..ac0d585751 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -1266,4 +1266,24 @@ struct TransposeDescriptor
     PermutationVector m_DimMappings;
 };
 
+/// A LogicalBinaryDescriptor for the LogicalBinaryLayer
+struct LogicalBinaryDescriptor
+{
+    LogicalBinaryDescriptor()
+        : LogicalBinaryDescriptor(LogicalBinaryOperation::LogicalAnd)
+    {}
+
+    LogicalBinaryDescriptor(LogicalBinaryOperation operation)
+        : m_Operation(operation)
+    {}
+
+    bool operator ==(const LogicalBinaryDescriptor& rhs) const
+    {
+        return m_Operation == rhs.m_Operation;
+    }
+
+    /// Specifies the logical operation to execute
+    LogicalBinaryOperation m_Operation;
+};
+
 } // namespace armnn
diff --git a/include/armnn/DescriptorsFwd.hpp b/include/armnn/DescriptorsFwd.hpp
index fba976c788..dff5ec73ad 100644
--- a/include/armnn/DescriptorsFwd.hpp
+++ b/include/armnn/DescriptorsFwd.hpp
@@ -23,6 +23,7 @@ struct FullyConnectedDescriptor;
 struct GatherDescriptor;
 struct InstanceNormalizationDescriptor;
 struct L2NormalizationDescriptor;
+struct LogicalBinaryDescriptor;
 struct LstmDescriptor;
 struct MeanDescriptor;
 struct NormalizationDescriptor;
diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index ed234fe4db..200409361c 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -205,6 +205,17 @@ public:
                                            const L2NormalizationDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
+    virtual bool IsLogicalBinarySupported(const TensorInfo& input0,
+                                          const TensorInfo& input1,
+                                          const TensorInfo& output,
+                                          const LogicalBinaryDescriptor& descriptor,
+                                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
+    virtual bool IsLogicalUnarySupported(const TensorInfo& input,
+                                         const TensorInfo& output,
+                                         const ElementwiseUnaryDescriptor& descriptor,
+                                         Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
     virtual bool IsLogSoftmaxSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const LogSoftmaxDescriptor& descriptor,
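The hunks that follow wire the new operations into the visitor, network and layer
infrastructure. As a minimal usage sketch (not part of the patch), this is how a
caller would build the new descriptor and query backend support through the
interface added above; `layerSupport`, `input0Info`, `input1Info` and
`outputInfo` are assumed to already exist:

    // Sketch only: query support for a logical AND through the new interface.
    armnn::LogicalBinaryDescriptor descriptor(armnn::LogicalBinaryOperation::LogicalAnd);

    std::string reason;
    armnn::Optional<std::string&> reasonIfUnsupported(reason);
    bool supported = layerSupport.IsLogicalBinarySupported(
        input0Info, input1Info, outputInfo, descriptor, reasonIfUnsupported);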
diff --git a/include/armnn/ILayerVisitor.hpp b/include/armnn/ILayerVisitor.hpp
index 385ad62225..b8e741d01e 100644
--- a/include/armnn/ILayerVisitor.hpp
+++ b/include/armnn/ILayerVisitor.hpp
@@ -268,6 +268,14 @@ public:
                                      const LogSoftmaxDescriptor& logSoftmaxDescriptor,
                                      const char* name = nullptr) = 0;
 
+    /// Function that a logical binary layer should call back to when its Accept(ILayerVisitor&) function is invoked.
+    /// @param layer - pointer to the layer which is calling back to this visit function.
+    /// @param logicalBinaryDescriptor - LogicalBinaryDescriptor to configure the logical binary layer.
+    /// @param name - Optional name for the layer.
+    virtual void VisitLogicalBinaryLayer(const IConnectableLayer* layer,
+                                         const LogicalBinaryDescriptor& logicalBinaryDescriptor,
+                                         const char* name = nullptr) = 0;
+
     /// Function an Lstm layer should call back to when its Accept(ILayerVisitor&) function is invoked.
     /// @param layer - pointer to the layer which is calling back to this visit function.
     /// @param descriptor - Parameters controlling the operation of the Lstm operation.
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 70ad94fa51..b89df63aa4 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -584,6 +584,13 @@ public:
                                                const LstmInputParams& params,
                                                const char* name = nullptr) = 0;
 
+    /// Adds a Logical Binary layer to the network.
+    /// @param descriptor - Description of the Logical Binary layer.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    virtual IConnectableLayer* AddLogicalBinaryLayer(const LogicalBinaryDescriptor& descriptor,
+                                                     const char* name = nullptr) = 0;
+
     virtual void Accept(ILayerVisitor& visitor) const = 0;
 
 protected:
diff --git a/include/armnn/LayerVisitorBase.hpp b/include/armnn/LayerVisitorBase.hpp
index 75237a4372..a97530ec40 100644
--- a/include/armnn/LayerVisitorBase.hpp
+++ b/include/armnn/LayerVisitorBase.hpp
@@ -141,6 +141,10 @@ public:
                              const LogSoftmaxDescriptor&,
                              const char*) override { DefaultPolicy::Apply(__func__); }
 
+    void VisitLogicalBinaryLayer(const IConnectableLayer*,
+                                 const LogicalBinaryDescriptor&,
+                                 const char*) override { DefaultPolicy::Apply(__func__); }
+
     void VisitLstmLayer(const IConnectableLayer*,
                         const LstmDescriptor&,
                         const LstmInputParams&,
diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp
index 4a01549a14..46f246f46d 100644
--- a/include/armnn/Types.hpp
+++ b/include/armnn/Types.hpp
@@ -85,13 +85,20 @@ enum class ComparisonOperation
     NotEqual = 5
 };
 
+enum class LogicalBinaryOperation
+{
+    LogicalAnd = 0,
+    LogicalOr  = 1
+};
+
 enum class UnaryOperation
 {
-    Abs = 0,
-    Exp = 1,
-    Sqrt = 2,
-    Rsqrt = 3,
-    Neg = 4
+    Abs        = 0,
+    Exp        = 1,
+    Sqrt       = 2,
+    Rsqrt      = 3,
+    Neg        = 4,
+    LogicalNot = 5
 };
 
 enum class PoolingAlgorithm
diff --git a/include/armnn/TypesUtils.hpp b/include/armnn/TypesUtils.hpp
index efc69deb67..1012fcfa22 100644
--- a/include/armnn/TypesUtils.hpp
+++ b/include/armnn/TypesUtils.hpp
@@ -72,12 +72,23 @@ constexpr char const* GetUnaryOperationAsCString(UnaryOperation operation)
 {
     switch (operation)
     {
-        case UnaryOperation::Abs:   return "Abs";
-        case UnaryOperation::Exp:   return "Exp";
-        case UnaryOperation::Sqrt:  return "Sqrt";
-        case UnaryOperation::Rsqrt: return "Rsqrt";
-        case UnaryOperation::Neg:   return "Neg";
-        default:                    return "Unknown";
+        case UnaryOperation::Abs:        return "Abs";
+        case UnaryOperation::Exp:        return "Exp";
+        case UnaryOperation::Sqrt:       return "Sqrt";
+        case UnaryOperation::Rsqrt:      return "Rsqrt";
+        case UnaryOperation::Neg:        return "Neg";
+        case UnaryOperation::LogicalNot: return "LogicalNot";
+        default:                         return "Unknown";
     }
 }
 
+constexpr char const* GetLogicalBinaryOperationAsCString(LogicalBinaryOperation operation)
+{
+    switch (operation)
+    {
+        case LogicalBinaryOperation::LogicalAnd: return "LogicalAnd";
+        case LogicalBinaryOperation::LogicalOr:  return "LogicalOr";
+        default:                                 return "Unknown";
+    }
+}
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index 778408ae60..6e47399871 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -40,6 +40,7 @@
     X(Input) \
     X(InstanceNormalization) \
     X(L2Normalization) \
+    X(LogicalBinary) \
     X(LogSoftmax) \
     X(Lstm) \
     X(QLstm) \
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index ccc5ef2b4c..b9ca61a70b 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -34,6 +34,7 @@
 #include "layers/InputLayer.hpp"
 #include "layers/InstanceNormalizationLayer.hpp"
 #include "layers/L2NormalizationLayer.hpp"
+#include "layers/LogicalBinaryLayer.hpp"
 #include "layers/LogSoftmaxLayer.hpp"
 #include "layers/LstmLayer.hpp"
 #include "layers/MapLayer.hpp"
@@ -126,6 +127,7 @@ DECLARE_LAYER(Gather)
 DECLARE_LAYER(Input)
 DECLARE_LAYER(InstanceNormalization)
 DECLARE_LAYER(L2Normalization)
+DECLARE_LAYER(LogicalBinary)
 DECLARE_LAYER(LogSoftmax)
 DECLARE_LAYER(Lstm)
 DECLARE_LAYER(Map)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 347e39b4c8..5c55641c82 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1999,6 +1999,12 @@ IConnectableLayer* Network::AddQLstmLayer(const QLstmDescriptor& descriptor,
     return layer;
 }
 
+IConnectableLayer* Network::AddLogicalBinaryLayer(const LogicalBinaryDescriptor& logicalBinaryDescriptor,
+                                                  const char* name)
+{
+    return m_Graph->AddLayer<LogicalBinaryLayer>(logicalBinaryDescriptor, name);
+}
+
 void Network::Accept(ILayerVisitor& visitor) const
 {
     for (auto layer : GetGraph())
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index b09ac450d5..c652edb416 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -250,6 +250,9 @@ public:
     IConnectableLayer* AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
                                              const char* name = nullptr) override;
 
+    IConnectableLayer* AddLogicalBinaryLayer(const LogicalBinaryDescriptor& logicalBinaryDescriptor,
+                                             const char* name = nullptr) override;
+
     void Accept(ILayerVisitor& visitor) const override;
 
 private:
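With the front-end additions above in place, a network exercising both new code
paths can be assembled as follows (illustrative sketch, not part of the patch):
a Boolean NAND built from a LogicalBinary AND feeding an ElementwiseUnary NOT.

    // Sketch only: Boolean NAND from the two new code paths.
    armnn::INetworkPtr network = armnn::INetwork::Create();

    armnn::IConnectableLayer* in0 = network->AddInputLayer(0);
    armnn::IConnectableLayer* in1 = network->AddInputLayer(1);
    armnn::IConnectableLayer* andLayer = network->AddLogicalBinaryLayer(
        armnn::LogicalBinaryDescriptor(armnn::LogicalBinaryOperation::LogicalAnd), "and");
    armnn::IConnectableLayer* notLayer = network->AddElementwiseUnaryLayer(
        armnn::ElementwiseUnaryDescriptor(armnn::UnaryOperation::LogicalNot), "not");
    armnn::IConnectableLayer* out = network->AddOutputLayer(0);

    in0->GetOutputSlot(0).Connect(andLayer->GetInputSlot(0));
    in1->GetOutputSlot(0).Connect(andLayer->GetInputSlot(1));
    andLayer->GetOutputSlot(0).Connect(notLayer->GetInputSlot(0));
    notLayer->GetOutputSlot(0).Connect(out->GetInputSlot(0));

    armnn::TensorInfo boolInfo({ 1, 1, 1, 4 }, armnn::DataType::Boolean);
    in0->GetOutputSlot(0).SetTensorInfo(boolInfo);
    in1->GetOutputSlot(0).SetTensorInfo(boolInfo);
    andLayer->GetOutputSlot(0).SetTensorInfo(boolInfo);
    notLayer->GetOutputSlot(0).SetTensorInfo(boolInfo);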
diff --git a/src/armnn/layers/LogicalBinaryLayer.cpp b/src/armnn/layers/LogicalBinaryLayer.cpp
new file mode 100644
index 0000000000..0ae5ea5641
--- /dev/null
+++ b/src/armnn/layers/LogicalBinaryLayer.cpp
@@ -0,0 +1,80 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "LogicalBinaryLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include
+#include
+
+#include
+
+namespace armnn
+{
+
+LogicalBinaryLayer::LogicalBinaryLayer(const LogicalBinaryDescriptor& param, const char* name)
+    : LayerWithParameters(2, 1, LayerType::LogicalBinary, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> LogicalBinaryLayer::CreateWorkload(const IWorkloadFactory& factory) const
+{
+    LogicalBinaryQueueDescriptor descriptor;
+    return factory.CreateLogicalBinary(descriptor, PrepInfoAndDesc(descriptor));
+}
+
+LogicalBinaryLayer* LogicalBinaryLayer::Clone(Graph& graph) const
+{
+    return CloneBase<LogicalBinaryLayer>(graph, m_Param, GetName());
+}
+
+std::vector<TensorShape> LogicalBinaryLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+    ARMNN_ASSERT(inputShapes.size() == 2);
+    const TensorShape& input0 = inputShapes[0];
+    const TensorShape& input1 = inputShapes[1];
+
+    ARMNN_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
+    unsigned int numDims = input0.GetNumDimensions();
+
+    std::vector<unsigned int> dims(numDims);
+    for (unsigned int i = 0; i < numDims; i++)
+    {
+        unsigned int dim0 = input0[i];
+        unsigned int dim1 = input1[i];
+
+        ARMNN_ASSERT_MSG(dim0 == dim1 || dim0 == 1 || dim1 == 1,
+                         "Dimensions should either match or one should be of size 1.");
+
+        dims[i] = std::max(dim0, dim1);
+    }
+
+    return std::vector<TensorShape>({ TensorShape(numDims, dims.data()) });
+}
+
+void LogicalBinaryLayer::ValidateTensorShapesFromInputs()
+{
+    VerifyLayerConnections(2, CHECK_LOCATION());
+
+    const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
+
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
+
+    std::vector<TensorShape> inferredShapes = InferOutputShapes({
+        GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
+        GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape()
+    });
+    ARMNN_ASSERT(inferredShapes.size() == 1);
+
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LogicalBinaryLayer");
+}
+
+void LogicalBinaryLayer::Accept(ILayerVisitor& visitor) const
+{
+    visitor.VisitLogicalBinaryLayer(this, GetParameters(), GetName());
+}
+
+} // namespace armnn
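InferOutputShapes above applies per-dimension broadcasting: sizes must match or
one of them must be 1, and the output takes the larger size. Worked examples
(matching the broadcast tests added later in this patch):

    // { 1, 1, 1, 4 } with { 1, 1, 1, 1 }  ->  { 1, 1, 1, 4 }
    // { 1, 1, 1, 1 } with { 1, 1, 1, 4 }  ->  { 1, 1, 1, 4 }
    // { 2, 1, 2, 2 } with { 2, 1, 2, 2 }  ->  { 2, 1, 2, 2 }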
diff --git a/src/armnn/layers/LogicalBinaryLayer.hpp b/src/armnn/layers/LogicalBinaryLayer.hpp
new file mode 100644
index 0000000000..c6b024b36b
--- /dev/null
+++ b/src/armnn/layers/LogicalBinaryLayer.hpp
@@ -0,0 +1,50 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+/// This layer represents a Logical Binary operation.
+class LogicalBinaryLayer : public LayerWithParameters<LogicalBinaryDescriptor>
+{
+public:
+    /// Makes a workload for the LogicalBinary type
+    /// @param [in] graph The graph where this layer can be found
+    /// @param [in] factory The workload factory which will create the workload
+    /// @return A pointer to the created workload, or nullptr if not created
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
+
+    /// Creates a dynamically-allocated copy of this layer
+    /// @param [in] graph The graph into which this layer is being cloned
+    LogicalBinaryLayer* Clone(Graph& graph) const override;
+
+    /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+    /// otherwise infers the output shapes from the given input shapes and layer properties.
+    /// @param [in] inputShapes The input shapes the layer has.
+    /// @return A vector of the inferred output shapes.
+    std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+    /// Check if the input tensor shape(s) will lead to a valid configuration
+    /// of @ref LogicalBinaryLayer
+    void ValidateTensorShapesFromInputs() override;
+
+    void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+    /// Constructor to create a LogicalBinaryLayer
+    /// @param [in] param LogicalBinaryDescriptor to configure the LogicalBinaryLayer
+    /// @param [in] name Optional name for the layer
+    LogicalBinaryLayer(const LogicalBinaryDescriptor& param, const char* name);
+
+    /// Default destructor
+    ~LogicalBinaryLayer() = default;
+};
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
index 6ab9f9e1e4..7d4dcaae0e 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
@@ -125,6 +125,12 @@ armnn::L2NormalizationDescriptor GetDescriptor
     return descriptor;
 }
 
+template<>
+armnn::LogicalBinaryDescriptor GetDescriptor<armnn::LogicalBinaryDescriptor>()
+{
+    return armnn::LogicalBinaryDescriptor(armnn::LogicalBinaryOperation::LogicalOr);
+}
+
 template<>
 armnn::MeanDescriptor GetDescriptor<armnn::MeanDescriptor>()
 {
@@ -280,6 +286,7 @@ TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Fill)
 TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Gather)
 TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(InstanceNormalization)
 TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(L2Normalization)
+TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(LogicalBinary)
 TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(LogSoftmax)
 TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Mean)
 TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Normalization)
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
index df0e055157..dc6d11440f 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
@@ -53,6 +53,7 @@ DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Fill)
 DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Gather)
 DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(InstanceNormalization)
 DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(L2Normalization)
+DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(LogicalBinary)
 DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(LogSoftmax)
 DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Mean)
 DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Normalization)
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index 1b62484d3e..1bb004d449 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -194,6 +194,7 @@ m_ParserFunctions(Layer_MAX+1, &Deserializer::ParseUnsupportedLayer)
     m_ParserFunctions[Layer_GreaterLayer]               = &Deserializer::ParseGreater;
     m_ParserFunctions[Layer_InstanceNormalizationLayer] = &Deserializer::ParseInstanceNormalization;
     m_ParserFunctions[Layer_L2NormalizationLayer]       = &Deserializer::ParseL2Normalization;
+    m_ParserFunctions[Layer_LogicalBinaryLayer]         = &Deserializer::ParseLogicalBinary;
     m_ParserFunctions[Layer_LogSoftmaxLayer]            = &Deserializer::ParseLogSoftmax;
     m_ParserFunctions[Layer_LstmLayer]                  = &Deserializer::ParseLstm;
     m_ParserFunctions[Layer_MaximumLayer]               = &Deserializer::ParseMaximum;
@@ -267,6 +268,8 @@ Deserializer::LayerBaseRawPtr Deserializer::GetBaseLayer(const GraphPtr& graphPt
             return graphPtr->layers()->Get(layerIndex)->layer_as_DivisionLayer()->base();
         case Layer::Layer_EqualLayer:
             return graphPtr->layers()->Get(layerIndex)->layer_as_EqualLayer()->base();
+        case Layer::Layer_ElementwiseUnaryLayer:
+            return graphPtr->layers()->Get(layerIndex)->layer_as_ElementwiseUnaryLayer()->base();
         case Layer::Layer_FullyConnectedLayer:
             return graphPtr->layers()->Get(layerIndex)->layer_as_FullyConnectedLayer()->base();
         case Layer::Layer_FillLayer:
@@ -283,6 +286,8 @@ Deserializer::LayerBaseRawPtr Deserializer::GetBaseLayer(const GraphPtr& graphPt
             return graphPtr->layers()->Get(layerIndex)->layer_as_InstanceNormalizationLayer()->base();
         case Layer::Layer_L2NormalizationLayer:
             return graphPtr->layers()->Get(layerIndex)->layer_as_L2NormalizationLayer()->base();
+        case Layer::Layer_LogicalBinaryLayer:
+            return graphPtr->layers()->Get(layerIndex)->layer_as_LogicalBinaryLayer()->base();
         case Layer::Layer_LogSoftmaxLayer:
             return graphPtr->layers()->Get(layerIndex)->layer_as_LogSoftmaxLayer()->base();
         case Layer::Layer_LstmLayer:
@@ -454,6 +459,19 @@ armnn::ComparisonOperation ToComparisonOperation(armnnSerializer::ComparisonOper
     }
 }
 
+armnn::LogicalBinaryOperation ToLogicalBinaryOperation(armnnSerializer::LogicalBinaryOperation operation)
+{
+    switch (operation)
+    {
+        case armnnSerializer::LogicalBinaryOperation::LogicalBinaryOperation_LogicalAnd:
+            return armnn::LogicalBinaryOperation::LogicalAnd;
+        case armnnSerializer::LogicalBinaryOperation::LogicalBinaryOperation_LogicalOr:
+            return armnn::LogicalBinaryOperation::LogicalOr;
+        default:
+            throw armnn::InvalidArgumentException("Logical Binary operation unknown");
+    }
+}
+
 armnn::UnaryOperation ToUnaryOperation(armnnSerializer::UnaryOperation operation)
 {
     switch (operation)
@@ -468,6 +486,8 @@ armnn::UnaryOperation ToUnaryOperation(armnnSerializer::UnaryOperation operation
             return armnn::UnaryOperation::Exp;
         case armnnSerializer::UnaryOperation::UnaryOperation_Neg:
             return armnn::UnaryOperation::Neg;
+        case armnnSerializer::UnaryOperation::UnaryOperation_LogicalNot:
+            return armnn::UnaryOperation::LogicalNot;
         default:
             throw armnn::InvalidArgumentException("Unary operation unknown");
     }
@@ -1512,6 +1532,33 @@ void Deserializer::ParseL2Normalization(GraphPtr graph, unsigned int layerIndex)
     RegisterOutputSlots(graph, layerIndex, layer);
 }
 
+void Deserializer::ParseLogicalBinary(GraphPtr graph, unsigned int layerIndex)
+{
+    CHECK_LAYERS(graph, 0, layerIndex);
+    CHECK_LOCATION();
+
+    auto inputs = GetInputs(graph, layerIndex);
+    CHECK_VALID_SIZE(inputs.size(), 2);
+
+    auto outputs = GetOutputs(graph, layerIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    auto fbLayer = graph->layers()->Get(layerIndex)->layer_as_LogicalBinaryLayer();
+    auto fbDescriptor = fbLayer->descriptor();
+
+    armnn::LogicalBinaryDescriptor descriptor;
+    descriptor.m_Operation = ToLogicalBinaryOperation(fbDescriptor->operation());
+
+    const std::string& layerName = GetLayerName(graph, layerIndex);
+    IConnectableLayer* layer = m_Network->AddLogicalBinaryLayer(descriptor, layerName.c_str());
+
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    RegisterInputSlots(graph, layerIndex, layer);
+    RegisterOutputSlots(graph, layerIndex, layer);
+}
+
 void Deserializer::ParseLogSoftmax(GraphPtr graph, unsigned int layerIndex)
 {
     CHECK_LAYERS(graph, 0, layerIndex);
diff --git a/src/armnnDeserializer/Deserializer.hpp b/src/armnnDeserializer/Deserializer.hpp
index 4a1bdad8a8..2a8639f7fc 100644
--- a/src/armnnDeserializer/Deserializer.hpp
+++ b/src/armnnDeserializer/Deserializer.hpp
@@ -103,6 +103,7 @@ private:
     void ParseGreater(GraphPtr graph, unsigned int layerIndex);
     void ParseInstanceNormalization(GraphPtr graph, unsigned int layerIndex);
     void ParseL2Normalization(GraphPtr graph, unsigned int layerIndex);
+    void ParseLogicalBinary(GraphPtr graph, unsigned int layerIndex);
     void ParseLogSoftmax(GraphPtr graph, unsigned int layerIndex);
     void ParseMaximum(GraphPtr graph, unsigned int layerIndex);
     void ParseMean(GraphPtr graph, unsigned int layerIndex);
diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs
index e1b6e1f768..1f71ce19f2 100644
--- a/src/armnnSerializer/ArmnnSchema.fbs
+++ b/src/armnnSerializer/ArmnnSchema.fbs
@@ -159,7 +159,8 @@ enum LayerType : uint {
     Transpose = 55,
     QLstm = 56,
     Fill = 57,
-    Rank = 58
+    Rank = 58,
+    LogicalBinary = 59
 }
 
 // Base layer table to be used as part of other layers
@@ -270,7 +271,8 @@ enum UnaryOperation : byte {
     Rsqrt = 1,
     Sqrt = 2,
     Exp = 3,
-    Neg = 4
+    Neg = 4,
+    LogicalNot = 5
 }
 
 table ElementwiseUnaryDescriptor {
@@ -362,6 +364,20 @@ table L2NormalizationDescriptor {
     eps:float = 1e-12;
 }
 
+enum LogicalBinaryOperation : byte {
+    LogicalAnd = 0,
+    LogicalOr = 1
+}
+
+table LogicalBinaryDescriptor {
+    operation:LogicalBinaryOperation;
+}
+
+table LogicalBinaryLayer {
+    base:LayerBase;
+    descriptor:LogicalBinaryDescriptor;
+}
+
 table MinimumLayer {
     base:LayerBase;
 }
@@ -924,7 +940,8 @@ union Layer {
     TransposeLayer,
     QLstmLayer,
     FillLayer,
-    RankLayer
+    RankLayer,
+    LogicalBinaryLayer
 }
 
 table AnyLayer {
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index f85aae1b6a..379cce2109 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -564,6 +564,21 @@ void SerializerVisitor::VisitL2NormalizationLayer(const armnn::IConnectableLayer
     CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_L2NormalizationLayer);
 }
 
+void SerializerVisitor::VisitLogicalBinaryLayer(const armnn::IConnectableLayer* layer,
+                                                const armnn::LogicalBinaryDescriptor& descriptor,
+                                                const char* name)
+{
+    IgnoreUnused(name);
+
+    auto fbBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_LogicalBinary);
+    auto fbDescriptor = serializer::CreateLogicalBinaryDescriptor(
+        m_flatBufferBuilder,
+        GetFlatBufferLogicalBinaryOperation(descriptor.m_Operation));
+
+    auto fbLayer = serializer::CreateLogicalBinaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
+    CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_LogicalBinaryLayer);
+}
+
 void SerializerVisitor::VisitLogSoftmaxLayer(const armnn::IConnectableLayer* layer,
                                              const armnn::LogSoftmaxDescriptor& logSoftmaxDescriptor,
                                              const char* name)
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index babecdc056..fa3447de21 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -158,6 +158,10 @@ public:
                                   const armnn::L2NormalizationDescriptor& l2NormalizationDescriptor,
                                   const char* name = nullptr) override;
 
+    void VisitLogicalBinaryLayer(const armnn::IConnectableLayer* layer,
+                                 const armnn::LogicalBinaryDescriptor& descriptor,
+                                 const char* name = nullptr) override;
+
     void VisitLogSoftmaxLayer(const armnn::IConnectableLayer* layer,
                               const armnn::LogSoftmaxDescriptor& logSoftmaxDescriptor,
                               const char* name = nullptr) override;
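The serializer and deserializer changes can be exercised together with a round
trip along these lines (sketch only; it assumes the `network` from the earlier
example and uses the existing ISerializer/IDeserializer entry points):

    // Sketch only: serialize, then restore, a network containing the new layers.
    auto serializer = armnnSerializer::ISerializer::Create();
    serializer->Serialize(*network);

    std::stringstream stream;
    serializer->SaveSerializedToStream(stream);

    std::string data = stream.str();
    std::vector<uint8_t> binary(data.begin(), data.end());

    auto parser = armnnDeserializer::IDeserializer::Create();
    armnn::INetworkPtr restored = parser->CreateNetworkFromBinary(binary);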
diff --git a/src/armnnSerializer/SerializerUtils.cpp b/src/armnnSerializer/SerializerUtils.cpp
index 5566abf7e4..045d6aac5c 100644
--- a/src/armnnSerializer/SerializerUtils.cpp
+++ b/src/armnnSerializer/SerializerUtils.cpp
@@ -28,6 +28,20 @@ armnnSerializer::ComparisonOperation GetFlatBufferComparisonOperation(armnn::Com
     }
 }
 
+armnnSerializer::LogicalBinaryOperation GetFlatBufferLogicalBinaryOperation(
+    armnn::LogicalBinaryOperation logicalBinaryOperation)
+{
+    switch (logicalBinaryOperation)
+    {
+        case armnn::LogicalBinaryOperation::LogicalAnd:
+            return armnnSerializer::LogicalBinaryOperation::LogicalBinaryOperation_LogicalAnd;
+        case armnn::LogicalBinaryOperation::LogicalOr:
+            return armnnSerializer::LogicalBinaryOperation::LogicalBinaryOperation_LogicalOr;
+        default:
+            throw armnn::InvalidArgumentException("Logical Binary operation unknown");
+    }
+}
+
 armnnSerializer::ConstTensorData GetFlatBufferConstTensorData(armnn::DataType dataType)
 {
     switch (dataType)
@@ -98,6 +112,8 @@ armnnSerializer::UnaryOperation GetFlatBufferUnaryOperation(armnn::UnaryOperatio
             return armnnSerializer::UnaryOperation::UnaryOperation_Exp;
         case armnn::UnaryOperation::Neg:
             return armnnSerializer::UnaryOperation::UnaryOperation_Neg;
+        case armnn::UnaryOperation::LogicalNot:
+            return armnnSerializer::UnaryOperation::UnaryOperation_LogicalNot;
         default:
             throw armnn::InvalidArgumentException("Unary operation unknown");
     }
diff --git a/src/armnnSerializer/SerializerUtils.hpp b/src/armnnSerializer/SerializerUtils.hpp
index edd48a5e25..a3cf5ba3c1 100644
--- a/src/armnnSerializer/SerializerUtils.hpp
+++ b/src/armnnSerializer/SerializerUtils.hpp
@@ -35,4 +35,7 @@ armnnSerializer::NormalizationAlgorithmMethod GetFlatBufferNormalizationAlgorith
 
 armnnSerializer::ResizeMethod GetFlatBufferResizeMethod(armnn::ResizeMethod method);
 
+armnnSerializer::LogicalBinaryOperation GetFlatBufferLogicalBinaryOperation(
+    armnn::LogicalBinaryOperation logicalBinaryOperation);
+
 } // namespace armnnSerializer
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index e00fb4dcde..6866391e0f 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -1626,6 +1626,74 @@ BOOST_AUTO_TEST_CASE(EnsureL2NormalizationBackwardCompatibility)
     deserializedNetwork->Accept(verifier);
 }
 
+BOOST_AUTO_TEST_CASE(SerializeLogicalBinary)
+{
+    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(LogicalBinary)
+
+    const std::string layerName("logicalBinaryAnd");
+
+    const armnn::TensorShape shape{2, 1, 2, 2};
+
+    const armnn::TensorInfo inputInfo  = armnn::TensorInfo(shape, armnn::DataType::Boolean);
+    const armnn::TensorInfo outputInfo = armnn::TensorInfo(shape, armnn::DataType::Boolean);
+
+    armnn::LogicalBinaryDescriptor descriptor(armnn::LogicalBinaryOperation::LogicalAnd);
+
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    armnn::IConnectableLayer* const inputLayer0        = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const inputLayer1        = network->AddInputLayer(1);
+    armnn::IConnectableLayer* const logicalBinaryLayer = network->AddLogicalBinaryLayer(descriptor, layerName.c_str());
+    armnn::IConnectableLayer* const outputLayer        = network->AddOutputLayer(0);
+
+    inputLayer0->GetOutputSlot(0).Connect(logicalBinaryLayer->GetInputSlot(0));
+    inputLayer1->GetOutputSlot(0).Connect(logicalBinaryLayer->GetInputSlot(1));
+    logicalBinaryLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+    inputLayer0->GetOutputSlot(0).SetTensorInfo(inputInfo);
+    inputLayer1->GetOutputSlot(0).SetTensorInfo(inputInfo);
+    logicalBinaryLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    BOOST_CHECK(deserializedNetwork);
+
+    LogicalBinaryLayerVerifier verifier(layerName, { inputInfo, inputInfo }, { outputInfo }, descriptor);
+    deserializedNetwork->Accept(verifier);
+}
+
+BOOST_AUTO_TEST_CASE(SerializeLogicalUnary)
+{
+    DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(ElementwiseUnary)
+
+    const std::string layerName("elementwiseUnaryLogicalNot");
+
+    const armnn::TensorShape shape{2, 1, 2, 2};
+
+    const armnn::TensorInfo inputInfo  = armnn::TensorInfo(shape, armnn::DataType::Boolean);
+    const armnn::TensorInfo outputInfo = armnn::TensorInfo(shape, armnn::DataType::Boolean);
+
+    armnn::ElementwiseUnaryDescriptor descriptor(armnn::UnaryOperation::LogicalNot);
+
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const elementwiseUnaryLayer =
+        network->AddElementwiseUnaryLayer(descriptor, layerName.c_str());
+    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
+
+    inputLayer->GetOutputSlot(0).Connect(elementwiseUnaryLayer->GetInputSlot(0));
+    elementwiseUnaryLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+    inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
+    elementwiseUnaryLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+
+    BOOST_CHECK(deserializedNetwork);
+
+    ElementwiseUnaryLayerVerifier verifier(layerName, { inputInfo }, { outputInfo }, descriptor);
+
+    deserializedNetwork->Accept(verifier);
+}
+
 BOOST_AUTO_TEST_CASE(SerializeLogSoftmax)
 {
     DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(LogSoftmax)
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 92c1023583..543591091b 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -313,13 +313,30 @@ bool LayerSupportBase::IsInstanceNormalizationSupported(const TensorInfo&, // in
 }
 
 bool LayerSupportBase::IsL2NormalizationSupported(const TensorInfo&, // input
-                                                   const TensorInfo&, // output
+                                                  const TensorInfo&, // output
                                                   const L2NormalizationDescriptor&, // descriptor
                                                   Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
+bool LayerSupportBase::IsLogicalBinarySupported(const TensorInfo&, // input0
+                                                const TensorInfo&, // input1
+                                                const TensorInfo&, // output
+                                                const LogicalBinaryDescriptor&, // descriptor
+                                                Optional<std::string&> reasonIfUnsupported) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsLogicalUnarySupported(const TensorInfo&, // input
+                                               const TensorInfo&, // output
+                                               const ElementwiseUnaryDescriptor&, // descriptor
+                                               Optional<std::string&> reasonIfUnsupported) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
 bool LayerSupportBase::IsLogSoftmaxSupported(const TensorInfo&, // input
                                              const TensorInfo&, // output
                                              const LogSoftmaxDescriptor&, // descriptor
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index 13fd39ea2e..7b873e3d6c 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -191,6 +191,17 @@ public:
                                    const L2NormalizationDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsLogicalBinarySupported(const TensorInfo& input0,
+                                  const TensorInfo& input1,
+                                  const TensorInfo& output,
+                                  const LogicalBinaryDescriptor& descriptor,
+                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsLogicalUnarySupported(const TensorInfo& input,
+                                 const TensorInfo& output,
+                                 const ElementwiseUnaryDescriptor& descriptor,
+                                 Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsLogSoftmaxSupported(const TensorInfo& input,
                                const TensorInfo& output,
                                const LogSoftmaxDescriptor& descriptor,
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 6d88664728..b39d6b3c4c 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -3534,7 +3534,21 @@ void ElementwiseUnaryQueueDescriptor::Validate(const WorkloadInfo& workloadInfo)
         DataType::Signed32
     };
 
-    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
+    std::vector<DataType> logicalSupportedTypes =
+    {
+        DataType::Boolean
+    };
+
+    if (m_Parameters.m_Operation == UnaryOperation::LogicalNot)
+    {
+        ValidateDataTypes(inputTensorInfo, logicalSupportedTypes, descriptorName);
+    }
+    else
+    {
+        ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
+    }
+
     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
 }
 
@@ -3567,4 +3581,38 @@ void RankQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
     ValidateDataTypes(outputTensorInfo, { DataType::Signed32 }, descriptorName);
 }
 
+void LogicalBinaryQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+    const std::string descriptorName{"LogicalBinaryQueueDescriptor"};
+
+    ValidateNumInputs(workloadInfo, descriptorName, 2);
+    ValidateNumOutputs(workloadInfo, descriptorName, 1);
+
+    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
+    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
+    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
+
+    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
+                                       inputTensorInfo1,
+                                       outputTensorInfo,
+                                       descriptorName,
+                                       "input_0",
+                                       "input_1");
+
+    if (inputTensorInfo0.GetDataType() != DataType::Boolean)
+    {
+        throw InvalidArgumentException(descriptorName + ": Input tensor 0 type must be Boolean.");
+    }
+
+    if (inputTensorInfo1.GetDataType() != DataType::Boolean)
+    {
+        throw InvalidArgumentException(descriptorName + ": Input tensor 1 type must be Boolean.");
+    }
+
+    if (outputTensorInfo.GetDataType() != DataType::Boolean)
+    {
+        throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
+    }
+}
+
 } // namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 952ddc323a..dd39d312b7 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -663,4 +663,9 @@ struct ElementwiseUnaryQueueDescriptor : QueueDescriptorWithParameters
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
+struct LogicalBinaryQueueDescriptor : QueueDescriptorWithParameters<LogicalBinaryDescriptor>
+{
+    void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
 } // namespace armnn
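LogicalBinaryQueueDescriptor::Validate first checks that the two input shapes
broadcast to the output shape and then requires Boolean tensors throughout. A
sketch of the failure mode (assumed Float32 first input; not part of the patch):

    // Sketch only: Validate rejects non-Boolean tensors.
    armnn::LogicalBinaryQueueDescriptor queueDescriptor;
    armnn::WorkloadInfo workloadInfo;
    workloadInfo.m_InputTensorInfos  = { armnn::TensorInfo({ 1, 1, 1, 4 }, armnn::DataType::Float32),
                                         armnn::TensorInfo({ 1, 1, 1, 4 }, armnn::DataType::Boolean) };
    workloadInfo.m_OutputTensorInfos = { armnn::TensorInfo({ 1, 1, 1, 4 }, armnn::DataType::Boolean) };

    // Throws InvalidArgumentException: "Input tensor 0 type must be Boolean."
    queueDescriptor.Validate(workloadInfo);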
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 5e3eed086a..3a8a2ae18f 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -458,6 +458,21 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
                                                           reason);
             break;
         }
+        case LayerType::LogicalBinary:
+        {
+            auto cLayer = PolymorphicDowncast<const LogicalBinaryLayer*>(&layer);
+
+            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+
+            result = layerSupportObject->IsLogicalBinarySupported(input0,
+                                                                  input1,
+                                                                  output,
+                                                                  cLayer->GetParameters(),
+                                                                  reason);
+            break;
+        }
         case LayerType::LogSoftmax:
         {
             auto cLayer = PolymorphicDowncast<const LogSoftmaxLayer*>(&layer);
@@ -1441,6 +1456,18 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateL2Normalization(const L2Norma
     return std::unique_ptr<IWorkload>();
 }
 
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogicalBinary(const LogicalBinaryQueueDescriptor& /*desc*/,
+                                                                 const WorkloadInfo& /*info*/) const
+{
+    return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogicalUnary(const ElementwiseUnaryQueueDescriptor& /*desc*/,
+                                                                const WorkloadInfo& /*info*/) const
+{
+    return std::unique_ptr<IWorkload>();
+}
+
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& /*descriptor*/,
                                                               const WorkloadInfo& /*info*/) const
 {
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index 5096c3ba51..df08b9a81d 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -159,6 +159,12 @@ public:
     virtual std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const;
 
+    virtual std::unique_ptr<IWorkload> CreateLogicalBinary(const LogicalBinaryQueueDescriptor& descriptor,
+                                                           const WorkloadInfo& info) const;
+
+    virtual std::unique_ptr<IWorkload> CreateLogicalUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
+                                                          const WorkloadInfo& info) const;
+
     virtual std::unique_ptr<IWorkload> CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
                                                         const WorkloadInfo& info) const;
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index dd47d0a31f..7254d21f05 100644
--- a/src/backends/backendsCommon/common.mk
+++ b/src/backends/backendsCommon/common.mk
@@ -64,6 +64,7 @@ COMMON_TEST_SOURCES := \
     test/layerTests/GatherTestImpl.cpp \
     test/layerTests/InstanceNormalizationTestImpl.cpp \
    test/layerTests/L2NormalizationTestImpl.cpp \
+    test/layerTests/LogicalTestImpl.cpp \
    test/layerTests/LogSoftmaxTestImpl.cpp \
     test/layerTests/LstmTestImpl.cpp \
     test/layerTests/MaximumTestImpl.cpp \
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index a1271cdd99..7894895c39 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -110,6 +110,8 @@ list(APPEND armnnBackendsCommonUnitTests_sources
     layerTests/L2NormalizationTestImpl.cpp
     layerTests/L2NormalizationTestImpl.hpp
     layerTests/LayerTestResult.hpp
+    layerTests/LogicalTestImpl.cpp
+    layerTests/LogicalTestImpl.hpp
     layerTests/LogSoftmaxTestImpl.cpp
     layerTests/LogSoftmaxTestImpl.hpp
     layerTests/LstmTestImpl.cpp
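The base factory methods above return a null workload, so the new layers stay
unsupported until a backend overrides them. A sketch of what a backend adds
(hypothetical MyWorkloadFactory/MyLogicalBinaryWorkload names; the reference
backend does the equivalent later in this patch):

    // Sketch only: a backend opting in to the new LogicalBinary workload.
    std::unique_ptr<armnn::IWorkload> MyWorkloadFactory::CreateLogicalBinary(
        const armnn::LogicalBinaryQueueDescriptor& descriptor,
        const armnn::WorkloadInfo& info) const
    {
        return std::make_unique<MyLogicalBinaryWorkload>(descriptor, info);
    }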
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index a8465b45f6..7c7ad5f159 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -614,6 +614,8 @@ DECLARE_LAYER_POLICY_2_PARAM(InstanceNormalization)
 
 DECLARE_LAYER_POLICY_2_PARAM(L2Normalization)
 
+DECLARE_LAYER_POLICY_2_PARAM(LogicalBinary)
+
 DECLARE_LAYER_POLICY_2_PARAM(LogSoftmax)
 
 DECLARE_LAYER_POLICY_2_PARAM(Lstm)
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index a4615914d3..e9eb5b9553 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -33,6 +33,7 @@
 #include
 #include
 #include
+#include <backendsCommon/test/layerTests/LogicalTestImpl.hpp>
 #include
 #include
 #include
diff --git a/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp
new file mode 100644
index 0000000000..2225de31e4
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp
@@ -0,0 +1,572 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "LogicalTestImpl.hpp"
+
+#include
+#include
+
+#include
+#include
+
+#include
+#include
+
+#include
+
+namespace {
+
+template<std::size_t NumDims>
+LayerTestResult<uint8_t, NumDims> LogicalUnaryTestHelper(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    armnn::UnaryOperation op,
+    const armnn::TensorShape& inputShape,
+    std::vector<uint8_t> input,
+    const armnn::TensorShape& outputShape,
+    std::vector<uint8_t> expectedOutput,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    ARMNN_ASSERT(inputShape.GetNumDimensions() == NumDims);
+    armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Boolean);
+
+    ARMNN_ASSERT(outputShape.GetNumDimensions() == NumDims);
+    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Boolean);
+
+    auto inputTensor = MakeTensor<uint8_t, NumDims>(inputTensorInfo, input);
+
+    LayerTestResult<uint8_t, NumDims> ret(outputTensorInfo);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ElementwiseUnaryDescriptor desc(op);
+    armnn::ElementwiseUnaryQueueDescriptor qDesc;
+    qDesc.m_Parameters = desc;
+
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(qDesc, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(qDesc, info, outputTensorInfo, outputHandle.get());
+
+    auto workload = workloadFactory.CreateLogicalUnary(qDesc, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), inputTensor.origin());
+
+    workload->PostAllocationConfigure();
+    ExecuteWorkload(*workload, memoryManager);
+
+    CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
+
+    ret.outputExpected = MakeTensor<uint8_t, NumDims>(outputTensorInfo, expectedOutput);
+    ret.compareBoolean = true;
+    return ret;
+}
+
+template<std::size_t NumDims>
+LayerTestResult<uint8_t, NumDims> LogicalBinaryTestHelper(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    armnn::LogicalBinaryOperation op,
+    const armnn::TensorShape& inputShape0,
+    const armnn::TensorShape& inputShape1,
+    std::vector<uint8_t> input0,
+    std::vector<uint8_t> input1,
+    const armnn::TensorShape& outputShape,
+    std::vector<uint8_t> expectedOutput,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    ARMNN_ASSERT(inputShape0.GetNumDimensions() == NumDims);
+    armnn::TensorInfo inputTensorInfo0(inputShape0, armnn::DataType::Boolean);
+
+    ARMNN_ASSERT(inputShape1.GetNumDimensions() == NumDims);
+    armnn::TensorInfo inputTensorInfo1(inputShape1, armnn::DataType::Boolean);
+
+    ARMNN_ASSERT(outputShape.GetNumDimensions() == NumDims);
+    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Boolean);
+
+    auto inputTensor0 = MakeTensor<uint8_t, NumDims>(inputTensorInfo0, input0);
+    auto inputTensor1 = MakeTensor<uint8_t, NumDims>(inputTensorInfo1, input1);
+
+    LayerTestResult<uint8_t, NumDims> ret(outputTensorInfo);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo0);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::LogicalBinaryDescriptor desc(op);
+    armnn::LogicalBinaryQueueDescriptor qDesc;
+    qDesc.m_Parameters = desc;
+
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(qDesc, info, inputTensorInfo0, inputHandle0.get());
+    AddInputToWorkload(qDesc, info, inputTensorInfo1, inputHandle1.get());
+    AddOutputToWorkload(qDesc, info, outputTensorInfo, outputHandle.get());
+
+    auto workload = workloadFactory.CreateLogicalBinary(qDesc, info);
+
+    inputHandle0->Allocate();
+    inputHandle1->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle0.get(), inputTensor0.origin());
+    CopyDataToITensorHandle(inputHandle1.get(), inputTensor1.origin());
+
+    workload->PostAllocationConfigure();
+    ExecuteWorkload(*workload, memoryManager);
+
+    CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
+
+    ret.outputExpected = MakeTensor<uint8_t, NumDims>(outputTensorInfo, expectedOutput);
+    ret.compareBoolean = true;
+    return ret;
+}
+
+class UnaryTestData
+{
+public:
+    UnaryTestData() = default;
+    virtual ~UnaryTestData() = default;
+
+    armnn::TensorShape m_InputShape;
+    armnn::TensorShape m_OutputShape;
+
+    std::vector<uint8_t> m_InputData;
+
+    std::vector<uint8_t> m_OutputNot;
+};
+
+class BinaryTestData
+{
+public:
+    BinaryTestData() = default;
+    virtual ~BinaryTestData() = default;
+
+    armnn::TensorShape m_InputShape0;
+    armnn::TensorShape m_InputShape1;
+    armnn::TensorShape m_OutputShape;
+
+    std::vector<uint8_t> m_InputData0;
+    std::vector<uint8_t> m_InputData1;
+
+    std::vector<uint8_t> m_OutputAnd;
+    std::vector<uint8_t> m_OutputOr;
+};
+
+class SimpleUnaryTestData : public UnaryTestData
+{
+public:
+    SimpleUnaryTestData() : UnaryTestData()
+    {
+        m_InputShape = { 1, 1, 1, 4 };
+        m_OutputShape = m_InputShape;
+
+        m_InputData =
+        {
+            true, false, false, true
+        };
+
+        m_OutputNot =
+        {
+            false, true, true, false
+        };
+    }
+};
+
+class SimpleUnaryIntTestData : public UnaryTestData
+{
+public:
+    SimpleUnaryIntTestData() : UnaryTestData()
+    {
+        m_InputShape = { 1, 1, 1, 4 };
+        m_OutputShape = m_InputShape;
+
+        m_InputData =
+        {
+            1, 11, 111, 0
+        };
+
+        m_OutputNot =
+        {
+            0, 0, 0, 1
+        };
+    }
+};
+
+class SimpleBinaryTestData : public BinaryTestData
+{
+public:
+    SimpleBinaryTestData() : BinaryTestData()
+    {
+        m_InputShape0 = { 1, 1, 1, 4 };
+        m_InputShape1 = m_InputShape0;
+        m_OutputShape = m_InputShape1;
+
+        m_InputData0 =
+        {
+            true, false, false, true
+        };
+
+        m_InputData1 =
+        {
+            true, false, true, false
+        };
+
+        m_OutputAnd =
+        {
+            true, false, false, false
+        };
+
+        m_OutputOr =
+        {
+            true, false, true, true
+        };
+    }
+};
+
+class SimpleBinaryIntTestData : public BinaryTestData
+{
+public:
+    SimpleBinaryIntTestData() : BinaryTestData()
+    {
+        m_InputShape0 = { 1, 1, 1, 4 };
+        m_InputShape1 = m_InputShape0;
+        m_OutputShape = m_InputShape1;
+
+        m_InputData0 =
+        {
+            1, 11, 111, 0
+        };
+
+        m_InputData1 =
+        {
+            0, 111, 111, 0
+        };
+
+        m_OutputAnd =
+        {
+            0, 1, 1, 0
+        };
+
+        m_OutputOr =
+        {
+            1, 1, 1, 0
+        };
+    }
+};
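Note the semantics pinned down by the integer variants above: any non-zero
input counts as true and outputs are canonical 0 or 1. The scalar equivalent
(illustrative only, not part of the patch):

    // Illustrative scalar equivalent of the integer test data above.
    uint8_t LogicalAnd(uint8_t a, uint8_t b) { return static_cast<uint8_t>((a != 0) && (b != 0)); }
    uint8_t LogicalOr (uint8_t a, uint8_t b) { return static_cast<uint8_t>((a != 0) || (b != 0)); }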
+
+class BroadcastBinary1TestData : public BinaryTestData
+{
+public:
+    BroadcastBinary1TestData() : BinaryTestData()
+    {
+        m_InputShape0 = { 1, 1, 1, 4 };
+        m_InputShape1 = { 1, 1, 1, 1 };
+        m_OutputShape = m_InputShape0;
+
+        m_InputData0 =
+        {
+            true, false, false, true
+        };
+
+        m_InputData1 =
+        {
+            true
+        };
+
+        m_OutputAnd =
+        {
+            true, false, false, true
+        };
+
+        m_OutputOr =
+        {
+            true, true, true, true
+        };
+    }
+};
+
+class BroadcastBinary2TestData : public BinaryTestData
+{
+public:
+    BroadcastBinary2TestData() : BinaryTestData()
+    {
+        m_InputShape0 = { 1, 1, 1, 1 };
+        m_InputShape1 = { 1, 1, 1, 4 };
+        m_OutputShape = m_InputShape1;
+
+        m_InputData0 =
+        {
+            true
+        };
+
+        m_InputData1 =
+        {
+            true, false, false, true
+        };
+
+        m_OutputAnd =
+        {
+            true, false, false, true
+        };
+
+        m_OutputOr =
+        {
+            true, true, true, true
+        };
+    }
+};
+
+class BroadcastBinary3TestData : public BinaryTestData
+{
+public:
+    BroadcastBinary3TestData() : BinaryTestData()
+    {
+        m_InputShape0 = { 1, 1, 1, 4 };
+        m_InputShape1 = { 1, 1, 1, 1 };
+        m_OutputShape = m_InputShape0;
+
+        m_InputData0 =
+        {
+            true, false, false, true
+        };
+
+        m_InputData1 =
+        {
+            false
+        };
+
+        m_OutputAnd =
+        {
+            false, false, false, false
+        };
+
+        m_OutputOr =
+        {
+            true, false, false, true
+        };
+    }
+};
+
+static SimpleUnaryTestData s_SimpleUnaryTestData;
+static SimpleBinaryTestData s_SimpleBinaryTestData;
+
+static SimpleUnaryIntTestData s_SimpleUnaryIntTestData;
+static SimpleBinaryIntTestData s_SimpleBinaryIntTestData;
+
+static BroadcastBinary1TestData s_BroadcastBinary1TestData;
+static BroadcastBinary2TestData s_BroadcastBinary2TestData;
+static BroadcastBinary3TestData s_BroadcastBinary3TestData;
+
+} // anonymous namespace
+
+// Unary - Not
+LayerTestResult<uint8_t, 4> LogicalNotTest(armnn::IWorkloadFactory& workloadFactory,
+                                           const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+                                           const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return LogicalUnaryTestHelper<4>(workloadFactory,
+                                     memoryManager,
+                                     armnn::UnaryOperation::LogicalNot,
+                                     s_SimpleUnaryTestData.m_InputShape,
+                                     s_SimpleUnaryTestData.m_InputData,
+                                     s_SimpleUnaryTestData.m_OutputShape,
+                                     s_SimpleUnaryTestData.m_OutputNot,
+                                     tensorHandleFactory);
+}
+
+// Unary - Not with integers
+LayerTestResult<uint8_t, 4> LogicalNotIntTest(armnn::IWorkloadFactory& workloadFactory,
+                                              const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+                                              const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return LogicalUnaryTestHelper<4>(workloadFactory,
+                                     memoryManager,
+                                     armnn::UnaryOperation::LogicalNot,
+                                     s_SimpleUnaryIntTestData.m_InputShape,
+                                     s_SimpleUnaryIntTestData.m_InputData,
+                                     s_SimpleUnaryIntTestData.m_OutputShape,
+                                     s_SimpleUnaryIntTestData.m_OutputNot,
+                                     tensorHandleFactory);
+}
+
+// Binary - And
+LayerTestResult<uint8_t, 4> LogicalAndTest(armnn::IWorkloadFactory& workloadFactory,
+                                           const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+                                           const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return LogicalBinaryTestHelper<4>(workloadFactory,
+                                      memoryManager,
+                                      armnn::LogicalBinaryOperation::LogicalAnd,
+                                      s_SimpleBinaryTestData.m_InputShape0,
+                                      s_SimpleBinaryTestData.m_InputShape1,
+                                      s_SimpleBinaryTestData.m_InputData0,
+                                      s_SimpleBinaryTestData.m_InputData1,
+                                      s_SimpleBinaryTestData.m_OutputShape,
+                                      s_SimpleBinaryTestData.m_OutputAnd,
+                                      tensorHandleFactory);
+}
+
+// Binary - Or
+LayerTestResult<uint8_t, 4> LogicalOrTest(armnn::IWorkloadFactory& workloadFactory,
+                                          const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+                                          const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return LogicalBinaryTestHelper<4>(workloadFactory,
+                                      memoryManager,
+                                      armnn::LogicalBinaryOperation::LogicalOr,
+                                      s_SimpleBinaryTestData.m_InputShape0,
+                                      s_SimpleBinaryTestData.m_InputShape1,
+                                      s_SimpleBinaryTestData.m_InputData0,
+                                      s_SimpleBinaryTestData.m_InputData1,
+                                      s_SimpleBinaryTestData.m_OutputShape,
+                                      s_SimpleBinaryTestData.m_OutputOr,
+                                      tensorHandleFactory);
+}
+
+// Binary - And with integers
+LayerTestResult<uint8_t, 4> LogicalAndIntTest(armnn::IWorkloadFactory& workloadFactory,
+                                              const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+                                              const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return LogicalBinaryTestHelper<4>(workloadFactory,
+                                      memoryManager,
+                                      armnn::LogicalBinaryOperation::LogicalAnd,
+                                      s_SimpleBinaryIntTestData.m_InputShape0,
+                                      s_SimpleBinaryIntTestData.m_InputShape1,
+                                      s_SimpleBinaryIntTestData.m_InputData0,
+                                      s_SimpleBinaryIntTestData.m_InputData1,
+                                      s_SimpleBinaryIntTestData.m_OutputShape,
+                                      s_SimpleBinaryIntTestData.m_OutputAnd,
+                                      tensorHandleFactory);
+}
+
+// Binary - Or with integers
+LayerTestResult<uint8_t, 4> LogicalOrIntTest(armnn::IWorkloadFactory& workloadFactory,
+                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+                                             const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return LogicalBinaryTestHelper<4>(workloadFactory,
+                                      memoryManager,
+                                      armnn::LogicalBinaryOperation::LogicalOr,
+                                      s_SimpleBinaryIntTestData.m_InputShape0,
+                                      s_SimpleBinaryIntTestData.m_InputShape1,
+                                      s_SimpleBinaryIntTestData.m_InputData0,
+                                      s_SimpleBinaryIntTestData.m_InputData1,
+                                      s_SimpleBinaryIntTestData.m_OutputShape,
+                                      s_SimpleBinaryIntTestData.m_OutputOr,
+                                      tensorHandleFactory);
+}
+
+// Binary - And Broadcast
+LayerTestResult<uint8_t, 4> LogicalAndBroadcast1Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return LogicalBinaryTestHelper<4>(workloadFactory,
+                                      memoryManager,
+                                      armnn::LogicalBinaryOperation::LogicalAnd,
+                                      s_BroadcastBinary1TestData.m_InputShape0,
+                                      s_BroadcastBinary1TestData.m_InputShape1,
+                                      s_BroadcastBinary1TestData.m_InputData0,
+                                      s_BroadcastBinary1TestData.m_InputData1,
+                                      s_BroadcastBinary1TestData.m_OutputShape,
+                                      s_BroadcastBinary1TestData.m_OutputAnd,
+                                      tensorHandleFactory);
+}
+
+// Binary - Or Broadcast
+LayerTestResult<uint8_t, 4> LogicalOrBroadcast1Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return LogicalBinaryTestHelper<4>(workloadFactory,
+                                      memoryManager,
+                                      armnn::LogicalBinaryOperation::LogicalOr,
+                                      s_BroadcastBinary1TestData.m_InputShape0,
+                                      s_BroadcastBinary1TestData.m_InputShape1,
+                                      s_BroadcastBinary1TestData.m_InputData0,
+                                      s_BroadcastBinary1TestData.m_InputData1,
+                                      s_BroadcastBinary1TestData.m_OutputShape,
+                                      s_BroadcastBinary1TestData.m_OutputOr,
+                                      tensorHandleFactory);
+}
+
+// Binary - And Broadcast
+LayerTestResult<uint8_t, 4> LogicalAndBroadcast2Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return LogicalBinaryTestHelper<4>(workloadFactory,
+                                      memoryManager,
+                                      armnn::LogicalBinaryOperation::LogicalAnd,
+                                      s_BroadcastBinary2TestData.m_InputShape0,
+                                      s_BroadcastBinary2TestData.m_InputShape1,
+                                      s_BroadcastBinary2TestData.m_InputData0,
+                                      s_BroadcastBinary2TestData.m_InputData1,
+                                      s_BroadcastBinary2TestData.m_OutputShape,
+                                      s_BroadcastBinary2TestData.m_OutputAnd,
+                                      tensorHandleFactory);
+}
+
+// Binary - Or Broadcast
+LayerTestResult<uint8_t, 4> LogicalOrBroadcast2Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return LogicalBinaryTestHelper<4>(workloadFactory,
+                                      memoryManager,
+                                      armnn::LogicalBinaryOperation::LogicalOr,
+                                      s_BroadcastBinary2TestData.m_InputShape0,
+                                      s_BroadcastBinary2TestData.m_InputShape1,
+                                      s_BroadcastBinary2TestData.m_InputData0,
+                                      s_BroadcastBinary2TestData.m_InputData1,
+                                      s_BroadcastBinary2TestData.m_OutputShape,
+                                      s_BroadcastBinary2TestData.m_OutputOr,
+                                      tensorHandleFactory);
+}
+
+// Binary - And Broadcast
+LayerTestResult<uint8_t, 4> LogicalAndBroadcast3Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return LogicalBinaryTestHelper<4>(workloadFactory,
+                                      memoryManager,
+                                      armnn::LogicalBinaryOperation::LogicalAnd,
+                                      s_BroadcastBinary3TestData.m_InputShape0,
+                                      s_BroadcastBinary3TestData.m_InputShape1,
+                                      s_BroadcastBinary3TestData.m_InputData0,
+                                      s_BroadcastBinary3TestData.m_InputData1,
+                                      s_BroadcastBinary3TestData.m_OutputShape,
+                                      s_BroadcastBinary3TestData.m_OutputAnd,
+                                      tensorHandleFactory);
+}
+
+// Binary - Or Broadcast
+LayerTestResult<uint8_t, 4> LogicalOrBroadcast3Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return LogicalBinaryTestHelper<4>(workloadFactory,
+                                      memoryManager,
+                                      armnn::LogicalBinaryOperation::LogicalOr,
+                                      s_BroadcastBinary3TestData.m_InputShape0,
+                                      s_BroadcastBinary3TestData.m_InputShape1,
+                                      s_BroadcastBinary3TestData.m_InputData0,
+                                      s_BroadcastBinary3TestData.m_InputData1,
+                                      s_BroadcastBinary3TestData.m_OutputShape,
+                                      s_BroadcastBinary3TestData.m_OutputOr,
+                                      tensorHandleFactory);
+}
\ No newline at end of file
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <armnn/backends/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+// Unary - Logical Not
+LayerTestResult<uint8_t, 4> LogicalNotTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Unary - Logical Not with integers
+LayerTestResult<uint8_t, 4> LogicalNotIntTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Binary - Logical And
+LayerTestResult<uint8_t, 4> LogicalAndTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Binary - Logical Or
+LayerTestResult<uint8_t, 4> LogicalOrTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Binary - Logical And with integers
+LayerTestResult<uint8_t, 4> LogicalAndIntTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Binary - Logical Or with integers
+LayerTestResult<uint8_t, 4> LogicalOrIntTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Binary - Logical And Broadcast
+LayerTestResult<uint8_t, 4> LogicalAndBroadcast1Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Binary - Logical Or Broadcast
+LayerTestResult<uint8_t, 4> LogicalOrBroadcast1Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Binary - Logical And Broadcast
+LayerTestResult<uint8_t, 4> LogicalAndBroadcast2Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Binary - Logical Or Broadcast
+LayerTestResult<uint8_t, 4> LogicalOrBroadcast2Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Binary - Logical And Broadcast
+LayerTestResult<uint8_t, 4> LogicalAndBroadcast3Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Binary - Logical Or Broadcast
+LayerTestResult<uint8_t, 4> LogicalOrBroadcast3Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
\ No newline at end of file
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 52c079fae4..f48c120203 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -1105,6 +1105,53 @@ bool RefLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
     return supported;
 }
 
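+// Logical ops in the reference backend accept Boolean tensors only,
+// which Arm NN stores as 8-bit values.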
+bool RefLayerSupport::IsLogicalBinarySupported(const TensorInfo& input0,
+                                               const TensorInfo& input1,
+                                               const TensorInfo& output,
+                                               const LogicalBinaryDescriptor& descriptor,
+                                               Optional<std::string&> reasonIfUnsupported) const
+{
+    IgnoreUnused(descriptor);
+
+    std::array<DataType, 1> supportedTypes =
+    {
+        DataType::Boolean
+    };
+
+    bool supported = true;
+    supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
+                                  "Reference LogicalBinary: input 0 type not supported");
+    supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
+                                  "Reference LogicalBinary: input 1 type not supported");
+
+    supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
+                                  "Reference LogicalBinary: input and output types do not match");
+
+    return supported;
+}
+
+bool RefLayerSupport::IsLogicalUnarySupported(const TensorInfo& input,
+                                              const TensorInfo& output,
+                                              const ElementwiseUnaryDescriptor& descriptor,
+                                              Optional<std::string&> reasonIfUnsupported) const
+{
+    IgnoreUnused(descriptor);
+
+    std::array<DataType, 1> supportedTypes =
+    {
+        DataType::Boolean
+    };
+
+    bool supported = true;
+    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
+                                  "Reference LogicalUnary: input type not supported");
+
+    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
+                                  "Reference LogicalUnary: input and output types do not match");
+
+    return supported;
+}
+
 bool RefLayerSupport::IsLogSoftmaxSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const LogSoftmaxDescriptor& descriptor,
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index a233082aaa..318eb4064b 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -182,6 +182,17 @@ public:
                                     const L2NormalizationDescriptor& descriptor,
                                     Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsLogicalBinarySupported(const TensorInfo& input0,
+                                  const TensorInfo& input1,
+                                  const TensorInfo& output,
+                                  const LogicalBinaryDescriptor& descriptor,
+                                  Optional<std::string&> reasonIfUnsupported) const override;
+
+    bool IsLogicalUnarySupported(const TensorInfo& input,
+                                 const TensorInfo& output,
+                                 const ElementwiseUnaryDescriptor& descriptor,
+                                 Optional<std::string&> reasonIfUnsupported) const override;
+
     bool IsLogSoftmaxSupported(const TensorInfo& input,
                                const TensorInfo& output,
                                const LogSoftmaxDescriptor& descriptor,
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index e7e57b15d1..9080028e72 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -401,6 +401,19 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateL2Normalization(const L2Nor
     return std::make_unique<RefL2NormalizationWorkload>(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateLogicalBinary(const LogicalBinaryQueueDescriptor& descriptor,
+                                                                   const WorkloadInfo& info) const
+{
+    return std::make_unique<RefLogicalBinaryWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateLogicalUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
+                                                                  const WorkloadInfo& info) const
+{
+    return std::make_unique<RefLogicalUnaryWorkload>(descriptor, info);
+}
+
+
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& info) const
 {
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index 5f22c9eac2..8c3d719ae0 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -162,6 +162,12 @@ public:
     std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
                                                      const WorkloadInfo& info) const override;
 
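+    // NOT reuses the ElementwiseUnary descriptor; AND/OR take the new LogicalBinary descriptor.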
+    std::unique_ptr<IWorkload> CreateLogicalBinary(const LogicalBinaryQueueDescriptor& descriptor,
+                                                   const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateLogicalUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
+                                                  const WorkloadInfo& info) const override;
+
     std::unique_ptr<IWorkload> CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info) const override;
 
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index bf5f340afc..b4aa3a0953 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -69,6 +69,8 @@ BACKEND_SOURCES := \
         workloads/RefGatherWorkload.cpp \
         workloads/RefInstanceNormalizationWorkload.cpp \
         workloads/RefL2NormalizationWorkload.cpp \
+        workloads/RefLogicalBinaryWorkload.cpp \
+        workloads/RefLogicalUnaryWorkload.cpp \
         workloads/RefLogSoftmaxWorkload.cpp \
         workloads/RefLstmWorkload.cpp \
         workloads/RefMeanWorkload.cpp \
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 7542a64711..60400c514e 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -2215,4 +2215,17 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(Exp3dQuantisedAsymm8, Exp3dTest<DataType::QAsymmU8>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(Exp3dQuantisedSymm16, Exp3dTest<DataType::QSymmS16>)
 
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalNot, LogicalNotTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalNotInt, LogicalNotIntTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAnd, LogicalAndTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalOr, LogicalOrTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAndInt, LogicalAndIntTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalOrInt, LogicalOrIntTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAndBroadcast1, LogicalAndBroadcast1Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalOrBroadcast1, LogicalOrBroadcast1Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAndBroadcast2, LogicalAndBroadcast2Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalOrBroadcast2, LogicalOrBroadcast2Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAndBroadcast3, LogicalAndBroadcast3Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalOrBroadcast3, LogicalOrBroadcast3Test)
+
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp
index a10f383e90..73e24691d9 100644
--- a/src/backends/reference/workloads/BaseIterator.hpp
+++ b/src/backends/reference/workloads/BaseIterator.hpp
@@ -515,6 +515,40 @@ public:
     }
 };
 
+class BooleanDecoderBool : public TypedIterator<const uint8_t, Decoder<bool>>
+{
+public:
+    BooleanDecoderBool(const uint8_t* data)
+        : TypedIterator(data) {}
+
+    BooleanDecoderBool()
+        : BooleanDecoderBool(nullptr) {}
+
+    bool Get() const override
+    {
+        return *m_Iterator;
+    }
+
+    std::vector<float> DecodeTensor(const TensorShape& tensorShape,
+                                    const unsigned int channelMultiplier,
+                                    const bool isDepthwise) override
+    {
+        IgnoreUnused(channelMultiplier, isDepthwise);
+
+        const unsigned int size = tensorShape.GetNumElements();
+        std::vector<float> decodedTensor;
+        decodedTensor.reserve(size);
+
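+        // operator[](i) repositions the underlying pointer on element i;
+        // the dereference that follows reads that element's value.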
+        for (uint32_t i = 0; i < size; ++i)
+        {
+            this->operator[](i);
+            decodedTensor.emplace_back(*m_Iterator);
+        }
+
+        return decodedTensor;
+    }
+};
+
 class QASymm8Encoder : public TypedIterator<uint8_t, Encoder<float>>
 {
 public:
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index cd9efc96af..1b20e5bf2d 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -107,6 +107,10 @@ list(APPEND armnnRefBackendWorkloads_sources
     RefInstanceNormalizationWorkload.hpp
     RefL2NormalizationWorkload.cpp
     RefL2NormalizationWorkload.hpp
+    RefLogicalBinaryWorkload.cpp
+    RefLogicalBinaryWorkload.hpp
+    RefLogicalUnaryWorkload.cpp
+    RefLogicalUnaryWorkload.hpp
     RefLogSoftmaxWorkload.cpp
     RefLogSoftmaxWorkload.hpp
     RefLstmWorkload.cpp
diff --git a/src/backends/reference/workloads/Decoders.hpp b/src/backends/reference/workloads/Decoders.hpp
index 08e0140fad..0b3f36047d 100644
--- a/src/backends/reference/workloads/Decoders.hpp
+++ b/src/backends/reference/workloads/Decoders.hpp
@@ -149,6 +149,24 @@ inline std::unique_ptr<Decoder<float>> MakeDecoder(const TensorInfo& info, const
     return nullptr;
 }
 
+template<>
+inline std::unique_ptr<Decoder<bool>> MakeDecoder(const TensorInfo& info, const void* data)
+{
+    switch(info.GetDataType())
+    {
+        case DataType::Boolean:
+        {
+            return std::make_unique<BooleanDecoderBool>(static_cast<const uint8_t*>(data));
+        }
+        default:
+        {
+            ARMNN_ASSERT_MSG(false, "Unsupported Data Type!");
+            break;
+        }
+    }
+    return nullptr;
+}
+
 template<>
 inline std::unique_ptr<Decoder<int32_t>> MakeDecoder(const TensorInfo& info, const void* data)
 {
diff --git a/src/backends/reference/workloads/ElementwiseFunction.cpp b/src/backends/reference/workloads/ElementwiseFunction.cpp
index afae188bd6..d6f3f42478 100644
--- a/src/backends/reference/workloads/ElementwiseFunction.cpp
+++ b/src/backends/reference/workloads/ElementwiseFunction.cpp
@@ -37,6 +37,26 @@ ElementwiseUnaryFunction<Functor>::ElementwiseUnaryFunction(const TensorShape& i
     BroadcastLoop(inShape, outShape).Unroll(Functor(), 0, inData, outData);
 }
 
+template <typename Functor>
+LogicalBinaryFunction<Functor>::LogicalBinaryFunction(const TensorShape& inShape0,
+                                                      const TensorShape& inShape1,
+                                                      const TensorShape& outShape,
+                                                      Decoder<InType>& inData0,
+                                                      Decoder<InType>& inData1,
+                                                      Encoder<OutType>& outData)
+{
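+    // BroadcastLoop performs the index arithmetic needed to broadcast the two
+    // (possibly differently shaped) inputs over the output shape.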
+    BroadcastLoop(inShape0, inShape1, outShape).Unroll(Functor(), 0, inData0, inData1, outData);
+}
+
+template <typename Functor>
+LogicalUnaryFunction<Functor>::LogicalUnaryFunction(const TensorShape& inShape,
+                                                    const TensorShape& outShape,
+                                                    Decoder<InType>& inData,
+                                                    Encoder<OutType>& outData)
+{
+    BroadcastLoop(inShape, outShape).Unroll(Functor(), 0, inData, outData);
+}
+
 } //namespace armnn
 
 template struct armnn::ElementwiseBinaryFunction<std::plus<float>>;
@@ -67,3 +87,8 @@ template struct armnn::ElementwiseUnaryFunction<armnn::exp<float>>;
 template struct armnn::ElementwiseUnaryFunction<armnn::neg<float>>;
 template struct armnn::ElementwiseUnaryFunction<armnn::rsqrt<float>>;
 template struct armnn::ElementwiseUnaryFunction<armnn::sqrt<float>>;
+
+// Logical Unary
+template struct armnn::LogicalUnaryFunction<std::logical_not<bool>>;
+template struct armnn::LogicalBinaryFunction<std::logical_and<bool>>;
+template struct armnn::LogicalBinaryFunction<std::logical_or<bool>>;
diff --git a/src/backends/reference/workloads/ElementwiseFunction.hpp b/src/backends/reference/workloads/ElementwiseFunction.hpp
index 8259ba5ac7..ef4a2dc7d5 100644
--- a/src/backends/reference/workloads/ElementwiseFunction.hpp
+++ b/src/backends/reference/workloads/ElementwiseFunction.hpp
@@ -37,4 +37,30 @@ struct ElementwiseUnaryFunction
                              Encoder<OutType>& outData);
 };
 
+template <typename Functor>
+struct LogicalBinaryFunction
+{
+    using OutType = bool;
+    using InType = bool;
+
+    LogicalBinaryFunction(const TensorShape& inShape0,
+                          const TensorShape& inShape1,
+                          const TensorShape& outShape,
+                          Decoder<InType>& inData0,
+                          Decoder<InType>& inData1,
+                          Encoder<OutType>& outData);
+};
+
+template <typename Functor>
+struct LogicalUnaryFunction
+{
+    using OutType = bool;
+    using InType = bool;
+
+    LogicalUnaryFunction(const TensorShape& inShape,
+                         const TensorShape& outShape,
+                         Decoder<InType>& inData,
+                         Encoder<OutType>& outData);
+};
+
 } //namespace armnn
diff --git a/src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp b/src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp
new file mode 100644
index 0000000000..1b4e8f9aa0
--- /dev/null
+++ b/src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp
@@ -0,0 +1,75 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefLogicalBinaryWorkload.hpp"
+
+#include "Decoders.hpp"
+#include "ElementwiseFunction.hpp"
+#include "Encoders.hpp"
+#include "RefWorkloadUtils.hpp"
+
+#include <Profiling.hpp>
+
+#include <armnn/TypesUtils.hpp>
+
+namespace armnn
+{
+
+RefLogicalBinaryWorkload::RefLogicalBinaryWorkload(const LogicalBinaryQueueDescriptor& desc,
+                                                   const WorkloadInfo& info)
+    : BaseWorkload<LogicalBinaryQueueDescriptor>(desc, info)
+{}
+
+void RefLogicalBinaryWorkload::PostAllocationConfigure()
+{
+    const TensorInfo& inputInfo0 = GetTensorInfo(m_Data.m_Inputs[0]);
+    const TensorInfo& inputInfo1 = GetTensorInfo(m_Data.m_Inputs[1]);
+    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+    m_Input0 = MakeDecoder<InType>(inputInfo0);
+    m_Input1 = MakeDecoder<InType>(inputInfo1);
+    m_Output = MakeEncoder<OutType>(outputInfo);
+}
+
+void RefLogicalBinaryWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefLogicalBinaryWorkload_Execute");
+
+    const TensorInfo& inputInfo0 = GetTensorInfo(m_Data.m_Inputs[0]);
+    const TensorInfo& inputInfo1 = GetTensorInfo(m_Data.m_Inputs[1]);
+    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+    const TensorShape& inShape0 = inputInfo0.GetShape();
+    const TensorShape& inShape1 = inputInfo1.GetShape();
+    const TensorShape& outShape = outputInfo.GetShape();
+
+    m_Input0->Reset(m_Data.m_Inputs[0]->Map());
+    m_Input1->Reset(m_Data.m_Inputs[1]->Map());
+    m_Output->Reset(m_Data.m_Outputs[0]->Map());
+
+    using AndFunction = LogicalBinaryFunction<std::logical_and<bool>>;
+    using OrFunction  = LogicalBinaryFunction<std::logical_or<bool>>;
+
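+    // Dispatch on the descriptor's operation: each case runs the matching
+    // standard functor over the broadcast loop.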
+    switch (m_Data.m_Parameters.m_Operation)
+    {
+        case LogicalBinaryOperation::LogicalAnd:
+        {
+            AndFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
+            break;
+        }
+        case LogicalBinaryOperation::LogicalOr:
+        {
+            OrFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
+            break;
+        }
+        default:
+        {
+            throw InvalidArgumentException(std::string("Unsupported Logical Binary operation") +
+                                           GetLogicalBinaryOperationAsCString(m_Data.m_Parameters.m_Operation),
+                                           CHECK_LOCATION());
+        }
+    }
+}
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefLogicalBinaryWorkload.hpp b/src/backends/reference/workloads/RefLogicalBinaryWorkload.hpp
new file mode 100644
index 0000000000..4d6baf5fa4
--- /dev/null
+++ b/src/backends/reference/workloads/RefLogicalBinaryWorkload.hpp
@@ -0,0 +1,34 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "BaseIterator.hpp"
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+namespace armnn
+{
+
+class RefLogicalBinaryWorkload : public BaseWorkload<LogicalBinaryQueueDescriptor>
+{
+public:
+    using BaseWorkload<LogicalBinaryQueueDescriptor>::m_Data;
+
+    RefLogicalBinaryWorkload(const LogicalBinaryQueueDescriptor& descriptor, const WorkloadInfo& info);
+    void PostAllocationConfigure() override;
+    virtual void Execute() const override;
+
+private:
+    using InType  = bool;
+    using OutType = bool;
+
+    std::unique_ptr<Decoder<InType>>  m_Input0;
+    std::unique_ptr<Decoder<InType>>  m_Input1;
+    std::unique_ptr<Encoder<OutType>> m_Output;
+};
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefLogicalUnaryWorkload.cpp b/src/backends/reference/workloads/RefLogicalUnaryWorkload.cpp
new file mode 100644
index 0000000000..76eb5ac39f
--- /dev/null
+++ b/src/backends/reference/workloads/RefLogicalUnaryWorkload.cpp
@@ -0,0 +1,64 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefLogicalUnaryWorkload.hpp"
+
+#include "Decoders.hpp"
+#include "ElementwiseFunction.hpp"
+#include "Encoders.hpp"
+#include "RefWorkloadUtils.hpp"
+
+#include <Profiling.hpp>
+
+#include <armnn/TypesUtils.hpp>
+
+namespace armnn
+{
+
+RefLogicalUnaryWorkload::RefLogicalUnaryWorkload(const ElementwiseUnaryQueueDescriptor& desc,
+                                                 const WorkloadInfo& info)
+    : BaseWorkload<ElementwiseUnaryQueueDescriptor>(desc, info)
+{}
+
+void RefLogicalUnaryWorkload::PostAllocationConfigure()
+{
+    const TensorInfo& inputInfo  = GetTensorInfo(m_Data.m_Inputs[0]);
+    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+    m_Input  = MakeDecoder<InType>(inputInfo);
+    m_Output = MakeEncoder<OutType>(outputInfo);
+}
+
+void RefLogicalUnaryWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefLogicalUnaryWorkload_Execute");
+
+    const TensorInfo& inputInfo  = GetTensorInfo(m_Data.m_Inputs[0]);
+    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+    const TensorShape& inShape  = inputInfo.GetShape();
+    const TensorShape& outShape = outputInfo.GetShape();
+
+    m_Input->Reset(m_Data.m_Inputs[0]->Map());
+    m_Output->Reset(m_Data.m_Outputs[0]->Map());
+
+    using NotFunction = LogicalUnaryFunction<std::logical_not<bool>>;
+
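+    // LogicalNot is currently the only boolean unary operation; any other
+    // unary operation reaching this workload is rejected below.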
+    switch (m_Data.m_Parameters.m_Operation)
+    {
+        case UnaryOperation::LogicalNot:
+        {
+            NotFunction(inShape, outShape, *m_Input, *m_Output);
+            break;
+        }
+        default:
+        {
+            throw InvalidArgumentException(std::string("Unsupported Logical Unary operation") +
+                                           GetUnaryOperationAsCString(m_Data.m_Parameters.m_Operation),
+                                           CHECK_LOCATION());
+        }
+    }
+}
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefLogicalUnaryWorkload.hpp b/src/backends/reference/workloads/RefLogicalUnaryWorkload.hpp
new file mode 100644
index 0000000000..0d8b35495c
--- /dev/null
+++ b/src/backends/reference/workloads/RefLogicalUnaryWorkload.hpp
@@ -0,0 +1,33 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "BaseIterator.hpp"
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+namespace armnn
+{
+
+class RefLogicalUnaryWorkload : public BaseWorkload<ElementwiseUnaryQueueDescriptor>
+{
+public:
+    using BaseWorkload<ElementwiseUnaryQueueDescriptor>::m_Data;
+
+    RefLogicalUnaryWorkload(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info);
+    void PostAllocationConfigure() override;
+    virtual void Execute() const override;
+
+private:
+    using InType  = bool;
+    using OutType = bool;
+
+    std::unique_ptr<Decoder<InType>>  m_Input;
+    std::unique_ptr<Encoder<OutType>> m_Output;
+};
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index fc47cff84f..390b2a8d55 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -41,6 +41,8 @@
 #include "RefGatherWorkload.hpp"
 #include "RefInstanceNormalizationWorkload.hpp"
 #include "RefL2NormalizationWorkload.hpp"
+#include "RefLogicalBinaryWorkload.hpp"
+#include "RefLogicalUnaryWorkload.hpp"
 #include "RefLogSoftmaxWorkload.hpp"
 #include "RefLstmWorkload.hpp"
 #include "RefMeanWorkload.hpp"
-- 
cgit v1.2.1