author     James Conroy <james.conroy@arm.com>  2020-11-06 16:28:18 +0000
committer  James Conroy <james.conroy@arm.com>  2020-11-09 18:26:28 +0000
commit     aba90cd608eb65ab459cd71a6724511a1507763b (patch)
tree       8c83548e02de2bc6c34811ea2eb9c3dac0976068 /src/armnn/layers
parent     c9bc80e1d93d27ad298133c7345627e6a946fb92 (diff)
download   armnn-aba90cd608eb65ab459cd71a6724511a1507763b.tar.gz
IVGCVSW-5091 Add Logical ops frontend and ref impl
* Add frontend and reference implementation for logical ops NOT, AND, OR.
* Unary NOT uses the existing ElementwiseUnary layer and ElementwiseUnary descriptor.
* Binary AND/OR uses the new LogicalBinary layer and new LogicalBinary descriptor.
* Add serialization/deserialization support and add missing ElementwiseUnary deserializer code.
* Add additional Boolean decoder in BaseIterator.hpp.

Signed-off-by: James Conroy <james.conroy@arm.com>
Change-Id: Id343b01174053a166de1b98b6175e04a5065f720
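The new operations are exposed through the public INetwork frontend. Below is a minimal sketch of how a caller might wire them up; the tensor shape, layer names and binding ids are illustrative only and are not part of this patch.

// Sketch only: NOT via ElementwiseUnary, AND via the new LogicalBinary layer.
#include <armnn/ArmNN.hpp>

int main()
{
    using namespace armnn;

    INetworkPtr net = INetwork::Create();

    // Boolean tensor info used for all slots in this toy graph.
    TensorInfo boolInfo({ 1, 2, 2, 2 }, DataType::Boolean);

    IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
    IConnectableLayer* input1 = net->AddInputLayer(1, "input1");

    // Binary AND/OR goes through the new LogicalBinary layer.
    LogicalBinaryDescriptor andDesc(LogicalBinaryOperation::LogicalAnd);
    IConnectableLayer* logicalAnd = net->AddLogicalBinaryLayer(andDesc, "and");

    // Unary NOT reuses the existing ElementwiseUnary layer.
    ElementwiseUnaryDescriptor notDesc(UnaryOperation::LogicalNot);
    IConnectableLayer* logicalNot = net->AddElementwiseUnaryLayer(notDesc, "not");

    IConnectableLayer* output = net->AddOutputLayer(0, "output");

    // Connect: NOT(input0 AND input1) -> output.
    input0->GetOutputSlot(0).Connect(logicalAnd->GetInputSlot(0));
    input1->GetOutputSlot(0).Connect(logicalAnd->GetInputSlot(1));
    logicalAnd->GetOutputSlot(0).Connect(logicalNot->GetInputSlot(0));
    logicalNot->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input0->GetOutputSlot(0).SetTensorInfo(boolInfo);
    input1->GetOutputSlot(0).SetTensorInfo(boolInfo);
    logicalAnd->GetOutputSlot(0).SetTensorInfo(boolInfo);
    logicalNot->GetOutputSlot(0).SetTensorInfo(boolInfo);

    return 0;
}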
Diffstat (limited to 'src/armnn/layers')
-rw-r--r--  src/armnn/layers/LogicalBinaryLayer.cpp  80
-rw-r--r--  src/armnn/layers/LogicalBinaryLayer.hpp  50
2 files changed, 130 insertions, 0 deletions
diff --git a/src/armnn/layers/LogicalBinaryLayer.cpp b/src/armnn/layers/LogicalBinaryLayer.cpp
new file mode 100644
index 0000000000..0ae5ea5641
--- /dev/null
+++ b/src/armnn/layers/LogicalBinaryLayer.cpp
@@ -0,0 +1,80 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "LogicalBinaryLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <algorithm>
+
+namespace armnn
+{
+
+LogicalBinaryLayer::LogicalBinaryLayer(const LogicalBinaryDescriptor& param, const char* name)
+ : LayerWithParameters(2, 1, LayerType::LogicalBinary, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> LogicalBinaryLayer::CreateWorkload(const IWorkloadFactory& factory) const
+{
+ LogicalBinaryQueueDescriptor descriptor;
+ return factory.CreateLogicalBinary(descriptor, PrepInfoAndDesc(descriptor));
+}
+
+LogicalBinaryLayer* LogicalBinaryLayer::Clone(Graph& graph) const
+{
+ return CloneBase<LogicalBinaryLayer>(graph, m_Param, GetName());
+}
+
+std::vector<TensorShape> LogicalBinaryLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+ ARMNN_ASSERT(inputShapes.size() == 2);
+ const TensorShape& input0 = inputShapes[0];
+ const TensorShape& input1 = inputShapes[1];
+
+ ARMNN_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
+ unsigned int numDims = input0.GetNumDimensions();
+
+ std::vector<unsigned int> dims(numDims);
+ for (unsigned int i = 0; i < numDims; i++)
+ {
+ unsigned int dim0 = input0[i];
+ unsigned int dim1 = input1[i];
+
+ ARMNN_ASSERT_MSG(dim0 == dim1 || dim0 == 1 || dim1 == 1,
+ "Dimensions should either match or one should be of size 1.");
+
+ dims[i] = std::max(dim0, dim1);
+ }
+
+ return std::vector<TensorShape>({ TensorShape(numDims, dims.data()) });
+}
+
+void LogicalBinaryLayer::ValidateTensorShapesFromInputs()
+{
+ VerifyLayerConnections(2, CHECK_LOCATION());
+
+ const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
+
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
+
+ std::vector<TensorShape> inferredShapes = InferOutputShapes({
+ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
+ GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape()
+ });
+ ARMNN_ASSERT(inferredShapes.size() == 1);
+
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LogicalBinaryLayer");
+}
+
+void LogicalBinaryLayer::Accept(ILayerVisitor& visitor) const
+{
+ visitor.VisitLogicalBinaryLayer(this, GetParameters(), GetName());
+}
+
+} // namespace armnn
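For reference, InferOutputShapes above applies a simple per-dimension broadcast rule: the two sizes must either match or one must be 1, and the output takes the larger of the two. The following is a standalone sketch of that rule in plain C++ (not ArmNN code; the function name is made up for illustration).

#include <algorithm>
#include <cassert>
#include <cstdio>
#include <vector>

// Mirrors the broadcast rule used by LogicalBinaryLayer::InferOutputShapes.
std::vector<unsigned int> BroadcastShape(const std::vector<unsigned int>& a,
                                         const std::vector<unsigned int>& b)
{
    assert(a.size() == b.size());
    std::vector<unsigned int> out(a.size());
    for (size_t i = 0; i < a.size(); ++i)
    {
        // Dimensions must match or one of them must be 1.
        assert(a[i] == b[i] || a[i] == 1 || b[i] == 1);
        out[i] = std::max(a[i], b[i]);
    }
    return out;
}

int main()
{
    // e.g. { 1, 5, 3 } combined with { 4, 1, 3 } broadcasts to { 4, 5, 3 }.
    std::vector<unsigned int> result = BroadcastShape({ 1, 5, 3 }, { 4, 1, 3 });
    for (unsigned int d : result)
    {
        std::printf("%u ", d);
    }
    std::printf("\n");
    return 0;
}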
diff --git a/src/armnn/layers/LogicalBinaryLayer.hpp b/src/armnn/layers/LogicalBinaryLayer.hpp
new file mode 100644
index 0000000000..c6b024b36b
--- /dev/null
+++ b/src/armnn/layers/LogicalBinaryLayer.hpp
@@ -0,0 +1,50 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+/// This layer represents a Logical Binary operation.
+class LogicalBinaryLayer : public LayerWithParameters<LogicalBinaryDescriptor>
+{
+public:
+ /// Makes a workload for the LogicalBinary type
+ /// @param [in] factory The workload factory which will create the workload
+ /// @return A pointer to the created workload, or nullptr if not created
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
+
+ /// Creates a dynamically-allocated copy of this layer
+ /// @param [in] graph The graph into which this layer is being cloned
+ LogicalBinaryLayer* Clone(Graph& graph) const override;
+
+ /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+ /// otherwise infers the output shapes from the given input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes the layer has.
+ /// @return A vector of the inferred output shapes.
+ std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+ /// Check if the input tensor shape(s) will lead to a valid configuration
+ /// of @ref LogicalBinaryLayer.
+ void ValidateTensorShapesFromInputs() override;
+
+ void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+ /// Constructor to create a LogicalBinaryLayer
+ /// @param [in] param LogicalBinaryDescriptor to configure the LogicalBinaryLayer
+ /// @param [in] name Optional name for the layer
+ LogicalBinaryLayer(const LogicalBinaryDescriptor& param, const char* name);
+
+ /// Default destructor
+ ~LogicalBinaryLayer() = default;
+};
+
+} // namespace armnn
\ No newline at end of file