diff options
author | James Conroy <james.conroy@arm.com> | 2020-11-06 16:28:18 +0000 |
---|---|---|
committer | James Conroy <james.conroy@arm.com> | 2020-11-09 18:26:28 +0000 |
commit | aba90cd608eb65ab459cd71a6724511a1507763b (patch) | |
tree | 8c83548e02de2bc6c34811ea2eb9c3dac0976068 /src/armnn/layers/LogicalBinaryLayer.cpp | |
parent | c9bc80e1d93d27ad298133c7345627e6a946fb92 (diff) | |
download | armnn-aba90cd608eb65ab459cd71a6724511a1507763b.tar.gz |
IVGCVSW-5091 Add Logical ops frontend and ref impl
* Add frontend and reference implementation for logical
ops NOT, AND, OR.
* Unary NOT uses existing ElementwiseUnary layer and
ElementwiseUnary descriptor.
* Binary AND/OR uses new layer LogicalBinary and new
LogicalBinary descriptor.
* Add serialization/deserialization support and add missing
ElementwiseUnary deserializer code.
* Add additional Boolean decoder in BaseIterator.hpp.
Signed-off-by: James Conroy <james.conroy@arm.com>
Change-Id: Id343b01174053a166de1b98b6175e04a5065f720
Diffstat (limited to 'src/armnn/layers/LogicalBinaryLayer.cpp')
-rw-r--r-- | src/armnn/layers/LogicalBinaryLayer.cpp | 80 |
1 files changed, 80 insertions, 0 deletions
diff --git a/src/armnn/layers/LogicalBinaryLayer.cpp b/src/armnn/layers/LogicalBinaryLayer.cpp new file mode 100644 index 0000000000..0ae5ea5641 --- /dev/null +++ b/src/armnn/layers/LogicalBinaryLayer.cpp @@ -0,0 +1,80 @@ +// +// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "LogicalBinaryLayer.hpp" + +#include "LayerCloneBase.hpp" + +#include <backendsCommon/WorkloadData.hpp> +#include <backendsCommon/WorkloadFactory.hpp> + +#include <algorithm> + +namespace armnn +{ + +LogicalBinaryLayer::LogicalBinaryLayer(const LogicalBinaryDescriptor& param, const char* name) + : LayerWithParameters(2, 1, LayerType::LogicalBinary, param, name) +{ +} + +std::unique_ptr<IWorkload> LogicalBinaryLayer::CreateWorkload(const IWorkloadFactory& factory) const +{ + LogicalBinaryQueueDescriptor descriptor; + return factory.CreateLogicalBinary(descriptor, PrepInfoAndDesc(descriptor)); +} + +LogicalBinaryLayer* LogicalBinaryLayer::Clone(Graph& graph) const +{ + return CloneBase<LogicalBinaryLayer>(graph, m_Param, GetName()); +} + +std::vector<TensorShape> LogicalBinaryLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const +{ + ARMNN_ASSERT(inputShapes.size() == 2); + const TensorShape& input0 = inputShapes[0]; + const TensorShape& input1 = inputShapes[1]; + + ARMNN_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions()); + unsigned int numDims = input0.GetNumDimensions(); + + std::vector<unsigned int> dims(numDims); + for (unsigned int i = 0; i < numDims; i++) + { + unsigned int dim0 = input0[i]; + unsigned int dim1 = input1[i]; + + ARMNN_ASSERT_MSG(dim0 == dim1 || dim0 == 1 || dim1 == 1, + "Dimensions should either match or one should be of size 1."); + + dims[i] = std::max(dim0, dim1); + } + + return std::vector<TensorShape>({ TensorShape(numDims, dims.data()) }); +} + +void LogicalBinaryLayer::ValidateTensorShapesFromInputs() +{ + VerifyLayerConnections(2, CHECK_LOCATION()); + + const 
TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod); + + std::vector<TensorShape> inferredShapes = InferOutputShapes({ + GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(), + GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape() + }); + ARMNN_ASSERT(inferredShapes.size() == 1); + + ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LogicalBinaryLayer"); +} + +void LogicalBinaryLayer::Accept(ILayerVisitor& visitor) const +{ + visitor.VisitLogicalBinaryLayer(this, GetParameters(), GetName()); +} + +} // namespace armnn |