author     Mike Kelly <mike.kelly@arm.com>            2023-03-08 13:47:17 +0000
committer  Francis Murtagh <francis.murtagh@arm.com>  2023-03-14 16:40:09 +0000
commit     3ec3077b4eaedcc0c20ab5774bdbe365da541445 (patch)
tree       d601d2000897dec8691bf64cbddc9036f26b8034 /src/armnn/layers
parent     a088cd00b3cce672d26cdcb4965fc2a86b48f339 (diff)
download   armnn-3ec3077b4eaedcc0c20ab5774bdbe365da541445.tar.gz
IVGCVSW-3808 Add ElementwiseBinaryLayer
!android-nn-driver:9329
 * Added ElementwiseBinaryLayer that can represent all ElementwiseBinary operations, including Add, Div, Sub, Maximum, Mul and Minimum.
 * Updated Delegate to use ElementwiseBinaryLayer instead of the Add, Div, Sub, Maximum, Mul and Minimum layers.
 * Updated Deserializer to use ElementwiseBinaryLayer instead of the Add, Div, Sub, Maximum, Mul and Minimum layers.
 * Updated OnnxParser to use ElementwiseBinaryLayer instead of the Add layer.
 * Updated TfLiteParser to use ElementwiseBinaryLayer instead of the Add, Div, Sub, Maximum, Mul and Minimum layers.
 * Updated CL and Neon tests to use ElementwiseBinaryLayer.
 * Updated CL and Neon Backend Specific Optimizations to accept ElementwiseBinaryLayers as well as Add, Div, Mul, Sub, Maximum and Minimum layers.

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I7cbb96b60eb01f0e2b57b0541016d48a08b86c75
Diffstat (limited to 'src/armnn/layers')
-rw-r--r--  src/armnn/layers/ElementwiseBinaryLayer.cpp  89
-rw-r--r--  src/armnn/layers/ElementwiseBinaryLayer.hpp  48
2 files changed, 137 insertions(+), 0 deletions(-)
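
As a quick illustration of what the unified layer replaces, the sketch below builds a tiny network that adds two tensors through a single ElementwiseBinaryLayer rather than a dedicated AdditionLayer. It is not part of this commit: it assumes the INetwork::AddElementwiseBinaryLayer overload and the BinaryOperation::Add enumerator introduced alongside this change, so the exact names should be checked against the release the commit lands in.

#include <armnn/ArmNN.hpp>

int main()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    // Two inputs feeding one ElementwiseBinary layer configured as an Add.
    IConnectableLayer* input0 = network->AddInputLayer(0, "input0");
    IConnectableLayer* input1 = network->AddInputLayer(1, "input1");
    IConnectableLayer* add    = network->AddElementwiseBinaryLayer(
        ElementwiseBinaryDescriptor(BinaryOperation::Add), "add");
    IConnectableLayer* output = network->AddOutputLayer(0, "output");

    input0->GetOutputSlot(0).Connect(add->GetInputSlot(0));
    input1->GetOutputSlot(0).Connect(add->GetInputSlot(1));
    add->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Both inputs share a shape here, so the output shape matches it.
    TensorInfo info({ 1, 2, 3, 4 }, DataType::Float32);
    input0->GetOutputSlot(0).SetTensorInfo(info);
    input1->GetOutputSlot(0).SetTensorInfo(info);
    add->GetOutputSlot(0).SetTensorInfo(info);

    return 0;
}

Switching the operation (Div, Sub, Maximum, Mul, Minimum) only changes the descriptor passed to the same layer type, which is what lets the Delegate, parsers and Deserializer drop their per-operation layer handling.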
diff --git a/src/armnn/layers/ElementwiseBinaryLayer.cpp b/src/armnn/layers/ElementwiseBinaryLayer.cpp
new file mode 100644
index 0000000000..ae1813f33a
--- /dev/null
+++ b/src/armnn/layers/ElementwiseBinaryLayer.cpp
@@ -0,0 +1,89 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ElementwiseBinaryLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+namespace armnn
+{
+
+ElementwiseBinaryLayer::ElementwiseBinaryLayer(const ElementwiseBinaryDescriptor& param, const char* name)
+ : LayerWithParameters(2, 1, LayerType::ElementwiseBinary, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> ElementwiseBinaryLayer::CreateWorkload(const IWorkloadFactory& factory) const
+{
+ ElementwiseBinaryQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
+ return factory.CreateWorkload(LayerType::ElementwiseBinary, descriptor, PrepInfoAndDesc(descriptor));
+}
+
+ElementwiseBinaryLayer* ElementwiseBinaryLayer::Clone(Graph& graph) const
+{
+ return CloneBase<ElementwiseBinaryLayer>(graph, m_Param, GetName());
+}
+
+std::vector<TensorShape> ElementwiseBinaryLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+ ARMNN_ASSERT(inputShapes.size() == 2);
+ TensorShape input0 = inputShapes[0];
+ TensorShape input1 = inputShapes[1];
+
+ if (inputShapes[0].GetNumDimensions() < inputShapes[1].GetNumDimensions())
+ {
+ input1 = inputShapes[0];
+ input0 = inputShapes[1];
+ }
+
+ unsigned int numDims = input0.GetNumDimensions();
+ unsigned int shiftedDims = input0.GetNumDimensions() - input1.GetNumDimensions();
+
+ // Get the max of the inputs.
+ std::vector<unsigned int> dims(numDims);
+ for (unsigned int i = shiftedDims; i < numDims; i++)
+ {
+ unsigned int dim0 = input0[i];
+ unsigned int dim1 = input1[i - shiftedDims];
+
+ // Validate inputs are broadcast compatible.
+ ARMNN_ASSERT_MSG(dim0 == dim1 || dim0 == 1 || dim1 == 1,
+ "Dimensions should either match or one should be of size 1.");
+
+ dims[i] = std::max(dim0, dim1);
+ }
+
+ // Fill in the rest of the shifted dimensions.
+ for (unsigned int i = 0; i < shiftedDims; i++)
+ {
+ dims[i] = input0[i];
+ }
+
+ return std::vector<TensorShape>({ TensorShape(numDims, dims.data()) });
+}
+
+void ElementwiseBinaryLayer::ValidateTensorShapesFromInputs()
+{
+ VerifyLayerConnections(2, CHECK_LOCATION());
+
+ const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
+
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
+
+ auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
+ GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape() });
+
+ ARMNN_ASSERT(inferredShapes.size() == 1);
+
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, GetLayerTypeAsCString(GetType()));
+}
+
+void ElementwiseBinaryLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
+}
+} // namespace armnn
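
For reference, the broadcast rule implemented by InferOutputShapes above aligns the two shapes at their trailing dimensions, requires each aligned pair to match or be 1, takes the maximum of each pair, and copies the leading (shifted) dimensions of the higher-rank input unchanged. The standalone sketch below (illustration only, not part of this commit) mirrors that logic on plain vectors:

#include <algorithm>
#include <cassert>
#include <vector>

// Mirrors the broadcast logic of InferOutputShapes: align trailing dimensions,
// take the max of each aligned pair, keep the leading dimensions of the
// higher-rank shape.
std::vector<unsigned int> BroadcastShape(std::vector<unsigned int> a,
                                         std::vector<unsigned int> b)
{
    if (a.size() < b.size()) { std::swap(a, b); }

    const size_t shift = a.size() - b.size();
    std::vector<unsigned int> out(a);

    for (size_t i = shift; i < a.size(); ++i)
    {
        const unsigned int d0 = a[i];
        const unsigned int d1 = b[i - shift];
        assert(d0 == d1 || d0 == 1 || d1 == 1); // broadcast-compatible check
        out[i] = std::max(d0, d1);
    }
    return out;
}

// Example: { 8, 1, 6, 1 } combined with { 7, 1, 5 } broadcasts to { 8, 7, 6, 5 }.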
diff --git a/src/armnn/layers/ElementwiseBinaryLayer.hpp b/src/armnn/layers/ElementwiseBinaryLayer.hpp
new file mode 100644
index 0000000000..78e3f41f9e
--- /dev/null
+++ b/src/armnn/layers/ElementwiseBinaryLayer.hpp
@@ -0,0 +1,48 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+/// This layer represents an elementwise binary operation.
+class ElementwiseBinaryLayer : public LayerWithParameters<ElementwiseBinaryDescriptor>
+{
+public:
+ /// Makes a workload for the elementwise binary operation that this layer represents,
+ /// using the given workload factory.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
+
+ /// Creates a dynamically-allocated copy of this layer
+ /// @param [in] graph The graph into which this layer is being cloned
+ ElementwiseBinaryLayer* Clone(Graph& graph) const override;
+
+ /// Infers the output shape from the two input shapes, applying broadcast rules.
+ /// @param [in] inputShapes The shapes of the layer's two inputs.
+ /// @return A vector containing the single inferred output shape.
+ std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+ /// Check if the input tensor shape(s) will lead to a valid configuration
+ /// of @ref ElementwiseBinaryLayer
+ void ValidateTensorShapesFromInputs() override;
+
+ void ExecuteStrategy(IStrategy& strategy) const override;
+
+protected:
+ /// Constructor to create an ElementwiseBinaryLayer
+ /// @param [in] param ElementwiseBinaryDescriptor to configure the ElementwiseBinaryLayer
+ /// @param [in] name Optional name for the layer
+ ElementwiseBinaryLayer(const ElementwiseBinaryDescriptor& param, const char* name);
+
+ /// Default destructor
+ ~ElementwiseBinaryLayer() = default;
+};
+
+} // namespace armnn