aboutsummaryrefslogtreecommitdiff
path: root/src/armnn
diff options
context:
space:
mode:
authorMike Kelly <mike.kelly@arm.com>2023-03-08 13:47:17 +0000
committerFrancis Murtagh <francis.murtagh@arm.com>2023-03-14 16:40:09 +0000
commit3ec3077b4eaedcc0c20ab5774bdbe365da541445 (patch)
treed601d2000897dec8691bf64cbddc9036f26b8034 /src/armnn
parenta088cd00b3cce672d26cdcb4965fc2a86b48f339 (diff)
downloadarmnn-3ec3077b4eaedcc0c20ab5774bdbe365da541445.tar.gz
IVGCVSW-3808 Add ElementwiseBinaryLayer
!android-nn-driver:9329 * Added ElementwiseBinaryLayer that can represent all ElementwiseBinary operations including Add, Div, Sub, Maximum, Mul and Minimum. * Updated Delegate to use ElementwiseBinaryLayer instead of the Add, Div, Sub, Maximum, Mul and Minimum layers. * Updated Deserializer to use ElementwiseBinaryLayer instead of the Add, Div, Sub, Maximum, Mul and Minimum layers. * Updated OnnxParser to use ElementwiseBinaryLayer instead of the Add layer. * Updated TfLiteParser to use ElementwiseBinaryLayer instead of the Add, Div, Sub, Maximum, Mul and Minimum layers. * Updated CL and Neon tests to use ElementwiseBinaryLayer. * Updated CL and Neon Backend Specific Optimizations to accept ElementBinaryLayers as well as Add, Div, Mul, Sub, Maximum and Minimum layers. Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com> Signed-off-by: Mike Kelly <mike.kelly@arm.com> Change-Id: I7cbb96b60eb01f0e2b57b0541016d48a08b86c75
Diffstat (limited to 'src/armnn')
-rw-r--r--src/armnn/BackendHelper.cpp16
-rw-r--r--src/armnn/LayersFwd.hpp4
-rw-r--r--src/armnn/Network.cpp18
-rw-r--r--src/armnn/Network.hpp5
-rw-r--r--src/armnn/layers/ElementwiseBinaryLayer.cpp89
-rw-r--r--src/armnn/layers/ElementwiseBinaryLayer.hpp48
-rw-r--r--src/armnn/optimizations/AddBroadcastReshapeLayer.hpp4
-rw-r--r--src/armnn/optimizations/MovePermuteUp.hpp8
-rw-r--r--src/armnn/optimizations/MoveTransposeUp.hpp8
-rw-r--r--src/armnn/test/GraphTests.cpp10
-rw-r--r--src/armnn/test/NetworkTests.cpp34
-rw-r--r--src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp18
-rw-r--r--src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp3
-rw-r--r--src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp74
-rw-r--r--src/armnn/test/optimizations/MovePermuteUpTests.cpp17
-rw-r--r--src/armnn/test/optimizations/MoveTransposeUpTests.cpp17
16 files changed, 287 insertions, 86 deletions
diff --git a/src/armnn/BackendHelper.cpp b/src/armnn/BackendHelper.cpp
index af38ce8839..580c52c568 100644
--- a/src/armnn/BackendHelper.cpp
+++ b/src/armnn/BackendHelper.cpp
@@ -600,6 +600,22 @@ bool LayerSupportHandle::IsDivisionSupported(const TensorInfo& input0,
reasonIfUnsupported);
}
+bool LayerSupportHandle::IsElementwiseBinarySupported(const TensorInfo &input0,
+ const TensorInfo &input1,
+ const TensorInfo &output,
+ const ElementwiseBinaryDescriptor &descriptor,
+ Optional<std::string &> reasonIfUnsupported)
+{
+ TensorInfos infos{input0, input1, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::ElementwiseBinary,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported);
+}
+
bool LayerSupportHandle::IsElementwiseUnarySupported(const TensorInfo& input,
const TensorInfo& output,
const ElementwiseUnaryDescriptor& descriptor,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 43862d5072..f634272316 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -27,6 +27,7 @@
#include "layers/DequantizeLayer.hpp"
#include "layers/DetectionPostProcessLayer.hpp"
#include "layers/DivisionLayer.hpp"
+#include "layers/ElementwiseBinaryLayer.hpp"
#include "layers/ElementwiseUnaryLayer.hpp"
#include "layers/FakeQuantizationLayer.hpp"
#include "layers/FillLayer.hpp"
@@ -127,6 +128,7 @@ DECLARE_LAYER(DepthwiseConvolution2d)
DECLARE_LAYER(Dequantize)
DECLARE_LAYER(DetectionPostProcess)
DECLARE_LAYER(Division)
+DECLARE_LAYER(ElementwiseBinary)
DECLARE_LAYER(ElementwiseUnary)
DECLARE_LAYER(FakeQuantization)
DECLARE_LAYER(Fill)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 08d3280cfe..9ebb67b593 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017, 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -20,7 +20,6 @@
#include <backendsCommon/TensorHandleFactoryRegistry.hpp>
#include <armnn/Exceptions.hpp>
-#include <armnn/Utils.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/Logging.hpp>
@@ -36,10 +35,8 @@
#include <fcntl.h>
#include <algorithm>
-#include <fstream>
#include <memory>
#include <vector>
-#include <algorithm>
namespace armnn
{
@@ -58,7 +55,6 @@ IConnectableLayer* INetwork::AddInputLayer(LayerBindingId id, const char* name)
return pNetworkImpl->AddInputLayer(id, name);
}
-
IConnectableLayer* INetwork::AddArgMinMaxLayer(const ArgMinMaxDescriptor& desc,
const char* name)
{
@@ -126,6 +122,11 @@ IConnectableLayer* INetwork::AddDetectionPostProcessLayer(
return pNetworkImpl->AddDetectionPostProcessLayer(descriptor, anchors, name);
}
+IConnectableLayer* INetwork::AddElementwiseBinaryLayer(const ElementwiseBinaryDescriptor& elementwiseBinaryDescriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddElementwiseBinaryLayer(elementwiseBinaryDescriptor, name);
+}
IConnectableLayer* INetwork::AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
const char* name)
@@ -133,7 +134,6 @@ IConnectableLayer* INetwork::AddElementwiseUnaryLayer(const ElementwiseUnaryDesc
return pNetworkImpl->AddElementwiseUnaryLayer(elementwiseUnaryDescriptor, name);
}
-
IConnectableLayer* INetwork::AddFillLayer(const FillDescriptor& fillDescriptor,
const char* name)
{
@@ -1853,6 +1853,12 @@ IConnectableLayer* NetworkImpl::AddComparisonLayer(const ComparisonDescriptor& c
return m_Graph->AddLayer<ComparisonLayer>(comparisonDescriptor, name);
}
+IConnectableLayer* NetworkImpl::AddElementwiseBinaryLayer(const ElementwiseBinaryDescriptor& elementwiseBinaryDesc,
+ const char* name)
+{
+ return m_Graph->AddLayer<ElementwiseBinaryLayer>(elementwiseBinaryDesc, name);
+}
+
IConnectableLayer* NetworkImpl::AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
const char* name)
{
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index a37a4be218..03642ce993 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -95,6 +95,9 @@ public:
IConnectableLayer* AddDivisionLayer(const char* name = nullptr);
+ IConnectableLayer* AddElementwiseBinaryLayer(const ElementwiseBinaryDescriptor& elementwiseBinaryDescriptor,
+ const char* name = nullptr);
+
IConnectableLayer* AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
const char* name = nullptr);
diff --git a/src/armnn/layers/ElementwiseBinaryLayer.cpp b/src/armnn/layers/ElementwiseBinaryLayer.cpp
new file mode 100644
index 0000000000..ae1813f33a
--- /dev/null
+++ b/src/armnn/layers/ElementwiseBinaryLayer.cpp
@@ -0,0 +1,89 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ElementwiseBinaryLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+namespace armnn
+{
+
+ElementwiseBinaryLayer::ElementwiseBinaryLayer(const ElementwiseBinaryDescriptor& param, const char* name)
+ : LayerWithParameters(2, 1, LayerType::ElementwiseBinary, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> ElementwiseBinaryLayer::CreateWorkload(const IWorkloadFactory& factory) const
+{
+ ElementwiseBinaryQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
+ return factory.CreateWorkload(LayerType::ElementwiseBinary, descriptor, PrepInfoAndDesc(descriptor));
+}
+
+ElementwiseBinaryLayer* ElementwiseBinaryLayer::Clone(Graph& graph) const
+{
+ return CloneBase<ElementwiseBinaryLayer>(graph, m_Param, GetName());
+}
+
+std::vector<TensorShape> ElementwiseBinaryLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+ ARMNN_ASSERT(inputShapes.size() == 2);
+ TensorShape input0 = inputShapes[0];
+ TensorShape input1 = inputShapes[1];
+
+ if (inputShapes[0].GetNumDimensions() < inputShapes[1].GetNumDimensions())
+ {
+ input1 = inputShapes[0];
+ input0 = inputShapes[1];
+ }
+
+ unsigned int numDims = input0.GetNumDimensions();
+ unsigned int shiftedDims = input0.GetNumDimensions() - input1.GetNumDimensions();
+
+ // Get the max of the inputs.
+ std::vector<unsigned int> dims(numDims);
+ for (unsigned int i = shiftedDims; i < numDims; i++)
+ {
+ unsigned int dim0 = input0[i];
+ unsigned int dim1 = input1[i - shiftedDims];
+
+ // Validate inputs are broadcast compatible.
+ ARMNN_ASSERT_MSG(dim0 == dim1 || dim0 == 1 || dim1 == 1,
+ "Dimensions should either match or one should be of size 1.");
+
+ dims[i] = std::max(dim0, dim1);
+ }
+
+ // Fill in the rest of the shifted dimensions.
+ for (unsigned int i = 0; i < shiftedDims; i++)
+ {
+ dims[i] = input0[i];
+ }
+
+ return std::vector<TensorShape>({ TensorShape(numDims, dims.data()) });
+}
+
+void ElementwiseBinaryLayer::ValidateTensorShapesFromInputs()
+{
+ VerifyLayerConnections(2, CHECK_LOCATION());
+
+ const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
+
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
+
+ auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
+ GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape() });
+
+ ARMNN_ASSERT(inferredShapes.size() == 1);
+
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, GetLayerTypeAsCString(GetType()));
+}
+
+void ElementwiseBinaryLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
+}
+} // namespace armnn
diff --git a/src/armnn/layers/ElementwiseBinaryLayer.hpp b/src/armnn/layers/ElementwiseBinaryLayer.hpp
new file mode 100644
index 0000000000..78e3f41f9e
--- /dev/null
+++ b/src/armnn/layers/ElementwiseBinaryLayer.hpp
@@ -0,0 +1,48 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+/// This layer represents a elementwiseBinary operation.
+class ElementwiseBinaryLayer : public LayerWithParameters<ElementwiseBinaryDescriptor>
+{
+public:
+ /// Makes a workload for the elementwiseBinary type
+ /// @param [in] graph The graph where this layer can be found
+ /// @param [in] factory The workload factory which will create the workload
+ /// @return A pointer to the created workload, or nullptr if not created
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
+
+ /// Creates a dynamically-allocated copy of this layer
+ /// @param [in] graph The graph into which this layer is being cloned
+ ElementwiseBinaryLayer* Clone(Graph& graph) const override;
+
+ /// Returns inputShapes by default.
+ /// @param [in] inputShapes The input shapes layer has.
+ /// @return A vector to the inferred output shape.
+ std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+ /// Check if the input tensor shape(s) will lead to a valid configuration
+ /// of @ref ElementwiseBinaryLayer
+ void ValidateTensorShapesFromInputs() override;
+
+ void ExecuteStrategy(IStrategy& strategy) const override;
+
+protected:
+ /// Constructor to create a ElementwiseBinaryLayer
+ /// @param [in] param ElementwiseBinaryDescriptor to configure the ElementwiseBinaryLayer
+ /// @param [in] name Optional name for the layer
+ ElementwiseBinaryLayer(const ElementwiseBinaryDescriptor& param, const char* name);
+
+ /// Default destructor
+ ~ElementwiseBinaryLayer() = default;
+};
+
+} // namespace armnn
diff --git a/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp b/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp
index b9e8584a5a..dbde72b917 100644
--- a/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp
+++ b/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -18,7 +18,7 @@ namespace optimizations
static const std::set<armnn::LayerType> broadcastOps{ LayerType::Addition, LayerType::Division,
LayerType::Maximum, LayerType::Minimum,
LayerType::Multiplication, LayerType::Prelu,
- LayerType::Subtraction };
+ LayerType::Subtraction, LayerType::ElementwiseBinary };
class AddBroadcastReshapeLayerImpl
{
diff --git a/src/armnn/optimizations/MovePermuteUp.hpp b/src/armnn/optimizations/MovePermuteUp.hpp
index ae8a28cba4..19078b3bc6 100644
--- a/src/armnn/optimizations/MovePermuteUp.hpp
+++ b/src/armnn/optimizations/MovePermuteUp.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2018,2020,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -68,6 +68,12 @@ private:
case LayerType::MemCopy:
case LayerType::Multiplication:
return true;
+ case LayerType::ElementwiseBinary:
+ {
+ auto descriptor = PolymorphicDowncast<const ElementwiseBinaryDescriptor*>(&base.GetParameters());
+ return (descriptor->m_Operation == BinaryOperation::Add ||
+ descriptor->m_Operation == BinaryOperation::Mul);
+ }
default:
return false;
}
diff --git a/src/armnn/optimizations/MoveTransposeUp.hpp b/src/armnn/optimizations/MoveTransposeUp.hpp
index 999a4ebe42..40f6b9cdac 100644
--- a/src/armnn/optimizations/MoveTransposeUp.hpp
+++ b/src/armnn/optimizations/MoveTransposeUp.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -68,6 +68,12 @@ private:
case LayerType::MemCopy:
case LayerType::Multiplication:
return true;
+ case LayerType::ElementwiseBinary:
+ {
+ auto descriptor = PolymorphicDowncast<const ElementwiseBinaryDescriptor*>(&base.GetParameters());
+ return (descriptor->m_Operation == BinaryOperation::Add ||
+ descriptor->m_Operation == BinaryOperation::Mul);
+ }
default:
return false;
}
diff --git a/src/armnn/test/GraphTests.cpp b/src/armnn/test/GraphTests.cpp
index eea7ae824a..b1b1a84ec9 100644
--- a/src/armnn/test/GraphTests.cpp
+++ b/src/armnn/test/GraphTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <GraphUtils.hpp>
@@ -36,7 +36,7 @@ TEST_CASE("TopologicalSort")
CHECK_NOTHROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
- CHECK_NOTHROW(graph.AddLayer<armnn::AdditionLayer>("layerC"));
+ CHECK_NOTHROW(graph.AddLayer<armnn::ElementwiseBinaryLayer>(armnn::BinaryOperation::Add, "layerC"));
CHECK_NOTHROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerD"));
CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerE"));
@@ -82,7 +82,7 @@ TEST_CASE("InsertNewLayerBefore")
CHECK_NOTHROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerC"));
- CHECK_NOTHROW(graph.AddLayer<armnn::AdditionLayer>("layerD"));
+ CHECK_NOTHROW(graph.AddLayer<armnn::ElementwiseBinaryLayer>(armnn::BinaryOperation::Add, "layerD"));
CHECK_NOTHROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
armnn::Layer* const layerA = GetFirstLayerWithName(graph, "layerA");
@@ -168,7 +168,7 @@ TEST_CASE("InsertNewLayerAfter")
CHECK_NOTHROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerC"));
- CHECK_NOTHROW(graph.AddLayer<armnn::AdditionLayer>("layerD"));
+ CHECK_NOTHROW(graph.AddLayer<armnn::ElementwiseBinaryLayer>(armnn::BinaryOperation::Add, "layerD"));
CHECK_NOTHROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
armnn::Layer* const layerA = GetFirstLayerWithName(graph, "layerA");
@@ -548,7 +548,7 @@ TEST_CASE_FIXTURE(CopyLayersFixture, "CopyLayersAddedBetweenSameLayersHaveDiffer
armnn::SplitterLayer* const splitterLayer = graph.AddLayer<armnn::SplitterLayer>(splitterDesc, "splitter");
splitterLayer->SetBackendId(armnn::Compute::GpuAcc);
- armnn::AdditionLayer* const additionLayer = graph.AddLayer<armnn::AdditionLayer>("addition");
+ auto* const additionLayer = graph.AddLayer<armnn::ElementwiseBinaryLayer>(armnn::BinaryOperation::Add, "addition");
additionLayer->SetBackendId(armnn::Compute::CpuRef);
armnn::OutputLayer* const outputLayer = graph.AddLayer<armnn::OutputLayer>(0, "output");
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index 058f079e46..0bfad4d4d1 100644
--- a/src/armnn/test/NetworkTests.cpp
+++ b/src/armnn/test/NetworkTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -32,7 +32,7 @@ TEST_CASE("LayerGuids")
{
armnn::NetworkImpl net;
LayerGuid inputId = net.AddInputLayer(0)->GetGuid();
- LayerGuid addId = net.AddAdditionLayer()->GetGuid();
+ LayerGuid addId = net.AddElementwiseBinaryLayer(armnn::BinaryOperation::Add)->GetGuid();
LayerGuid outputId = net.AddOutputLayer(0)->GetGuid();
CHECK(inputId != addId);
@@ -50,7 +50,7 @@ TEST_CASE("LayerNamesAreOptionalForINetwork")
{
armnn::INetworkPtr inet(armnn::INetwork::Create());
inet->AddInputLayer(0);
- inet->AddAdditionLayer();
+ inet->AddElementwiseBinaryLayer(armnn::BinaryOperation::Add);
inet->AddActivationLayer(armnn::ActivationDescriptor());
inet->AddOutputLayer(0);
}
@@ -59,7 +59,7 @@ TEST_CASE("LayerNamesAreOptionalForNetwork")
{
armnn::NetworkImpl net;
net.AddInputLayer(0);
- net.AddAdditionLayer();
+ net.AddElementwiseBinaryLayer(armnn::BinaryOperation::Add);
net.AddActivationLayer(armnn::ActivationDescriptor());
net.AddOutputLayer(0);
}
@@ -136,13 +136,15 @@ TEST_CASE("NetworkModification")
softmaxLayer->GetOutputSlot(0).Connect(batchNormalizationLayer->GetInputSlot(0));
- armnn::IConnectableLayer* const additionLayer = net.AddAdditionLayer("addition");
+ armnn::IConnectableLayer* const additionLayer = net.AddElementwiseBinaryLayer(armnn::BinaryOperation::Add,
+ "addition");
CHECK(additionLayer);
batchNormalizationLayer->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(0));
batchNormalizationLayer->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(1));
- armnn::IConnectableLayer* const multiplicationLayer = net.AddMultiplicationLayer("multiplication");
+ armnn::IConnectableLayer* const multiplicationLayer = net.AddElementwiseBinaryLayer(armnn::BinaryOperation::Mul,
+ "multiplication");
CHECK(multiplicationLayer);
additionLayer->GetOutputSlot(0).Connect(multiplicationLayer->GetInputSlot(0));
@@ -338,7 +340,7 @@ TEST_CASE("NetworkModification_SplitterAddition")
splitterLayer->GetOutputSlot(1).Connect(softmax2Layer->GetInputSlot(0));
// Adds addition layer.
- layer = net.AddAdditionLayer("add layer");
+ layer = net.AddElementwiseBinaryLayer(armnn::BinaryOperation::Add, "add layer");
CHECK(layer);
softmax1Layer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
@@ -382,7 +384,7 @@ TEST_CASE("NetworkModification_SplitterMultiplication")
splitterLayer->GetOutputSlot(1).Connect(softmax2Layer->GetInputSlot(0));
// Adds multiplication layer.
- layer = net.AddMultiplicationLayer("multiplication layer");
+ layer = net.AddElementwiseBinaryLayer(armnn::BinaryOperation::Mul, "multiplication layer");
CHECK(layer);
softmax1Layer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
@@ -634,15 +636,27 @@ TEST_CASE("ObtainConv2DDescriptorFromIConnectableLayer")
CHECK(originalDescriptor.m_DataLayout == armnn::DataLayout::NCHW);
}
-TEST_CASE("CheckNullDescriptor")
+TEST_CASE("CheckNotNullDescriptor")
{
armnn::NetworkImpl net;
- armnn::IConnectableLayer* const addLayer = net.AddAdditionLayer();
+ armnn::IConnectableLayer* const addLayer = net.AddElementwiseBinaryLayer(armnn::BinaryOperation::Add);
CHECK(addLayer);
const armnn::BaseDescriptor& descriptor = addLayer->GetParameters();
// additional layer has no descriptor so a NullDescriptor will be returned
+ CHECK(descriptor.IsNull() == false);
+}
+
+TEST_CASE("CheckNullDescriptor")
+{
+ armnn::NetworkImpl net;
+ armnn::IConnectableLayer* const addLayer = net.AddPreluLayer();
+
+ CHECK(addLayer);
+
+ const armnn::BaseDescriptor& descriptor = addLayer->GetParameters();
+ // Prelu has no descriptor so a NullDescriptor will be returned
CHECK(descriptor.IsNull() == true);
}
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
index cfdaaf529b..b8607d1b5f 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
@@ -1,12 +1,10 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "TestNameAndDescriptorLayerVisitor.hpp"
#include "Network.hpp"
-#include <armnn/Exceptions.hpp>
-
#include <doctest/doctest.h>
namespace
@@ -84,6 +82,12 @@ armnn::ConcatDescriptor GetDescriptor<armnn::ConcatDescriptor>()
}
template<>
+armnn::ElementwiseBinaryDescriptor GetDescriptor<armnn::ElementwiseBinaryDescriptor>()
+{
+ return armnn::ElementwiseBinaryDescriptor(armnn::BinaryOperation::Add);
+}
+
+template<>
armnn::ElementwiseUnaryDescriptor GetDescriptor<armnn::ElementwiseUnaryDescriptor>()
{
return armnn::ElementwiseUnaryDescriptor(armnn::UnaryOperation::Abs);
@@ -273,12 +277,14 @@ armnn::TransposeDescriptor GetDescriptor<armnn::TransposeDescriptor>()
TEST_SUITE("TestNameAndDescriptorLayerVisitor")
{
-TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Activation, CheckAdditionLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Activation, CheckActivationLayerVisitorNameAndDescriptor)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(ArgMinMax, CheckArgMinMaxLayerVisitorNameAndDescriptor)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(DepthToSpace, CheckDepthToSpaceLayerVisitorNameAndDescriptor)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(BatchToSpaceNd, CheckBatchToSpaceNdLayerVisitorNameAndDescriptor)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Comparison, CheckComparisonLayerVisitorNameAndDescriptor)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Concat, CheckConcatLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(ElementwiseBinary,
+ CheckElementwiseBinaryLayerVisitorNameAndDescriptor)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(ElementwiseUnary, CheckElementwiseUnaryLayerVisitorNameAndDescriptor)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Fill, CheckFillLayerVisitorNameAndDescriptor)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Gather, CheckGatherLayerVisitorNameAndDescriptor)
@@ -304,7 +310,7 @@ TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(StridedSlice, CheckStridedSlic
TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Transpose, CheckTransposeLayerVisitorNameAndDescriptor)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Activation,
- CheckAdditionLayerVisitorNameNullptrAndDescriptor)
+ CheckActivationLayerVisitorNameNullptrAndDescriptor)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(ArgMinMax,
CheckArgMinMaxLayerVisitorNameNullptrAndDescriptor)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(DepthToSpace,
@@ -315,6 +321,8 @@ TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Comparison,
CheckComparisonLayerVisitorNameNullptrAndDescriptor)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Concat,
CheckConcatLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(ElementwiseBinary,
+ CheckElementwiseBinaryLayerVisitorNameNullptrAndDescriptor)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(ElementwiseUnary,
CheckElementwiseUnaryLayerVisitorNameNullptrAndDescriptor)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Fill,
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
index b1f9512655..988903518d 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -63,6 +63,7 @@ DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(BatchToSpaceNd)
DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Comparison)
DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Concat)
DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(DepthToSpace)
+DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(ElementwiseBinary)
DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(ElementwiseUnary)
DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Fill)
DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Gather)
diff --git a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
index 0636a00234..59dfb862a0 100644
--- a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
+++ b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -27,7 +27,7 @@ void AddBroadcastReshapeLayerOptimizerTest(const TensorInfo& info0,
auto input0 = graph.AddLayer<InputLayer>(0, "input0");
auto input1 = graph.AddLayer<InputLayer>(1, "input1");
- auto add = graph.AddLayer<AdditionLayer>("add");
+ auto add = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "add");
auto output = graph.AddLayer<OutputLayer>(0, "output");
input0->GetOutputSlot().SetTensorInfo(info0);
input1->GetOutputSlot().SetTensorInfo(info1);
@@ -40,7 +40,7 @@ void AddBroadcastReshapeLayerOptimizerTest(const TensorInfo& info0,
CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
- &IsLayerOfType<AdditionLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
&IsLayerOfType<OutputLayer>));
// Run optimizer
@@ -48,19 +48,19 @@ void AddBroadcastReshapeLayerOptimizerTest(const TensorInfo& info0,
// Broadcast reshape layer has been added to the graph correctly
CHECK(CheckSequence(graph.cbegin(), graph.cend(),
- &IsLayerOfType<InputLayer>,
- &IsLayerOfType<InputLayer>,
- &IsLayerOfType<ReshapeLayer>,
- &IsLayerOfType<AdditionLayer>,
- &IsLayerOfType<OutputLayer>));
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<ReshapeLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
+ &IsLayerOfType<OutputLayer>));
Layer* const reshapeLayer = GetFirstLayerWithName(graph, reshapeLayerName);
CHECK(reshapeLayer);
auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo();
// Tensorshape and the data type are correct
- CHECK((addedReshapeTensorInfo.GetShape() == expectedReshapeShape));
- CHECK((addedReshapeTensorInfo.GetDataType() == expectedDataType));
+ CHECK_EQ(addedReshapeTensorInfo.GetShape(), expectedReshapeShape);
+ CHECK_EQ(addedReshapeTensorInfo.GetDataType(), expectedDataType);
}
TEST_CASE("AddBroadcastReshapeLayerSimpleTest")
@@ -121,7 +121,7 @@ TEST_CASE("AddBroadcastReshapeLayerSubtractionTest")
auto input0 = graph.AddLayer<InputLayer>(0, "input0");
auto input1 = graph.AddLayer<InputLayer>(1, "input1");
- auto sub = graph.AddLayer<SubtractionLayer>("sub");
+ auto sub = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Sub, "sub");
auto output = graph.AddLayer<OutputLayer>(0, "output");
input0->GetOutputSlot().SetTensorInfo(info0);
input1->GetOutputSlot().SetTensorInfo(info1);
@@ -134,7 +134,7 @@ TEST_CASE("AddBroadcastReshapeLayerSubtractionTest")
CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
- &IsLayerOfType<SubtractionLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
&IsLayerOfType<OutputLayer>));
// Run optimizer
@@ -145,7 +145,7 @@ TEST_CASE("AddBroadcastReshapeLayerSubtractionTest")
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
&IsLayerOfType<ReshapeLayer>,
- &IsLayerOfType<SubtractionLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
&IsLayerOfType<OutputLayer>));
Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:sub-0");
@@ -153,8 +153,8 @@ TEST_CASE("AddBroadcastReshapeLayerSubtractionTest")
auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo();
// Tensorshape and the data type are correct
- CHECK((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 1, 5 })));
- CHECK((addedReshapeTensorInfo.GetDataType() == DataType::Float32));
+ CHECK_EQ(addedReshapeTensorInfo.GetShape(), TensorShape({ 1, 1, 1, 5 }));
+ CHECK_EQ(addedReshapeTensorInfo.GetDataType(), DataType::Float32);
}
TEST_CASE("AddBroadcastReshapeLayerDivisionTest")
@@ -166,7 +166,7 @@ TEST_CASE("AddBroadcastReshapeLayerDivisionTest")
auto input0 = graph.AddLayer<InputLayer>(0, "input0");
auto input1 = graph.AddLayer<InputLayer>(1, "input1");
- auto div = graph.AddLayer<DivisionLayer>("div");
+ auto div = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Div, "div");
auto output = graph.AddLayer<OutputLayer>(0, "output");
input0->GetOutputSlot().SetTensorInfo(info0);
input1->GetOutputSlot().SetTensorInfo(info1);
@@ -179,7 +179,7 @@ TEST_CASE("AddBroadcastReshapeLayerDivisionTest")
CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
- &IsLayerOfType<DivisionLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
&IsLayerOfType<OutputLayer>));
// Run optimizer
@@ -190,7 +190,7 @@ TEST_CASE("AddBroadcastReshapeLayerDivisionTest")
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
&IsLayerOfType<ReshapeLayer>,
- &IsLayerOfType<DivisionLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
&IsLayerOfType<OutputLayer>));
Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:div-0");
@@ -198,8 +198,8 @@ TEST_CASE("AddBroadcastReshapeLayerDivisionTest")
auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo();
// Tensorshape and the data type are correct
- CHECK((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 4, 5 })));
- CHECK((addedReshapeTensorInfo.GetDataType() == DataType::QAsymmS8));
+ CHECK_EQ(addedReshapeTensorInfo.GetShape(), TensorShape({ 1, 1, 4, 5 }));
+ CHECK_EQ(addedReshapeTensorInfo.GetDataType(), DataType::QAsymmS8);
}
TEST_CASE("AddBroadcastReshapeLayerMultiplicationTest")
@@ -211,7 +211,7 @@ TEST_CASE("AddBroadcastReshapeLayerMultiplicationTest")
auto input0 = graph.AddLayer<InputLayer>(0, "input0");
auto input1 = graph.AddLayer<InputLayer>(1, "input1");
- auto mul = graph.AddLayer<MultiplicationLayer>("mul");
+ auto mul = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul, "mul");
auto output = graph.AddLayer<OutputLayer>(0, "output");
input0->GetOutputSlot().SetTensorInfo(info0);
input1->GetOutputSlot().SetTensorInfo(info1);
@@ -224,7 +224,7 @@ TEST_CASE("AddBroadcastReshapeLayerMultiplicationTest")
CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
- &IsLayerOfType<MultiplicationLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
&IsLayerOfType<OutputLayer>));
// Run optimizer
@@ -235,7 +235,7 @@ TEST_CASE("AddBroadcastReshapeLayerMultiplicationTest")
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
&IsLayerOfType<ReshapeLayer>,
- &IsLayerOfType<MultiplicationLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
&IsLayerOfType<OutputLayer>));
Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:mul-0");
@@ -243,8 +243,8 @@ TEST_CASE("AddBroadcastReshapeLayerMultiplicationTest")
auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo();
// Tensorshape and the data type are correct
- CHECK((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 3, 5 })));
- CHECK((addedReshapeTensorInfo.GetDataType() == DataType::QAsymmU8));
+ CHECK_EQ(addedReshapeTensorInfo.GetShape(), TensorShape({ 1, 1, 3, 5 }));
+ CHECK_EQ(addedReshapeTensorInfo.GetDataType(), DataType::QAsymmU8);
}
TEST_CASE("AddNoBroadcastReshapeLayerTest")
@@ -256,7 +256,7 @@ TEST_CASE("AddNoBroadcastReshapeLayerTest")
auto input0 = graph.AddLayer<InputLayer>(0, "input0");
auto input1 = graph.AddLayer<InputLayer>(1, "input1");
- auto mul = graph.AddLayer<MultiplicationLayer>("mul");
+ auto mul = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul, "mul");
auto output = graph.AddLayer<OutputLayer>(0, "output");
input0->GetOutputSlot().SetTensorInfo(info0);
input1->GetOutputSlot().SetTensorInfo(info1);
@@ -269,7 +269,7 @@ TEST_CASE("AddNoBroadcastReshapeLayerTest")
CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
- &IsLayerOfType<MultiplicationLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
&IsLayerOfType<OutputLayer>));
// Run optimizer
@@ -279,7 +279,7 @@ TEST_CASE("AddNoBroadcastReshapeLayerTest")
CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
- &IsLayerOfType<MultiplicationLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
&IsLayerOfType<OutputLayer>));
Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:mul-0");
@@ -295,7 +295,7 @@ TEST_CASE("ReshapeParentConstLayerTest")
auto input = graph.AddLayer<InputLayer>(0, "input");
auto constant = graph.AddLayer<ConstantLayer>("constant");
- auto mul = graph.AddLayer<MultiplicationLayer>("mul");
+ auto mul = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul, "mul");
auto output = graph.AddLayer<OutputLayer>(0, "output");
uint8_t tensor[] = { 1, 1, 1, 1, 1 };
@@ -313,7 +313,7 @@ TEST_CASE("ReshapeParentConstLayerTest")
CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<ConstantLayer>,
- &IsLayerOfType<MultiplicationLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
&IsLayerOfType<OutputLayer>));
// Run optimizer
@@ -323,7 +323,7 @@ TEST_CASE("ReshapeParentConstLayerTest")
CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<ConstantLayer>,
- &IsLayerOfType<MultiplicationLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
&IsLayerOfType<OutputLayer>));
TensorShape expectedShape = TensorShape{ 1, 1, 1, 5 };
@@ -351,8 +351,8 @@ TEST_CASE("ReshapeParentConstAddLayerMultipleConnectionsTest")
auto input = graph.AddLayer<InputLayer>(0, "input");
auto constant = graph.AddLayer<ConstantLayer>("constant");
- auto add1 = graph.AddLayer<AdditionLayer>("add1");
- auto add2 = graph.AddLayer<AdditionLayer>("add2");
+ auto add1 = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "add1");
+ auto add2 = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "add2");
auto output = graph.AddLayer<OutputLayer>(0, "output");
input->GetOutputSlot().SetTensorInfo(inputInfo);
@@ -371,8 +371,8 @@ TEST_CASE("ReshapeParentConstAddLayerMultipleConnectionsTest")
CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<ConstantLayer>,
- &IsLayerOfType<AdditionLayer>,
- &IsLayerOfType<AdditionLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
&IsLayerOfType<OutputLayer>));
// Run optimizer
@@ -384,8 +384,8 @@ TEST_CASE("ReshapeParentConstAddLayerMultipleConnectionsTest")
&IsLayerOfType<ConstantLayer>,
&IsLayerOfType<ReshapeLayer>,
&IsLayerOfType<ReshapeLayer>,
- &IsLayerOfType<AdditionLayer>,
- &IsLayerOfType<AdditionLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
&IsLayerOfType<OutputLayer>));
// Ensure the output shape of the constant hasn't changed.
diff --git a/src/armnn/test/optimizations/MovePermuteUpTests.cpp b/src/armnn/test/optimizations/MovePermuteUpTests.cpp
index 152e79925b..018286c70d 100644
--- a/src/armnn/test/optimizations/MovePermuteUpTests.cpp
+++ b/src/armnn/test/optimizations/MovePermuteUpTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -36,7 +36,7 @@ TEST_CASE("MovePermuteUpTest")
head = graph.InsertNewLayer<armnn::ActivationLayer>(head->GetInputSlot(0), armnn::ActivationDescriptor{}, "");
head->GetOutputHandler().SetTensorInfo(info);
- head = graph.InsertNewLayer<armnn::AdditionLayer>(head->GetInputSlot(0), "");
+ head = graph.InsertNewLayer<armnn::ElementwiseBinaryLayer>(head->GetInputSlot(0), armnn::BinaryOperation::Add, "");
head->GetOutputHandler().SetTensorInfo(info);
// Inserts input for 2nd input of Addition.
@@ -54,7 +54,7 @@ TEST_CASE("MovePermuteUpTest")
head = graph.InsertNewLayer<armnn::MemCopyLayer>(head->GetInputSlot(0), "");
head->GetOutputHandler().SetTensorInfo(info);
- head = graph.InsertNewLayer<armnn::MultiplicationLayer>(head->GetInputSlot(0), "");
+ head = graph.InsertNewLayer<armnn::ElementwiseBinaryLayer>(head->GetInputSlot(0), armnn::BinaryOperation::Mul, "");
head->GetOutputHandler().SetTensorInfo(info);
// Inserts input for 2nd input of Multiplication.
@@ -69,9 +69,9 @@ TEST_CASE("MovePermuteUpTest")
CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::MultiplicationLayer>, &IsLayerOfType<armnn::MemCopyLayer>,
+ &IsLayerOfType<armnn::ElementwiseBinaryLayer>, &IsLayerOfType<armnn::MemCopyLayer>,
&IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::FakeQuantizationLayer>,
- &IsLayerOfType<armnn::AdditionLayer>, &IsLayerOfType<armnn::ActivationLayer>,
+ &IsLayerOfType<armnn::ElementwiseBinaryLayer>, &IsLayerOfType<armnn::ActivationLayer>,
&IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::OutputLayer>));
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(MovePermuteUp()));
@@ -80,10 +80,11 @@ TEST_CASE("MovePermuteUpTest")
CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::PermuteLayer>,
- &IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::MultiplicationLayer>,
+ &IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::ElementwiseBinaryLayer>,
&IsLayerOfType<armnn::MemCopyLayer>, &IsLayerOfType<armnn::FloorLayer>,
- &IsLayerOfType<armnn::FakeQuantizationLayer>, &IsLayerOfType<armnn::AdditionLayer>,
- &IsLayerOfType<armnn::ActivationLayer>, &IsLayerOfType<armnn::OutputLayer>));
+ &IsLayerOfType<armnn::FakeQuantizationLayer>,
+ &IsLayerOfType<armnn::ElementwiseBinaryLayer>, &IsLayerOfType<armnn::ActivationLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
std::list<std::string> testRelatedLayers = { permuteLayerName };
diff --git a/src/armnn/test/optimizations/MoveTransposeUpTests.cpp b/src/armnn/test/optimizations/MoveTransposeUpTests.cpp
index 09bf9ae7d9..6a6010cb4d 100644
--- a/src/armnn/test/optimizations/MoveTransposeUpTests.cpp
+++ b/src/armnn/test/optimizations/MoveTransposeUpTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020-2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -37,7 +37,7 @@ TEST_CASE("MoveTransposeUpTest")
head = graph.InsertNewLayer<armnn::ActivationLayer>(head->GetInputSlot(0), armnn::ActivationDescriptor{}, "");
head->GetOutputHandler().SetTensorInfo(info);
- head = graph.InsertNewLayer<armnn::AdditionLayer>(head->GetInputSlot(0), "");
+ head = graph.InsertNewLayer<armnn::ElementwiseBinaryLayer>(head->GetInputSlot(0), armnn::BinaryOperation::Add, "");
head->GetOutputHandler().SetTensorInfo(info);
// Inserts input for 2nd input of Addition.
@@ -55,7 +55,7 @@ TEST_CASE("MoveTransposeUpTest")
head = graph.InsertNewLayer<armnn::MemCopyLayer>(head->GetInputSlot(0), "");
head->GetOutputHandler().SetTensorInfo(info);
- head = graph.InsertNewLayer<armnn::MultiplicationLayer>(head->GetInputSlot(0), "");
+ head = graph.InsertNewLayer<armnn::ElementwiseBinaryLayer>(head->GetInputSlot(0), armnn::BinaryOperation::Mul, "");
head->GetOutputHandler().SetTensorInfo(info);
// Inserts input for 2nd input of Multiplication.
@@ -70,9 +70,9 @@ TEST_CASE("MoveTransposeUpTest")
CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::MultiplicationLayer>, &IsLayerOfType<armnn::MemCopyLayer>,
+ &IsLayerOfType<armnn::ElementwiseBinaryLayer>, &IsLayerOfType<armnn::MemCopyLayer>,
&IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::FakeQuantizationLayer>,
- &IsLayerOfType<armnn::AdditionLayer>, &IsLayerOfType<armnn::ActivationLayer>,
+ &IsLayerOfType<armnn::ElementwiseBinaryLayer>, &IsLayerOfType<armnn::ActivationLayer>,
&IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::OutputLayer>));
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(MoveTransposeUp()));
@@ -81,10 +81,11 @@ TEST_CASE("MoveTransposeUpTest")
CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::TransposeLayer>,
- &IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::MultiplicationLayer>,
+ &IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::ElementwiseBinaryLayer>,
&IsLayerOfType<armnn::MemCopyLayer>, &IsLayerOfType<armnn::FloorLayer>,
- &IsLayerOfType<armnn::FakeQuantizationLayer>, &IsLayerOfType<armnn::AdditionLayer>,
- &IsLayerOfType<armnn::ActivationLayer>, &IsLayerOfType<armnn::OutputLayer>));
+ &IsLayerOfType<armnn::FakeQuantizationLayer>,
+ &IsLayerOfType<armnn::ElementwiseBinaryLayer>, &IsLayerOfType<armnn::ActivationLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
std::list<std::string> testRelatedLayers = { transposeLayerName };