aboutsummaryrefslogtreecommitdiff
path: root/src/armnn/test
diff options
context:
space:
mode:
Diffstat (limited to 'src/armnn/test')
-rw-r--r--src/armnn/test/GraphTests.cpp10
-rw-r--r--src/armnn/test/NetworkTests.cpp34
-rw-r--r--src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp18
-rw-r--r--src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp3
-rw-r--r--src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp74
-rw-r--r--src/armnn/test/optimizations/MovePermuteUpTests.cpp17
-rw-r--r--src/armnn/test/optimizations/MoveTransposeUpTests.cpp17
7 files changed, 99 insertions, 74 deletions
diff --git a/src/armnn/test/GraphTests.cpp b/src/armnn/test/GraphTests.cpp
index eea7ae824a..b1b1a84ec9 100644
--- a/src/armnn/test/GraphTests.cpp
+++ b/src/armnn/test/GraphTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <GraphUtils.hpp>
@@ -36,7 +36,7 @@ TEST_CASE("TopologicalSort")
CHECK_NOTHROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
- CHECK_NOTHROW(graph.AddLayer<armnn::AdditionLayer>("layerC"));
+ CHECK_NOTHROW(graph.AddLayer<armnn::ElementwiseBinaryLayer>(armnn::BinaryOperation::Add, "layerC"));
CHECK_NOTHROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerD"));
CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerE"));
@@ -82,7 +82,7 @@ TEST_CASE("InsertNewLayerBefore")
CHECK_NOTHROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerC"));
- CHECK_NOTHROW(graph.AddLayer<armnn::AdditionLayer>("layerD"));
+ CHECK_NOTHROW(graph.AddLayer<armnn::ElementwiseBinaryLayer>(armnn::BinaryOperation::Add, "layerD"));
CHECK_NOTHROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
armnn::Layer* const layerA = GetFirstLayerWithName(graph, "layerA");
@@ -168,7 +168,7 @@ TEST_CASE("InsertNewLayerAfter")
CHECK_NOTHROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerC"));
- CHECK_NOTHROW(graph.AddLayer<armnn::AdditionLayer>("layerD"));
+ CHECK_NOTHROW(graph.AddLayer<armnn::ElementwiseBinaryLayer>(armnn::BinaryOperation::Add, "layerD"));
CHECK_NOTHROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
armnn::Layer* const layerA = GetFirstLayerWithName(graph, "layerA");
@@ -548,7 +548,7 @@ TEST_CASE_FIXTURE(CopyLayersFixture, "CopyLayersAddedBetweenSameLayersHaveDiffer
armnn::SplitterLayer* const splitterLayer = graph.AddLayer<armnn::SplitterLayer>(splitterDesc, "splitter");
splitterLayer->SetBackendId(armnn::Compute::GpuAcc);
- armnn::AdditionLayer* const additionLayer = graph.AddLayer<armnn::AdditionLayer>("addition");
+ auto* const additionLayer = graph.AddLayer<armnn::ElementwiseBinaryLayer>(armnn::BinaryOperation::Add, "addition");
additionLayer->SetBackendId(armnn::Compute::CpuRef);
armnn::OutputLayer* const outputLayer = graph.AddLayer<armnn::OutputLayer>(0, "output");
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index 058f079e46..0bfad4d4d1 100644
--- a/src/armnn/test/NetworkTests.cpp
+++ b/src/armnn/test/NetworkTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -32,7 +32,7 @@ TEST_CASE("LayerGuids")
{
armnn::NetworkImpl net;
LayerGuid inputId = net.AddInputLayer(0)->GetGuid();
- LayerGuid addId = net.AddAdditionLayer()->GetGuid();
+ LayerGuid addId = net.AddElementwiseBinaryLayer(armnn::BinaryOperation::Add)->GetGuid();
LayerGuid outputId = net.AddOutputLayer(0)->GetGuid();
CHECK(inputId != addId);
@@ -50,7 +50,7 @@ TEST_CASE("LayerNamesAreOptionalForINetwork")
{
armnn::INetworkPtr inet(armnn::INetwork::Create());
inet->AddInputLayer(0);
- inet->AddAdditionLayer();
+ inet->AddElementwiseBinaryLayer(armnn::BinaryOperation::Add);
inet->AddActivationLayer(armnn::ActivationDescriptor());
inet->AddOutputLayer(0);
}
@@ -59,7 +59,7 @@ TEST_CASE("LayerNamesAreOptionalForNetwork")
{
armnn::NetworkImpl net;
net.AddInputLayer(0);
- net.AddAdditionLayer();
+ net.AddElementwiseBinaryLayer(armnn::BinaryOperation::Add);
net.AddActivationLayer(armnn::ActivationDescriptor());
net.AddOutputLayer(0);
}
@@ -136,13 +136,15 @@ TEST_CASE("NetworkModification")
softmaxLayer->GetOutputSlot(0).Connect(batchNormalizationLayer->GetInputSlot(0));
- armnn::IConnectableLayer* const additionLayer = net.AddAdditionLayer("addition");
+ armnn::IConnectableLayer* const additionLayer = net.AddElementwiseBinaryLayer(armnn::BinaryOperation::Add,
+ "addition");
CHECK(additionLayer);
batchNormalizationLayer->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(0));
batchNormalizationLayer->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(1));
- armnn::IConnectableLayer* const multiplicationLayer = net.AddMultiplicationLayer("multiplication");
+ armnn::IConnectableLayer* const multiplicationLayer = net.AddElementwiseBinaryLayer(armnn::BinaryOperation::Mul,
+ "multiplication");
CHECK(multiplicationLayer);
additionLayer->GetOutputSlot(0).Connect(multiplicationLayer->GetInputSlot(0));
@@ -338,7 +340,7 @@ TEST_CASE("NetworkModification_SplitterAddition")
splitterLayer->GetOutputSlot(1).Connect(softmax2Layer->GetInputSlot(0));
// Adds addition layer.
- layer = net.AddAdditionLayer("add layer");
+ layer = net.AddElementwiseBinaryLayer(armnn::BinaryOperation::Add, "add layer");
CHECK(layer);
softmax1Layer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
@@ -382,7 +384,7 @@ TEST_CASE("NetworkModification_SplitterMultiplication")
splitterLayer->GetOutputSlot(1).Connect(softmax2Layer->GetInputSlot(0));
// Adds multiplication layer.
- layer = net.AddMultiplicationLayer("multiplication layer");
+ layer = net.AddElementwiseBinaryLayer(armnn::BinaryOperation::Mul, "multiplication layer");
CHECK(layer);
softmax1Layer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
@@ -634,15 +636,27 @@ TEST_CASE("ObtainConv2DDescriptorFromIConnectableLayer")
CHECK(originalDescriptor.m_DataLayout == armnn::DataLayout::NCHW);
}
-TEST_CASE("CheckNullDescriptor")
+TEST_CASE("CheckNotNullDescriptor")
{
armnn::NetworkImpl net;
- armnn::IConnectableLayer* const addLayer = net.AddAdditionLayer();
+ armnn::IConnectableLayer* const addLayer = net.AddElementwiseBinaryLayer(armnn::BinaryOperation::Add);
CHECK(addLayer);
const armnn::BaseDescriptor& descriptor = addLayer->GetParameters();
-    // addition layer has no descriptor so a NullDescriptor will be returned
+    // ElementwiseBinary layer has a descriptor, so IsNull() will return false
+ CHECK(descriptor.IsNull() == false);
+}
+
+TEST_CASE("CheckNullDescriptor")
+{
+ armnn::NetworkImpl net;
+ armnn::IConnectableLayer* const addLayer = net.AddPreluLayer();
+
+ CHECK(addLayer);
+
+ const armnn::BaseDescriptor& descriptor = addLayer->GetParameters();
+ // Prelu has no descriptor so a NullDescriptor will be returned
CHECK(descriptor.IsNull() == true);
}
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
index cfdaaf529b..b8607d1b5f 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
@@ -1,12 +1,10 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "TestNameAndDescriptorLayerVisitor.hpp"
#include "Network.hpp"
-#include <armnn/Exceptions.hpp>
-
#include <doctest/doctest.h>
namespace
@@ -84,6 +82,12 @@ armnn::ConcatDescriptor GetDescriptor<armnn::ConcatDescriptor>()
}
template<>
+armnn::ElementwiseBinaryDescriptor GetDescriptor<armnn::ElementwiseBinaryDescriptor>()
+{
+ return armnn::ElementwiseBinaryDescriptor(armnn::BinaryOperation::Add);
+}
+
+template<>
armnn::ElementwiseUnaryDescriptor GetDescriptor<armnn::ElementwiseUnaryDescriptor>()
{
return armnn::ElementwiseUnaryDescriptor(armnn::UnaryOperation::Abs);
@@ -273,12 +277,14 @@ armnn::TransposeDescriptor GetDescriptor<armnn::TransposeDescriptor>()
TEST_SUITE("TestNameAndDescriptorLayerVisitor")
{
-TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Activation, CheckAdditionLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Activation, CheckActivationLayerVisitorNameAndDescriptor)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(ArgMinMax, CheckArgMinMaxLayerVisitorNameAndDescriptor)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(DepthToSpace, CheckDepthToSpaceLayerVisitorNameAndDescriptor)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(BatchToSpaceNd, CheckBatchToSpaceNdLayerVisitorNameAndDescriptor)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Comparison, CheckComparisonLayerVisitorNameAndDescriptor)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Concat, CheckConcatLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(ElementwiseBinary,
+ CheckElementwiseBinaryLayerVisitorNameAndDescriptor)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(ElementwiseUnary, CheckElementwiseUnaryLayerVisitorNameAndDescriptor)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Fill, CheckFillLayerVisitorNameAndDescriptor)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Gather, CheckGatherLayerVisitorNameAndDescriptor)
@@ -304,7 +310,7 @@ TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(StridedSlice, CheckStridedSlic
TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Transpose, CheckTransposeLayerVisitorNameAndDescriptor)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Activation,
- CheckAdditionLayerVisitorNameNullptrAndDescriptor)
+ CheckActivationLayerVisitorNameNullptrAndDescriptor)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(ArgMinMax,
CheckArgMinMaxLayerVisitorNameNullptrAndDescriptor)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(DepthToSpace,
@@ -315,6 +321,8 @@ TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Comparison,
CheckComparisonLayerVisitorNameNullptrAndDescriptor)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Concat,
CheckConcatLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(ElementwiseBinary,
+ CheckElementwiseBinaryLayerVisitorNameNullptrAndDescriptor)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(ElementwiseUnary,
CheckElementwiseUnaryLayerVisitorNameNullptrAndDescriptor)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Fill,
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
index b1f9512655..988903518d 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -63,6 +63,7 @@ DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(BatchToSpaceNd)
DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Comparison)
DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Concat)
DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(DepthToSpace)
+DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(ElementwiseBinary)
DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(ElementwiseUnary)
DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Fill)
DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Gather)
diff --git a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
index 0636a00234..59dfb862a0 100644
--- a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
+++ b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -27,7 +27,7 @@ void AddBroadcastReshapeLayerOptimizerTest(const TensorInfo& info0,
auto input0 = graph.AddLayer<InputLayer>(0, "input0");
auto input1 = graph.AddLayer<InputLayer>(1, "input1");
- auto add = graph.AddLayer<AdditionLayer>("add");
+ auto add = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "add");
auto output = graph.AddLayer<OutputLayer>(0, "output");
input0->GetOutputSlot().SetTensorInfo(info0);
input1->GetOutputSlot().SetTensorInfo(info1);
@@ -40,7 +40,7 @@ void AddBroadcastReshapeLayerOptimizerTest(const TensorInfo& info0,
CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
- &IsLayerOfType<AdditionLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
&IsLayerOfType<OutputLayer>));
// Run optimizer
@@ -48,19 +48,19 @@ void AddBroadcastReshapeLayerOptimizerTest(const TensorInfo& info0,
// Broadcast reshape layer has been added to the graph correctly
CHECK(CheckSequence(graph.cbegin(), graph.cend(),
- &IsLayerOfType<InputLayer>,
- &IsLayerOfType<InputLayer>,
- &IsLayerOfType<ReshapeLayer>,
- &IsLayerOfType<AdditionLayer>,
- &IsLayerOfType<OutputLayer>));
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<ReshapeLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
+ &IsLayerOfType<OutputLayer>));
Layer* const reshapeLayer = GetFirstLayerWithName(graph, reshapeLayerName);
CHECK(reshapeLayer);
auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo();
// Tensorshape and the data type are correct
- CHECK((addedReshapeTensorInfo.GetShape() == expectedReshapeShape));
- CHECK((addedReshapeTensorInfo.GetDataType() == expectedDataType));
+ CHECK_EQ(addedReshapeTensorInfo.GetShape(), expectedReshapeShape);
+ CHECK_EQ(addedReshapeTensorInfo.GetDataType(), expectedDataType);
}
TEST_CASE("AddBroadcastReshapeLayerSimpleTest")
@@ -121,7 +121,7 @@ TEST_CASE("AddBroadcastReshapeLayerSubtractionTest")
auto input0 = graph.AddLayer<InputLayer>(0, "input0");
auto input1 = graph.AddLayer<InputLayer>(1, "input1");
- auto sub = graph.AddLayer<SubtractionLayer>("sub");
+ auto sub = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Sub, "sub");
auto output = graph.AddLayer<OutputLayer>(0, "output");
input0->GetOutputSlot().SetTensorInfo(info0);
input1->GetOutputSlot().SetTensorInfo(info1);
@@ -134,7 +134,7 @@ TEST_CASE("AddBroadcastReshapeLayerSubtractionTest")
CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
- &IsLayerOfType<SubtractionLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
&IsLayerOfType<OutputLayer>));
// Run optimizer
@@ -145,7 +145,7 @@ TEST_CASE("AddBroadcastReshapeLayerSubtractionTest")
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
&IsLayerOfType<ReshapeLayer>,
- &IsLayerOfType<SubtractionLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
&IsLayerOfType<OutputLayer>));
Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:sub-0");
@@ -153,8 +153,8 @@ TEST_CASE("AddBroadcastReshapeLayerSubtractionTest")
auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo();
// Tensorshape and the data type are correct
- CHECK((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 1, 5 })));
- CHECK((addedReshapeTensorInfo.GetDataType() == DataType::Float32));
+ CHECK_EQ(addedReshapeTensorInfo.GetShape(), TensorShape({ 1, 1, 1, 5 }));
+ CHECK_EQ(addedReshapeTensorInfo.GetDataType(), DataType::Float32);
}
TEST_CASE("AddBroadcastReshapeLayerDivisionTest")
@@ -166,7 +166,7 @@ TEST_CASE("AddBroadcastReshapeLayerDivisionTest")
auto input0 = graph.AddLayer<InputLayer>(0, "input0");
auto input1 = graph.AddLayer<InputLayer>(1, "input1");
- auto div = graph.AddLayer<DivisionLayer>("div");
+ auto div = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Div, "div");
auto output = graph.AddLayer<OutputLayer>(0, "output");
input0->GetOutputSlot().SetTensorInfo(info0);
input1->GetOutputSlot().SetTensorInfo(info1);
@@ -179,7 +179,7 @@ TEST_CASE("AddBroadcastReshapeLayerDivisionTest")
CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
- &IsLayerOfType<DivisionLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
&IsLayerOfType<OutputLayer>));
// Run optimizer
@@ -190,7 +190,7 @@ TEST_CASE("AddBroadcastReshapeLayerDivisionTest")
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
&IsLayerOfType<ReshapeLayer>,
- &IsLayerOfType<DivisionLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
&IsLayerOfType<OutputLayer>));
Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:div-0");
@@ -198,8 +198,8 @@ TEST_CASE("AddBroadcastReshapeLayerDivisionTest")
auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo();
// Tensorshape and the data type are correct
- CHECK((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 4, 5 })));
- CHECK((addedReshapeTensorInfo.GetDataType() == DataType::QAsymmS8));
+ CHECK_EQ(addedReshapeTensorInfo.GetShape(), TensorShape({ 1, 1, 4, 5 }));
+ CHECK_EQ(addedReshapeTensorInfo.GetDataType(), DataType::QAsymmS8);
}
TEST_CASE("AddBroadcastReshapeLayerMultiplicationTest")
@@ -211,7 +211,7 @@ TEST_CASE("AddBroadcastReshapeLayerMultiplicationTest")
auto input0 = graph.AddLayer<InputLayer>(0, "input0");
auto input1 = graph.AddLayer<InputLayer>(1, "input1");
- auto mul = graph.AddLayer<MultiplicationLayer>("mul");
+ auto mul = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul, "mul");
auto output = graph.AddLayer<OutputLayer>(0, "output");
input0->GetOutputSlot().SetTensorInfo(info0);
input1->GetOutputSlot().SetTensorInfo(info1);
@@ -224,7 +224,7 @@ TEST_CASE("AddBroadcastReshapeLayerMultiplicationTest")
CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
- &IsLayerOfType<MultiplicationLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
&IsLayerOfType<OutputLayer>));
// Run optimizer
@@ -235,7 +235,7 @@ TEST_CASE("AddBroadcastReshapeLayerMultiplicationTest")
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
&IsLayerOfType<ReshapeLayer>,
- &IsLayerOfType<MultiplicationLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
&IsLayerOfType<OutputLayer>));
Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:mul-0");
@@ -243,8 +243,8 @@ TEST_CASE("AddBroadcastReshapeLayerMultiplicationTest")
auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo();
// Tensorshape and the data type are correct
- CHECK((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 3, 5 })));
- CHECK((addedReshapeTensorInfo.GetDataType() == DataType::QAsymmU8));
+ CHECK_EQ(addedReshapeTensorInfo.GetShape(), TensorShape({ 1, 1, 3, 5 }));
+ CHECK_EQ(addedReshapeTensorInfo.GetDataType(), DataType::QAsymmU8);
}
TEST_CASE("AddNoBroadcastReshapeLayerTest")
@@ -256,7 +256,7 @@ TEST_CASE("AddNoBroadcastReshapeLayerTest")
auto input0 = graph.AddLayer<InputLayer>(0, "input0");
auto input1 = graph.AddLayer<InputLayer>(1, "input1");
- auto mul = graph.AddLayer<MultiplicationLayer>("mul");
+ auto mul = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul, "mul");
auto output = graph.AddLayer<OutputLayer>(0, "output");
input0->GetOutputSlot().SetTensorInfo(info0);
input1->GetOutputSlot().SetTensorInfo(info1);
@@ -269,7 +269,7 @@ TEST_CASE("AddNoBroadcastReshapeLayerTest")
CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
- &IsLayerOfType<MultiplicationLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
&IsLayerOfType<OutputLayer>));
// Run optimizer
@@ -279,7 +279,7 @@ TEST_CASE("AddNoBroadcastReshapeLayerTest")
CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
- &IsLayerOfType<MultiplicationLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
&IsLayerOfType<OutputLayer>));
Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:mul-0");
@@ -295,7 +295,7 @@ TEST_CASE("ReshapeParentConstLayerTest")
auto input = graph.AddLayer<InputLayer>(0, "input");
auto constant = graph.AddLayer<ConstantLayer>("constant");
- auto mul = graph.AddLayer<MultiplicationLayer>("mul");
+ auto mul = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul, "mul");
auto output = graph.AddLayer<OutputLayer>(0, "output");
uint8_t tensor[] = { 1, 1, 1, 1, 1 };
@@ -313,7 +313,7 @@ TEST_CASE("ReshapeParentConstLayerTest")
CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<ConstantLayer>,
- &IsLayerOfType<MultiplicationLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
&IsLayerOfType<OutputLayer>));
// Run optimizer
@@ -323,7 +323,7 @@ TEST_CASE("ReshapeParentConstLayerTest")
CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<ConstantLayer>,
- &IsLayerOfType<MultiplicationLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
&IsLayerOfType<OutputLayer>));
TensorShape expectedShape = TensorShape{ 1, 1, 1, 5 };
@@ -351,8 +351,8 @@ TEST_CASE("ReshapeParentConstAddLayerMultipleConnectionsTest")
auto input = graph.AddLayer<InputLayer>(0, "input");
auto constant = graph.AddLayer<ConstantLayer>("constant");
- auto add1 = graph.AddLayer<AdditionLayer>("add1");
- auto add2 = graph.AddLayer<AdditionLayer>("add2");
+ auto add1 = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "add1");
+ auto add2 = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "add2");
auto output = graph.AddLayer<OutputLayer>(0, "output");
input->GetOutputSlot().SetTensorInfo(inputInfo);
@@ -371,8 +371,8 @@ TEST_CASE("ReshapeParentConstAddLayerMultipleConnectionsTest")
CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<ConstantLayer>,
- &IsLayerOfType<AdditionLayer>,
- &IsLayerOfType<AdditionLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
&IsLayerOfType<OutputLayer>));
// Run optimizer
@@ -384,8 +384,8 @@ TEST_CASE("ReshapeParentConstAddLayerMultipleConnectionsTest")
&IsLayerOfType<ConstantLayer>,
&IsLayerOfType<ReshapeLayer>,
&IsLayerOfType<ReshapeLayer>,
- &IsLayerOfType<AdditionLayer>,
- &IsLayerOfType<AdditionLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
&IsLayerOfType<OutputLayer>));
// Ensure the output shape of the constant hasn't changed.
diff --git a/src/armnn/test/optimizations/MovePermuteUpTests.cpp b/src/armnn/test/optimizations/MovePermuteUpTests.cpp
index 152e79925b..018286c70d 100644
--- a/src/armnn/test/optimizations/MovePermuteUpTests.cpp
+++ b/src/armnn/test/optimizations/MovePermuteUpTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -36,7 +36,7 @@ TEST_CASE("MovePermuteUpTest")
head = graph.InsertNewLayer<armnn::ActivationLayer>(head->GetInputSlot(0), armnn::ActivationDescriptor{}, "");
head->GetOutputHandler().SetTensorInfo(info);
- head = graph.InsertNewLayer<armnn::AdditionLayer>(head->GetInputSlot(0), "");
+ head = graph.InsertNewLayer<armnn::ElementwiseBinaryLayer>(head->GetInputSlot(0), armnn::BinaryOperation::Add, "");
head->GetOutputHandler().SetTensorInfo(info);
// Inserts input for 2nd input of Addition.
@@ -54,7 +54,7 @@ TEST_CASE("MovePermuteUpTest")
head = graph.InsertNewLayer<armnn::MemCopyLayer>(head->GetInputSlot(0), "");
head->GetOutputHandler().SetTensorInfo(info);
- head = graph.InsertNewLayer<armnn::MultiplicationLayer>(head->GetInputSlot(0), "");
+ head = graph.InsertNewLayer<armnn::ElementwiseBinaryLayer>(head->GetInputSlot(0), armnn::BinaryOperation::Mul, "");
head->GetOutputHandler().SetTensorInfo(info);
// Inserts input for 2nd input of Multiplication.
@@ -69,9 +69,9 @@ TEST_CASE("MovePermuteUpTest")
CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::MultiplicationLayer>, &IsLayerOfType<armnn::MemCopyLayer>,
+ &IsLayerOfType<armnn::ElementwiseBinaryLayer>, &IsLayerOfType<armnn::MemCopyLayer>,
&IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::FakeQuantizationLayer>,
- &IsLayerOfType<armnn::AdditionLayer>, &IsLayerOfType<armnn::ActivationLayer>,
+ &IsLayerOfType<armnn::ElementwiseBinaryLayer>, &IsLayerOfType<armnn::ActivationLayer>,
&IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::OutputLayer>));
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(MovePermuteUp()));
@@ -80,10 +80,11 @@ TEST_CASE("MovePermuteUpTest")
CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::PermuteLayer>,
- &IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::MultiplicationLayer>,
+ &IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::ElementwiseBinaryLayer>,
&IsLayerOfType<armnn::MemCopyLayer>, &IsLayerOfType<armnn::FloorLayer>,
- &IsLayerOfType<armnn::FakeQuantizationLayer>, &IsLayerOfType<armnn::AdditionLayer>,
- &IsLayerOfType<armnn::ActivationLayer>, &IsLayerOfType<armnn::OutputLayer>));
+ &IsLayerOfType<armnn::FakeQuantizationLayer>,
+ &IsLayerOfType<armnn::ElementwiseBinaryLayer>, &IsLayerOfType<armnn::ActivationLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
std::list<std::string> testRelatedLayers = { permuteLayerName };
diff --git a/src/armnn/test/optimizations/MoveTransposeUpTests.cpp b/src/armnn/test/optimizations/MoveTransposeUpTests.cpp
index 09bf9ae7d9..6a6010cb4d 100644
--- a/src/armnn/test/optimizations/MoveTransposeUpTests.cpp
+++ b/src/armnn/test/optimizations/MoveTransposeUpTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020-2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -37,7 +37,7 @@ TEST_CASE("MoveTransposeUpTest")
head = graph.InsertNewLayer<armnn::ActivationLayer>(head->GetInputSlot(0), armnn::ActivationDescriptor{}, "");
head->GetOutputHandler().SetTensorInfo(info);
- head = graph.InsertNewLayer<armnn::AdditionLayer>(head->GetInputSlot(0), "");
+ head = graph.InsertNewLayer<armnn::ElementwiseBinaryLayer>(head->GetInputSlot(0), armnn::BinaryOperation::Add, "");
head->GetOutputHandler().SetTensorInfo(info);
// Inserts input for 2nd input of Addition.
@@ -55,7 +55,7 @@ TEST_CASE("MoveTransposeUpTest")
head = graph.InsertNewLayer<armnn::MemCopyLayer>(head->GetInputSlot(0), "");
head->GetOutputHandler().SetTensorInfo(info);
- head = graph.InsertNewLayer<armnn::MultiplicationLayer>(head->GetInputSlot(0), "");
+ head = graph.InsertNewLayer<armnn::ElementwiseBinaryLayer>(head->GetInputSlot(0), armnn::BinaryOperation::Mul, "");
head->GetOutputHandler().SetTensorInfo(info);
// Inserts input for 2nd input of Multiplication.
@@ -70,9 +70,9 @@ TEST_CASE("MoveTransposeUpTest")
CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::MultiplicationLayer>, &IsLayerOfType<armnn::MemCopyLayer>,
+ &IsLayerOfType<armnn::ElementwiseBinaryLayer>, &IsLayerOfType<armnn::MemCopyLayer>,
&IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::FakeQuantizationLayer>,
- &IsLayerOfType<armnn::AdditionLayer>, &IsLayerOfType<armnn::ActivationLayer>,
+ &IsLayerOfType<armnn::ElementwiseBinaryLayer>, &IsLayerOfType<armnn::ActivationLayer>,
&IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::OutputLayer>));
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(MoveTransposeUp()));
@@ -81,10 +81,11 @@ TEST_CASE("MoveTransposeUpTest")
CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::TransposeLayer>,
- &IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::MultiplicationLayer>,
+ &IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::ElementwiseBinaryLayer>,
&IsLayerOfType<armnn::MemCopyLayer>, &IsLayerOfType<armnn::FloorLayer>,
- &IsLayerOfType<armnn::FakeQuantizationLayer>, &IsLayerOfType<armnn::AdditionLayer>,
- &IsLayerOfType<armnn::ActivationLayer>, &IsLayerOfType<armnn::OutputLayer>));
+ &IsLayerOfType<armnn::FakeQuantizationLayer>,
+ &IsLayerOfType<armnn::ElementwiseBinaryLayer>, &IsLayerOfType<armnn::ActivationLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
std::list<std::string> testRelatedLayers = { transposeLayerName };