path: root/src/armnn/test/optimizations/MoveTransposeUpTests.cpp
author      Mike Kelly <mike.kelly@arm.com>            2023-03-08 13:47:17 +0000
committer   Francis Murtagh <francis.murtagh@arm.com>  2023-03-14 16:40:09 +0000
commit      3ec3077b4eaedcc0c20ab5774bdbe365da541445 (patch)
tree        d601d2000897dec8691bf64cbddc9036f26b8034 /src/armnn/test/optimizations/MoveTransposeUpTests.cpp
parent      a088cd00b3cce672d26cdcb4965fc2a86b48f339 (diff)
download    armnn-3ec3077b4eaedcc0c20ab5774bdbe365da541445.tar.gz
IVGCVSW-3808 Add ElementwiseBinaryLayer
!android-nn-driver:9329

* Added ElementwiseBinaryLayer that can represent all ElementwiseBinary operations including Add, Div, Sub, Maximum, Mul and Minimum.
* Updated Delegate to use ElementwiseBinaryLayer instead of the Add, Div, Sub, Maximum, Mul and Minimum layers.
* Updated Deserializer to use ElementwiseBinaryLayer instead of the Add, Div, Sub, Maximum, Mul and Minimum layers.
* Updated OnnxParser to use ElementwiseBinaryLayer instead of the Add layer.
* Updated TfLiteParser to use ElementwiseBinaryLayer instead of the Add, Div, Sub, Maximum, Mul and Minimum layers.
* Updated CL and Neon tests to use ElementwiseBinaryLayer.
* Updated CL and Neon Backend Specific Optimizations to accept ElementwiseBinaryLayer as well as the Add, Div, Mul, Sub, Maximum and Minimum layers.

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I7cbb96b60eb01f0e2b57b0541016d48a08b86c75
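For reference, here is a minimal sketch of how the new layer is used when building a network through the public INetwork API, assuming the AddElementwiseBinaryLayer / ElementwiseBinaryDescriptor API introduced alongside this layer; the tensor shape, data type and layer names are illustrative only and are not taken from this change:

// Minimal sketch (illustrative, not part of this commit): an elementwise Add
// built with the new ElementwiseBinaryLayer instead of the old AdditionLayer.
#include <armnn/INetwork.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

armnn::INetworkPtr BuildAddNetwork()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    // Shape and data type assumed for this example only.
    TensorInfo info(TensorShape({ 1, 4 }), DataType::Float32);

    IConnectableLayer* input0 = network->AddInputLayer(0, "input0");
    IConnectableLayer* input1 = network->AddInputLayer(1, "input1");

    // Previously this would have been AddAdditionLayer(); the operation is now
    // chosen via ElementwiseBinaryDescriptor / BinaryOperation
    // (Add, Div, Sub, Maximum, Mul, Minimum).
    ElementwiseBinaryDescriptor addDesc(BinaryOperation::Add);
    IConnectableLayer* add = network->AddElementwiseBinaryLayer(addDesc, "add");

    IConnectableLayer* output = network->AddOutputLayer(0, "output");

    // Wire the graph: both inputs feed the binary layer, which feeds the output.
    input0->GetOutputSlot(0).Connect(add->GetInputSlot(0));
    input1->GetOutputSlot(0).Connect(add->GetInputSlot(1));
    add->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input0->GetOutputSlot(0).SetTensorInfo(info);
    input1->GetOutputSlot(0).SetTensorInfo(info);
    add->GetOutputSlot(0).SetTensorInfo(info);

    return network;
}

The test changed below exercises the graph-level equivalent, graph.InsertNewLayer<armnn::ElementwiseBinaryLayer>(slot, armnn::BinaryOperation::Add, ""), which is what replaces the old AdditionLayer and MultiplicationLayer insertions in MoveTransposeUpTests.cpp.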
Diffstat (limited to 'src/armnn/test/optimizations/MoveTransposeUpTests.cpp')
-rw-r--r--  src/armnn/test/optimizations/MoveTransposeUpTests.cpp | 17
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/src/armnn/test/optimizations/MoveTransposeUpTests.cpp b/src/armnn/test/optimizations/MoveTransposeUpTests.cpp
index 09bf9ae7d9..6a6010cb4d 100644
--- a/src/armnn/test/optimizations/MoveTransposeUpTests.cpp
+++ b/src/armnn/test/optimizations/MoveTransposeUpTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020-2021,2023 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -37,7 +37,7 @@ TEST_CASE("MoveTransposeUpTest")
head = graph.InsertNewLayer<armnn::ActivationLayer>(head->GetInputSlot(0), armnn::ActivationDescriptor{}, "");
head->GetOutputHandler().SetTensorInfo(info);
- head = graph.InsertNewLayer<armnn::AdditionLayer>(head->GetInputSlot(0), "");
+ head = graph.InsertNewLayer<armnn::ElementwiseBinaryLayer>(head->GetInputSlot(0), armnn::BinaryOperation::Add, "");
head->GetOutputHandler().SetTensorInfo(info);
// Inserts input for 2nd input of Addition.
@@ -55,7 +55,7 @@ TEST_CASE("MoveTransposeUpTest")
head = graph.InsertNewLayer<armnn::MemCopyLayer>(head->GetInputSlot(0), "");
head->GetOutputHandler().SetTensorInfo(info);
- head = graph.InsertNewLayer<armnn::MultiplicationLayer>(head->GetInputSlot(0), "");
+ head = graph.InsertNewLayer<armnn::ElementwiseBinaryLayer>(head->GetInputSlot(0), armnn::BinaryOperation::Mul, "");
head->GetOutputHandler().SetTensorInfo(info);
// Inserts input for 2nd input of Multiplication.
@@ -70,9 +70,9 @@ TEST_CASE("MoveTransposeUpTest")
CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::MultiplicationLayer>, &IsLayerOfType<armnn::MemCopyLayer>,
+ &IsLayerOfType<armnn::ElementwiseBinaryLayer>, &IsLayerOfType<armnn::MemCopyLayer>,
&IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::FakeQuantizationLayer>,
- &IsLayerOfType<armnn::AdditionLayer>, &IsLayerOfType<armnn::ActivationLayer>,
+ &IsLayerOfType<armnn::ElementwiseBinaryLayer>, &IsLayerOfType<armnn::ActivationLayer>,
&IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::OutputLayer>));
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(MoveTransposeUp()));
@@ -81,10 +81,11 @@ TEST_CASE("MoveTransposeUpTest")
CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::TransposeLayer>,
- &IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::MultiplicationLayer>,
+ &IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::ElementwiseBinaryLayer>,
&IsLayerOfType<armnn::MemCopyLayer>, &IsLayerOfType<armnn::FloorLayer>,
- &IsLayerOfType<armnn::FakeQuantizationLayer>, &IsLayerOfType<armnn::AdditionLayer>,
- &IsLayerOfType<armnn::ActivationLayer>, &IsLayerOfType<armnn::OutputLayer>));
+ &IsLayerOfType<armnn::FakeQuantizationLayer>,
+ &IsLayerOfType<armnn::ElementwiseBinaryLayer>, &IsLayerOfType<armnn::ActivationLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
std::list<std::string> testRelatedLayers = { transposeLayerName };