diff options
author | Mike Kelly <mike.kelly@arm.com> | 2023-03-08 13:47:17 +0000 |
---|---|---|
committer | Francis Murtagh <francis.murtagh@arm.com> | 2023-03-14 16:40:09 +0000 |
commit | 3ec3077b4eaedcc0c20ab5774bdbe365da541445 (patch) | |
tree | d601d2000897dec8691bf64cbddc9036f26b8034 /src/armnn/test/optimizations | |
parent | a088cd00b3cce672d26cdcb4965fc2a86b48f339 (diff) | |
download | armnn-3ec3077b4eaedcc0c20ab5774bdbe365da541445.tar.gz |
IVGCVSW-3808 Add ElementwiseBinaryLayer
!android-nn-driver:9329
* Added ElementwiseBinaryLayer that can represent all ElementwiseBinary
operations including Add, Div, Sub, Maximum, Mul and Minimum.
* Updated Delegate to use ElementwiseBinaryLayer instead of the Add,
Div, Sub, Maximum, Mul and Minimum layers.
* Updated Deserializer to use ElementwiseBinaryLayer instead of the Add,
Div, Sub, Maximum, Mul and Minimum layers.
* Updated OnnxParser to use ElementwiseBinaryLayer instead of the Add
layer.
* Updated TfLiteParser to use ElementwiseBinaryLayer instead of the Add,
Div, Sub, Maximum, Mul and Minimum layers.
* Updated CL and Neon tests to use ElementwiseBinaryLayer.
* Updated CL and Neon Backend Specific Optimizations to accept
ElementwiseBinaryLayers as well as Add, Div, Mul, Sub, Maximum and Minimum
layers.
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I7cbb96b60eb01f0e2b57b0541016d48a08b86c75
Diffstat (limited to 'src/armnn/test/optimizations')
3 files changed, 55 insertions, 53 deletions
diff --git a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp index 0636a00234..59dfb862a0 100644 --- a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp +++ b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2020-2021,2023 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -27,7 +27,7 @@ void AddBroadcastReshapeLayerOptimizerTest(const TensorInfo& info0, auto input0 = graph.AddLayer<InputLayer>(0, "input0"); auto input1 = graph.AddLayer<InputLayer>(1, "input1"); - auto add = graph.AddLayer<AdditionLayer>("add"); + auto add = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "add"); auto output = graph.AddLayer<OutputLayer>(0, "output"); input0->GetOutputSlot().SetTensorInfo(info0); input1->GetOutputSlot().SetTensorInfo(info1); @@ -40,7 +40,7 @@ void AddBroadcastReshapeLayerOptimizerTest(const TensorInfo& info0, CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, &IsLayerOfType<InputLayer>, - &IsLayerOfType<AdditionLayer>, + &IsLayerOfType<ElementwiseBinaryLayer>, &IsLayerOfType<OutputLayer>)); // Run optimizer @@ -48,19 +48,19 @@ void AddBroadcastReshapeLayerOptimizerTest(const TensorInfo& info0, // Broadcast reshape layer has been added to the graph correctly CHECK(CheckSequence(graph.cbegin(), graph.cend(), - &IsLayerOfType<InputLayer>, - &IsLayerOfType<InputLayer>, - &IsLayerOfType<ReshapeLayer>, - &IsLayerOfType<AdditionLayer>, - &IsLayerOfType<OutputLayer>)); + &IsLayerOfType<InputLayer>, + &IsLayerOfType<InputLayer>, + &IsLayerOfType<ReshapeLayer>, + &IsLayerOfType<ElementwiseBinaryLayer>, + &IsLayerOfType<OutputLayer>)); Layer* const reshapeLayer = GetFirstLayerWithName(graph, reshapeLayerName); CHECK(reshapeLayer); auto addedReshapeTensorInfo = 
reshapeLayer->GetOutputSlot().GetTensorInfo(); // Tensorshape and the data type are correct - CHECK((addedReshapeTensorInfo.GetShape() == expectedReshapeShape)); - CHECK((addedReshapeTensorInfo.GetDataType() == expectedDataType)); + CHECK_EQ(addedReshapeTensorInfo.GetShape(), expectedReshapeShape); + CHECK_EQ(addedReshapeTensorInfo.GetDataType(), expectedDataType); } TEST_CASE("AddBroadcastReshapeLayerSimpleTest") @@ -121,7 +121,7 @@ TEST_CASE("AddBroadcastReshapeLayerSubtractionTest") auto input0 = graph.AddLayer<InputLayer>(0, "input0"); auto input1 = graph.AddLayer<InputLayer>(1, "input1"); - auto sub = graph.AddLayer<SubtractionLayer>("sub"); + auto sub = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Sub, "sub"); auto output = graph.AddLayer<OutputLayer>(0, "output"); input0->GetOutputSlot().SetTensorInfo(info0); input1->GetOutputSlot().SetTensorInfo(info1); @@ -134,7 +134,7 @@ TEST_CASE("AddBroadcastReshapeLayerSubtractionTest") CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, &IsLayerOfType<InputLayer>, - &IsLayerOfType<SubtractionLayer>, + &IsLayerOfType<ElementwiseBinaryLayer>, &IsLayerOfType<OutputLayer>)); // Run optimizer @@ -145,7 +145,7 @@ TEST_CASE("AddBroadcastReshapeLayerSubtractionTest") &IsLayerOfType<InputLayer>, &IsLayerOfType<InputLayer>, &IsLayerOfType<ReshapeLayer>, - &IsLayerOfType<SubtractionLayer>, + &IsLayerOfType<ElementwiseBinaryLayer>, &IsLayerOfType<OutputLayer>)); Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:sub-0"); @@ -153,8 +153,8 @@ TEST_CASE("AddBroadcastReshapeLayerSubtractionTest") auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo(); // Tensorshape and the data type are correct - CHECK((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 1, 5 }))); - CHECK((addedReshapeTensorInfo.GetDataType() == DataType::Float32)); + CHECK_EQ(addedReshapeTensorInfo.GetShape(), TensorShape({ 1, 1, 1, 5 })); + 
CHECK_EQ(addedReshapeTensorInfo.GetDataType(), DataType::Float32); } TEST_CASE("AddBroadcastReshapeLayerDivisionTest") @@ -166,7 +166,7 @@ TEST_CASE("AddBroadcastReshapeLayerDivisionTest") auto input0 = graph.AddLayer<InputLayer>(0, "input0"); auto input1 = graph.AddLayer<InputLayer>(1, "input1"); - auto div = graph.AddLayer<DivisionLayer>("div"); + auto div = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Div, "div"); auto output = graph.AddLayer<OutputLayer>(0, "output"); input0->GetOutputSlot().SetTensorInfo(info0); input1->GetOutputSlot().SetTensorInfo(info1); @@ -179,7 +179,7 @@ TEST_CASE("AddBroadcastReshapeLayerDivisionTest") CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, &IsLayerOfType<InputLayer>, - &IsLayerOfType<DivisionLayer>, + &IsLayerOfType<ElementwiseBinaryLayer>, &IsLayerOfType<OutputLayer>)); // Run optimizer @@ -190,7 +190,7 @@ TEST_CASE("AddBroadcastReshapeLayerDivisionTest") &IsLayerOfType<InputLayer>, &IsLayerOfType<InputLayer>, &IsLayerOfType<ReshapeLayer>, - &IsLayerOfType<DivisionLayer>, + &IsLayerOfType<ElementwiseBinaryLayer>, &IsLayerOfType<OutputLayer>)); Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:div-0"); @@ -198,8 +198,8 @@ TEST_CASE("AddBroadcastReshapeLayerDivisionTest") auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo(); // Tensorshape and the data type are correct - CHECK((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 4, 5 }))); - CHECK((addedReshapeTensorInfo.GetDataType() == DataType::QAsymmS8)); + CHECK_EQ(addedReshapeTensorInfo.GetShape(), TensorShape({ 1, 1, 4, 5 })); + CHECK_EQ(addedReshapeTensorInfo.GetDataType(), DataType::QAsymmS8); } TEST_CASE("AddBroadcastReshapeLayerMultiplicationTest") @@ -211,7 +211,7 @@ TEST_CASE("AddBroadcastReshapeLayerMultiplicationTest") auto input0 = graph.AddLayer<InputLayer>(0, "input0"); auto input1 = graph.AddLayer<InputLayer>(1, "input1"); - auto mul = 
graph.AddLayer<MultiplicationLayer>("mul"); + auto mul = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul, "mul"); auto output = graph.AddLayer<OutputLayer>(0, "output"); input0->GetOutputSlot().SetTensorInfo(info0); input1->GetOutputSlot().SetTensorInfo(info1); @@ -224,7 +224,7 @@ TEST_CASE("AddBroadcastReshapeLayerMultiplicationTest") CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, &IsLayerOfType<InputLayer>, - &IsLayerOfType<MultiplicationLayer>, + &IsLayerOfType<ElementwiseBinaryLayer>, &IsLayerOfType<OutputLayer>)); // Run optimizer @@ -235,7 +235,7 @@ TEST_CASE("AddBroadcastReshapeLayerMultiplicationTest") &IsLayerOfType<InputLayer>, &IsLayerOfType<InputLayer>, &IsLayerOfType<ReshapeLayer>, - &IsLayerOfType<MultiplicationLayer>, + &IsLayerOfType<ElementwiseBinaryLayer>, &IsLayerOfType<OutputLayer>)); Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:mul-0"); @@ -243,8 +243,8 @@ TEST_CASE("AddBroadcastReshapeLayerMultiplicationTest") auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo(); // Tensorshape and the data type are correct - CHECK((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 3, 5 }))); - CHECK((addedReshapeTensorInfo.GetDataType() == DataType::QAsymmU8)); + CHECK_EQ(addedReshapeTensorInfo.GetShape(), TensorShape({ 1, 1, 3, 5 })); + CHECK_EQ(addedReshapeTensorInfo.GetDataType(), DataType::QAsymmU8); } TEST_CASE("AddNoBroadcastReshapeLayerTest") @@ -256,7 +256,7 @@ TEST_CASE("AddNoBroadcastReshapeLayerTest") auto input0 = graph.AddLayer<InputLayer>(0, "input0"); auto input1 = graph.AddLayer<InputLayer>(1, "input1"); - auto mul = graph.AddLayer<MultiplicationLayer>("mul"); + auto mul = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul, "mul"); auto output = graph.AddLayer<OutputLayer>(0, "output"); input0->GetOutputSlot().SetTensorInfo(info0); input1->GetOutputSlot().SetTensorInfo(info1); @@ -269,7 +269,7 @@ 
TEST_CASE("AddNoBroadcastReshapeLayerTest") CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, &IsLayerOfType<InputLayer>, - &IsLayerOfType<MultiplicationLayer>, + &IsLayerOfType<ElementwiseBinaryLayer>, &IsLayerOfType<OutputLayer>)); // Run optimizer @@ -279,7 +279,7 @@ TEST_CASE("AddNoBroadcastReshapeLayerTest") CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, &IsLayerOfType<InputLayer>, - &IsLayerOfType<MultiplicationLayer>, + &IsLayerOfType<ElementwiseBinaryLayer>, &IsLayerOfType<OutputLayer>)); Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:mul-0"); @@ -295,7 +295,7 @@ TEST_CASE("ReshapeParentConstLayerTest") auto input = graph.AddLayer<InputLayer>(0, "input"); auto constant = graph.AddLayer<ConstantLayer>("constant"); - auto mul = graph.AddLayer<MultiplicationLayer>("mul"); + auto mul = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul, "mul"); auto output = graph.AddLayer<OutputLayer>(0, "output"); uint8_t tensor[] = { 1, 1, 1, 1, 1 }; @@ -313,7 +313,7 @@ TEST_CASE("ReshapeParentConstLayerTest") CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, &IsLayerOfType<ConstantLayer>, - &IsLayerOfType<MultiplicationLayer>, + &IsLayerOfType<ElementwiseBinaryLayer>, &IsLayerOfType<OutputLayer>)); // Run optimizer @@ -323,7 +323,7 @@ TEST_CASE("ReshapeParentConstLayerTest") CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, &IsLayerOfType<ConstantLayer>, - &IsLayerOfType<MultiplicationLayer>, + &IsLayerOfType<ElementwiseBinaryLayer>, &IsLayerOfType<OutputLayer>)); TensorShape expectedShape = TensorShape{ 1, 1, 1, 5 }; @@ -351,8 +351,8 @@ TEST_CASE("ReshapeParentConstAddLayerMultipleConnectionsTest") auto input = graph.AddLayer<InputLayer>(0, "input"); auto constant = graph.AddLayer<ConstantLayer>("constant"); - auto add1 = graph.AddLayer<AdditionLayer>("add1"); - auto add2 = graph.AddLayer<AdditionLayer>("add2"); + auto add1 = 
graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "add1"); + auto add2 = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "add2"); auto output = graph.AddLayer<OutputLayer>(0, "output"); input->GetOutputSlot().SetTensorInfo(inputInfo); @@ -371,8 +371,8 @@ TEST_CASE("ReshapeParentConstAddLayerMultipleConnectionsTest") CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, &IsLayerOfType<ConstantLayer>, - &IsLayerOfType<AdditionLayer>, - &IsLayerOfType<AdditionLayer>, + &IsLayerOfType<ElementwiseBinaryLayer>, + &IsLayerOfType<ElementwiseBinaryLayer>, &IsLayerOfType<OutputLayer>)); // Run optimizer @@ -384,8 +384,8 @@ TEST_CASE("ReshapeParentConstAddLayerMultipleConnectionsTest") &IsLayerOfType<ConstantLayer>, &IsLayerOfType<ReshapeLayer>, &IsLayerOfType<ReshapeLayer>, - &IsLayerOfType<AdditionLayer>, - &IsLayerOfType<AdditionLayer>, + &IsLayerOfType<ElementwiseBinaryLayer>, + &IsLayerOfType<ElementwiseBinaryLayer>, &IsLayerOfType<OutputLayer>)); // Ensure the output shape of the constant hasn't changed. diff --git a/src/armnn/test/optimizations/MovePermuteUpTests.cpp b/src/armnn/test/optimizations/MovePermuteUpTests.cpp index 152e79925b..018286c70d 100644 --- a/src/armnn/test/optimizations/MovePermuteUpTests.cpp +++ b/src/armnn/test/optimizations/MovePermuteUpTests.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -36,7 +36,7 @@ TEST_CASE("MovePermuteUpTest") head = graph.InsertNewLayer<armnn::ActivationLayer>(head->GetInputSlot(0), armnn::ActivationDescriptor{}, ""); head->GetOutputHandler().SetTensorInfo(info); - head = graph.InsertNewLayer<armnn::AdditionLayer>(head->GetInputSlot(0), ""); + head = graph.InsertNewLayer<armnn::ElementwiseBinaryLayer>(head->GetInputSlot(0), armnn::BinaryOperation::Add, ""); head->GetOutputHandler().SetTensorInfo(info); // Inserts input for 2nd input of Addition. @@ -54,7 +54,7 @@ TEST_CASE("MovePermuteUpTest") head = graph.InsertNewLayer<armnn::MemCopyLayer>(head->GetInputSlot(0), ""); head->GetOutputHandler().SetTensorInfo(info); - head = graph.InsertNewLayer<armnn::MultiplicationLayer>(head->GetInputSlot(0), ""); + head = graph.InsertNewLayer<armnn::ElementwiseBinaryLayer>(head->GetInputSlot(0), armnn::BinaryOperation::Mul, ""); head->GetOutputHandler().SetTensorInfo(info); // Inserts input for 2nd input of Multiplication. @@ -69,9 +69,9 @@ TEST_CASE("MovePermuteUpTest") CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>, - &IsLayerOfType<armnn::MultiplicationLayer>, &IsLayerOfType<armnn::MemCopyLayer>, + &IsLayerOfType<armnn::ElementwiseBinaryLayer>, &IsLayerOfType<armnn::MemCopyLayer>, &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::FakeQuantizationLayer>, - &IsLayerOfType<armnn::AdditionLayer>, &IsLayerOfType<armnn::ActivationLayer>, + &IsLayerOfType<armnn::ElementwiseBinaryLayer>, &IsLayerOfType<armnn::ActivationLayer>, &IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::OutputLayer>)); armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(MovePermuteUp())); @@ -80,10 +80,11 @@ TEST_CASE("MovePermuteUpTest") CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>, 
&IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::PermuteLayer>, - &IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::MultiplicationLayer>, + &IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::ElementwiseBinaryLayer>, &IsLayerOfType<armnn::MemCopyLayer>, &IsLayerOfType<armnn::FloorLayer>, - &IsLayerOfType<armnn::FakeQuantizationLayer>, &IsLayerOfType<armnn::AdditionLayer>, - &IsLayerOfType<armnn::ActivationLayer>, &IsLayerOfType<armnn::OutputLayer>)); + &IsLayerOfType<armnn::FakeQuantizationLayer>, + &IsLayerOfType<armnn::ElementwiseBinaryLayer>, &IsLayerOfType<armnn::ActivationLayer>, + &IsLayerOfType<armnn::OutputLayer>)); std::list<std::string> testRelatedLayers = { permuteLayerName }; diff --git a/src/armnn/test/optimizations/MoveTransposeUpTests.cpp b/src/armnn/test/optimizations/MoveTransposeUpTests.cpp index 09bf9ae7d9..6a6010cb4d 100644 --- a/src/armnn/test/optimizations/MoveTransposeUpTests.cpp +++ b/src/armnn/test/optimizations/MoveTransposeUpTests.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2020 Arm Ltd. All rights reserved. +// Copyright © 2020-2021,2023 Arm Ltd. All rights reserved. // SPDX-License-Identifier: MIT // @@ -37,7 +37,7 @@ TEST_CASE("MoveTransposeUpTest") head = graph.InsertNewLayer<armnn::ActivationLayer>(head->GetInputSlot(0), armnn::ActivationDescriptor{}, ""); head->GetOutputHandler().SetTensorInfo(info); - head = graph.InsertNewLayer<armnn::AdditionLayer>(head->GetInputSlot(0), ""); + head = graph.InsertNewLayer<armnn::ElementwiseBinaryLayer>(head->GetInputSlot(0), armnn::BinaryOperation::Add, ""); head->GetOutputHandler().SetTensorInfo(info); // Inserts input for 2nd input of Addition. 
@@ -55,7 +55,7 @@ TEST_CASE("MoveTransposeUpTest") head = graph.InsertNewLayer<armnn::MemCopyLayer>(head->GetInputSlot(0), ""); head->GetOutputHandler().SetTensorInfo(info); - head = graph.InsertNewLayer<armnn::MultiplicationLayer>(head->GetInputSlot(0), ""); + head = graph.InsertNewLayer<armnn::ElementwiseBinaryLayer>(head->GetInputSlot(0), armnn::BinaryOperation::Mul, ""); head->GetOutputHandler().SetTensorInfo(info); // Inserts input for 2nd input of Multiplication. @@ -70,9 +70,9 @@ TEST_CASE("MoveTransposeUpTest") CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>, - &IsLayerOfType<armnn::MultiplicationLayer>, &IsLayerOfType<armnn::MemCopyLayer>, + &IsLayerOfType<armnn::ElementwiseBinaryLayer>, &IsLayerOfType<armnn::MemCopyLayer>, &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::FakeQuantizationLayer>, - &IsLayerOfType<armnn::AdditionLayer>, &IsLayerOfType<armnn::ActivationLayer>, + &IsLayerOfType<armnn::ElementwiseBinaryLayer>, &IsLayerOfType<armnn::ActivationLayer>, &IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::OutputLayer>)); armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(MoveTransposeUp())); @@ -81,10 +81,11 @@ TEST_CASE("MoveTransposeUpTest") CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::TransposeLayer>, - &IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::MultiplicationLayer>, + &IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::ElementwiseBinaryLayer>, &IsLayerOfType<armnn::MemCopyLayer>, &IsLayerOfType<armnn::FloorLayer>, - &IsLayerOfType<armnn::FakeQuantizationLayer>, &IsLayerOfType<armnn::AdditionLayer>, - &IsLayerOfType<armnn::ActivationLayer>, &IsLayerOfType<armnn::OutputLayer>)); + &IsLayerOfType<armnn::FakeQuantizationLayer>, + 
&IsLayerOfType<armnn::ElementwiseBinaryLayer>, &IsLayerOfType<armnn::ActivationLayer>, + &IsLayerOfType<armnn::OutputLayer>)); std::list<std::string> testRelatedLayers = { transposeLayerName }; |