From 1a05aad6d5adf3b25848ffd873a0e0e82756aa06 Mon Sep 17 00:00:00 2001
From: Mike Kelly
Date: Fri, 31 Mar 2023 18:00:00 +0100
Subject: Revert "IVGCVSW-3808 Deprecation notices for old ElementwiseBinary layers"

This reverts commit 52e90bf59ecbe90d33368d8fc1fd120f07658aaf.

Change-Id: I5a0d244593d8e760ee7ba0c9d38c02377e1bdc24
Signed-off-by: Mike Kelly
---
 src/armnn/test/OptimizerTests.cpp              | 46 ++++++++++------------
 src/armnn/test/RuntimeTests.cpp                | 18 +++------
 src/armnn/test/ShapeInferenceTests.cpp         |  2 -
 src/armnn/test/SubgraphViewTests.cpp           | 45 +++++++++------------
 src/armnn/test/TestNameOnlyLayerVisitor.cpp    | 13 +----
 .../test/optimizations/FuseActivationTests.cpp | 11 +----
 6 files changed, 47 insertions(+), 88 deletions(-)

(limited to 'src/armnn/test')

diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index ff42ab8cbb..f83900404b 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -339,9 +339,7 @@ TEST_CASE("InsertConvertersTest")
 
     armnn::Layer* head = graph.AddLayer(0, "output");
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     head = graph.InsertNewLayer(head->GetInputSlot(0), "");
-    ARMNN_NO_DEPRECATE_WARN_END
     head->GetOutputHandler().SetTensorInfo(info);
 
     graph.InsertNewLayer(head->GetInputSlot(1), inputId++, "")
@@ -357,16 +355,14 @@
         ->GetOutputHandler().SetTensorInfo(info);
 
     // Check graph layer sequence before inserting convert layers
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     CHECK(CheckSequence(graph.cbegin(),
-                        graph.cend(),
-                        &IsLayerOfType,
-                        &IsLayerOfType,
-                        &IsLayerOfType,
-                        &IsLayerOfType,
-                        &IsLayerOfType,
-                        &IsLayerOfType));
-    ARMNN_NO_DEPRECATE_WARN_END
+                        graph.cend(),
+                        &IsLayerOfType,
+                        &IsLayerOfType,
+                        &IsLayerOfType,
+                        &IsLayerOfType,
+                        &IsLayerOfType,
+                        &IsLayerOfType));
 
     // Check layers have Float16 DataType
     for (auto& layer : graph)
@@ -409,21 +405,19 @@
     }
 
     // Check sequence of layers after inserting convert layers
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     CHECK(CheckSequence(graph.cbegin(),
-                        graph.cend(),
-                        &IsLayerOfType,
-                        &IsLayerOfType,
-                        &IsLayerOfType,
-                        &IsLayerOfType,
-                        &IsLayerOfType,
-                        &IsLayerOfType,
-                        &IsLayerOfType,
-                        &IsLayerOfType,
-                        &IsLayerOfType,
-                        &IsLayerOfType,
-                        &IsLayerOfType));
-    ARMNN_NO_DEPRECATE_WARN_END
+                        graph.cend(),
+                        &IsLayerOfType,
+                        &IsLayerOfType,
+                        &IsLayerOfType,
+                        &IsLayerOfType,
+                        &IsLayerOfType,
+                        &IsLayerOfType,
+                        &IsLayerOfType,
+                        &IsLayerOfType,
+                        &IsLayerOfType,
+                        &IsLayerOfType,
+                        &IsLayerOfType));
 }
 
 void CreateConvolution2dGraph(Graph &graph, const unsigned int* inputShape,
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index 6b3fe0f211..e0d3a222fe 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include
 
 #include
 
@@ -18,6 +19,9 @@
 #include
 
+#include
+#include
+
 #ifdef WITH_VALGRIND
 #include
 #endif
@@ -72,9 +76,7 @@ TEST_CASE("RuntimePreImportInputs")
     auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
     auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     auto addLayer = testNetwork->AddAdditionLayer("add layer");
-    ARMNN_NO_DEPRECATE_WARN_END
     auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
 
     TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
@@ -1304,9 +1306,7 @@ TEST_CASE("RuntimeOptimizeImportOff_LoadNetworkImportOn")
     auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
     auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     auto addLayer = testNetwork->AddAdditionLayer("add layer");
-    ARMNN_NO_DEPRECATE_WARN_END
     auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
 
     TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
@@ -1349,9 +1349,7 @@ TEST_CASE("RuntimeOptimizeExportOff_LoadNetworkExportOn")
     auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
     auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     auto addLayer = testNetwork->AddAdditionLayer("add layer");
-    ARMNN_NO_DEPRECATE_WARN_END
     auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
 
     TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
@@ -1394,9 +1392,7 @@ TEST_CASE("RuntimeOptimizeImportOn_LoadNetworkImportOff")
     auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
     auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     auto addLayer = testNetwork->AddAdditionLayer("add layer");
-    ARMNN_NO_DEPRECATE_WARN_END
     auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
 
     TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
@@ -1439,9 +1435,7 @@ TEST_CASE("RuntimeOptimizeExportOn_LoadNetworkExportOff")
     auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
     auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     auto addLayer = testNetwork->AddAdditionLayer("add layer");
-    ARMNN_NO_DEPRECATE_WARN_END
     auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
 
     TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
@@ -1489,9 +1483,7 @@ TEST_CASE("SyncExecutePreImportInputsHappyPath")
     auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
     auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     auto addLayer = testNetwork->AddAdditionLayer("add layer");
-    ARMNN_NO_DEPRECATE_WARN_END
     auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
 
     TensorInfo tensorInfo{ { 4 }, armnn::DataType::Signed32 };
diff --git a/src/armnn/test/ShapeInferenceTests.cpp b/src/armnn/test/ShapeInferenceTests.cpp
index c33b248dc1..7b5d73a4e5 100644
--- a/src/armnn/test/ShapeInferenceTests.cpp
+++ b/src/armnn/test/ShapeInferenceTests.cpp
@@ -181,9 +181,7 @@ TEST_CASE("AbsTest")
 
 TEST_CASE("AdditionTest")
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     CreateGraphAndRunTest({{ 5, 7, 6, 2 }, { 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "add");
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 TEST_CASE("ArgMinMaxTest")
diff --git a/src/armnn/test/SubgraphViewTests.cpp b/src/armnn/test/SubgraphViewTests.cpp
index 4fcb476fcf..e0fd5fe7c1 100644
--- a/src/armnn/test/SubgraphViewTests.cpp
+++ b/src/armnn/test/SubgraphViewTests.cpp
@@ -1054,7 +1054,7 @@ TEST_CASE("MultiInputSingleOutput")
     auto layerX2 = graph.AddLayer(1, "layerX2");
     auto layerM1 = graph.AddLayer(activationDefaults, "layerM1");
     auto layerM2 = graph.AddLayer(activationDefaults, "layerM2");
-    auto layerM3 = graph.AddLayer(BinaryOperation::Add, "layerM3");
+    auto layerM3 = graph.AddLayer("layerM3");
     auto layerX3 = graph.AddLayer(0, "layerX3");
 
     //  X1  X2
@@ -1081,7 +1081,7 @@ TEST_CASE("MultiInputSingleOutput")
         [](const Layer & l)
         {
             bool toSelect = (l.GetType() == LayerType::Activation
-                             || l.GetType() == LayerType::ElementwiseBinary);
+                             || l.GetType() == LayerType::Addition);
             return toSelect;
         });
@@ -1772,7 +1772,7 @@ TEST_CASE("SubgraphCycles")
     auto m0 = graph.AddLayer(ActivationDescriptor{}, "m0");
     auto x1 = graph.AddLayer(ActivationDescriptor{}, "x1");
     auto m1 = graph.AddLayer(ActivationDescriptor{}, "m1");
-    auto m2 = graph.AddLayer(BinaryOperation::Add, "m2");
+    auto m2 = graph.AddLayer("m2");
     auto x2 = graph.AddLayer(ActivationDescriptor{}, "x2");
 
     x0->GetOutputSlot(0).Connect(m0->GetInputSlot(0));
@@ -1872,7 +1872,7 @@ TEST_CASE("SubgraphViewWorkingCopy")
 bool ReplaceConstantMultiplicationWithDepthwise(SubgraphView& subgraph,
                                                 IConnectableLayer* layer)
 {
-    if (layer->GetType() == LayerType::ElementwiseBinary)
+    if (layer->GetType() == LayerType::Multiplication)
     {
         IInputSlot* patternSubgraphInput = &layer->GetInputSlot(0);
         IInputSlot* patternSubgraphConstant = &layer->GetInputSlot(1);
@@ -1937,12 +1937,12 @@
 bool ReplaceTestMultiplication(SubgraphView& subgraph,
                                IConnectableLayer* layer)
 {
-    if (layer->GetType() == LayerType::ElementwiseBinary)
+    if (layer->GetType() == LayerType::Multiplication)
     {
         switch (layer->GetType())
         {
-            case LayerType::ElementwiseBinary:
+            case LayerType::Multiplication:
                 return ReplaceConstantMultiplicationWithDepthwise(subgraph, layer);
                 break;
             default:
@@ -1993,7 +1993,7 @@ TEST_CASE("SubgraphViewWorkingCopyReplacementFunc")
     auto constant = graph.AddLayer("const");
     constant->m_LayerOutput = std::make_shared(constTensor);
 
-    IConnectableLayer* mul = graph.AddLayer(BinaryOperation::Mul, "mul");
+    IConnectableLayer* mul = graph.AddLayer("mul");
     IConnectableLayer* output = graph.AddLayer(0, "output");
 
     // Create connections between layers
@@ -2015,10 +2015,7 @@ TEST_CASE("SubgraphViewWorkingCopyReplacementFunc")
     // Check the WorkingCopy is as expected before replacement
     CHECK(workingCopy.GetIConnectableLayers().size() == 4);
     int idx=0;
-    LayerType expectedSorted[] = {LayerType::Input,
-                                  LayerType::Constant,
-                                  LayerType::ElementwiseBinary,
-                                  LayerType::Output};
+    LayerType expectedSorted[] = {LayerType::Input, LayerType::Constant, LayerType::Multiplication, LayerType::Output};
     workingCopy.ForEachIConnectableLayer([&idx, &expectedSorted](const IConnectableLayer* l)
         {
             CHECK((expectedSorted[idx] == l->GetType()));
@@ -2212,7 +2209,7 @@ TEST_CASE("SubgraphViewWorkingCopyOptimizationViews")
     auto constant = graph.AddLayer("const");
     constant->m_LayerOutput = std::make_shared(constTensor);
 
-    IConnectableLayer* mul = graph.AddLayer(BinaryOperation::Mul, "mul");
+    IConnectableLayer* mul = graph.AddLayer("mul");
     IConnectableLayer* output = graph.AddLayer(0, "output");
 
     // Create connections between layers
@@ -2233,10 +2230,7 @@ TEST_CASE("SubgraphViewWorkingCopyOptimizationViews")
     // Check the WorkingCopy is as expected before replacement
     int idx=0;
-    LayerType expectedSorted[] = {LayerType::Input,
-                                  LayerType::Constant,
-                                  LayerType::ElementwiseBinary,
-                                  LayerType::Output};
+    LayerType expectedSorted[] = {LayerType::Input, LayerType::Constant, LayerType::Multiplication, LayerType::Output};
     workingCopy.ForEachIConnectableLayer([&idx, &expectedSorted](const IConnectableLayer* l)
         {
             CHECK((expectedSorted[idx] == l->GetType()));
@@ -2291,7 +2285,7 @@ TEST_CASE("SubgraphViewWorkingCopyReplaceSlots")
     auto constant = graph.AddLayer("const");
     constant->m_LayerOutput = std::make_shared(constTensor);
 
-    IConnectableLayer* mul = graph.AddLayer(BinaryOperation::Mul, "mul");
+    IConnectableLayer* mul = graph.AddLayer("mul");
     IConnectableLayer* output = graph.AddLayer(0, "output");
 
     // Create connections between layers
@@ -2312,10 +2306,7 @@ TEST_CASE("SubgraphViewWorkingCopyReplaceSlots")
     // Check the WorkingCopy is as expected before replacement
     CHECK(workingCopy.GetIConnectableLayers().size() == 4);
     int idx=0;
-    LayerType expectedSorted[] = {LayerType::Input,
-                                  LayerType::Constant,
-                                  LayerType::ElementwiseBinary,
-                                  LayerType::Output};
+    LayerType expectedSorted[] = {LayerType::Input, LayerType::Constant, LayerType::Multiplication, LayerType::Output};
     workingCopy.ForEachIConnectableLayer([&idx, &expectedSorted](const IConnectableLayer* l)
         {
             CHECK((expectedSorted[idx] == l->GetType()));
@@ -2355,7 +2346,7 @@ TEST_CASE("SubgraphViewWorkingCopyCloneInputAndOutputSlots")
     auto constant = graph.AddLayer("const");
     constant->m_LayerOutput = std::make_shared(constTensor);
 
-    IConnectableLayer* mul = graph.AddLayer(BinaryOperation::Mul, "mul");
+    IConnectableLayer* mul = graph.AddLayer("mul");
     armnn::ViewsDescriptor splitterDesc(2,4);
     IConnectableLayer* split = graph.AddLayer(splitterDesc, "split");
     IConnectableLayer* abs = graph.AddLayer(ActivationFunction::Abs, "abs");
@@ -2420,7 +2411,7 @@ TEST_CASE("SubgraphViewWorkingCopyCloneInputAndOutputSlots")
     CHECK(workingCopy.GetIConnectableLayers().size() == 4);
     int idx=0;
     LayerType expectedSorted[] = {LayerType::Constant,
-                                  LayerType::ElementwiseBinary,
+                                  LayerType::Multiplication,
                                   LayerType::Splitter,
                                   LayerType::Activation};
     workingCopy.ForEachIConnectableLayer([&idx, &expectedSorted](const IConnectableLayer* l)
@@ -2541,7 +2532,7 @@ TEST_CASE("MultipleInputMultipleOutputSlots_SubstituteGraph")
     Layer* convLayer = graph.AddLayer(Convolution2dDescriptor(), "conv");
     Layer* reluLayer = graph.AddLayer(ActivationDescriptor(), "activation");
     Layer* constLayer = graph.AddLayer("const");
-    Layer* addLayer = graph.AddLayer(BinaryOperation::Add, "add");
+    Layer* addLayer = graph.AddLayer("add");
     Layer* outputLayer1 = graph.AddLayer(0, "output1");
     Layer* outputLayer2 = graph.AddLayer(1, "output2");
@@ -2592,7 +2583,7 @@ TEST_CASE("MultipleInputMultipleOutputSlots_SubstituteGraph")
         // GetWorkingCopy() has caused address pointer of convolution layer to change.
         // Finding new address pointer...
-        if (layer->GetType() == LayerType::ElementwiseBinary)
+        if (layer->GetType() == LayerType::Addition)
         {
             addCopyLayer = layer;
         }
@@ -2643,7 +2634,7 @@ TEST_CASE("MultipleInputMultipleOutputSlots_SubstituteGraphNewSlots")
     Layer* convLayer = graph.AddLayer(Convolution2dDescriptor(), "conv");
     Layer* reluLayer = graph.AddLayer(ActivationDescriptor(), "activation");
     Layer* constLayer = graph.AddLayer("const");
-    Layer* addLayer = graph.AddLayer(BinaryOperation::Add, "add");
+    Layer* addLayer = graph.AddLayer("add");
     Layer* outputLayer1 = graph.AddLayer(0, "output1");
     Layer* outputLayer2 = graph.AddLayer(1, "output2");
@@ -2669,7 +2660,7 @@ TEST_CASE("MultipleInputMultipleOutputSlots_SubstituteGraphNewSlots")
     {
         // GetWorkingCopy() has caused address pointer of convolution layer to change.
        // Finding new address pointer...
-        if (layer->GetType() == LayerType::ElementwiseBinary)
+        if (layer->GetType() == LayerType::Addition)
         {
             addCopyLayer = layer;
         }
diff --git a/src/armnn/test/TestNameOnlyLayerVisitor.cpp b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
index eb488a5bcb..497c36b079 100644
--- a/src/armnn/test/TestNameOnlyLayerVisitor.cpp
+++ b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017,2019-2021,2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -34,40 +34,31 @@ TEST_CASE(#testName) \
 
 TEST_SUITE("TestNameOnlyLayerVisitor")
 {
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Addition, CheckAdditionLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Addition, CheckAdditionLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_END
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Dequantize, CheckDequantizeLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Dequantize, CheckDequantizeLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Division, CheckDivisionLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Division, CheckDivisionLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_END
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Floor, CheckFloorLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Floor, CheckFloorLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Maximum, CheckMaximumLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Maximum, CheckMaximumLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_END
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Merge, CheckMergeLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Merge, CheckMergeLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Minimum, CheckMinimumLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Minimum, CheckMinimumLayerVisitorNameNullptr)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Multiplication, CheckMultiplicationLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Multiplication, CheckMultiplicationLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_END
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Prelu, CheckPreluLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Prelu, CheckPreluLayerVisitorNameNullptr)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Quantize, CheckQuantizeLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Quantize, CheckQuantizeLayerVisitorNameNullptr)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Rank, CheckRankLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Rank, CheckRankLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Subtraction, CheckSubtractionLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Subtraction, CheckSubtractionLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_END
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Switch, CheckSwitchLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Switch, CheckSwitchLayerVisitorNameNullptr)
+
 }
diff --git a/src/armnn/test/optimizations/FuseActivationTests.cpp b/src/armnn/test/optimizations/FuseActivationTests.cpp
index 2ccbc9418c..3b8917192d 100644
--- a/src/armnn/test/optimizations/FuseActivationTests.cpp
+++ b/src/armnn/test/optimizations/FuseActivationTests.cpp
@@ -1,10 +1,11 @@
 //
-// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #include "LayersFwd.hpp"
 
+#include
 #include
 #include
 #include
@@ -237,7 +238,6 @@ public:
     }
 };
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 template>
 struct MultiplicationTest
 {
@@ -272,9 +272,7 @@ struct MultiplicationTest
         return {};
     }
 };
-ARMNN_NO_DEPRECATE_WARN_END
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 template>
 struct AdditionTest
 {
@@ -309,9 +307,7 @@ struct AdditionTest
         return {};
     }
 };
-ARMNN_NO_DEPRECATE_WARN_END
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 template>
 struct SubtractionTest
 {
@@ -346,9 +342,7 @@ struct SubtractionTest
         return {};
    }
 };
-ARMNN_NO_DEPRECATE_WARN_END
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 template>
 struct DivisionTest
 {
@@ -383,7 +377,6 @@ struct DivisionTest
         return {};
     }
 };
-ARMNN_NO_DEPRECATE_WARN_END
 
 template
-- 
cgit v1.2.1
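For context, the change this commit rolls back had deprecated the single-purpose arithmetic layers (AddAdditionLayer, AddMultiplicationLayer, AddSubtractionLayer, AddDivisionLayer and the Maximum/Minimum variants) in favour of a unified ElementwiseBinary layer. The sketch below shows the two spellings side by side. It is a minimal illustration, not part of the patch: it assumes the Arm NN 23.05-era INetwork API, and AddElementwiseBinaryLayer, ElementwiseBinaryDescriptor and BinaryOperation are taken from that assumed API rather than from this diff.

    // Minimal sketch (assumed Arm NN 23.05-era API; not part of this patch).
    #include <armnn/INetwork.hpp>
    #include <armnn/Descriptors.hpp>

    int main()
    {
        using namespace armnn;

        INetworkPtr network = INetwork::Create();

        IConnectableLayer* input0 = network->AddInputLayer(0, "input 0");
        IConnectableLayer* input1 = network->AddInputLayer(1, "input 1");

        // Older single-purpose layer; this revert removes the deprecation
        // notices that commit 52e90bf5 had attached to calls like this:
        IConnectableLayer* add = network->AddAdditionLayer("add layer");

        // Unified replacement introduced alongside the deprecation (assumed API):
        // IConnectableLayer* add = network->AddElementwiseBinaryLayer(
        //     ElementwiseBinaryDescriptor(BinaryOperation::Add), "add layer");

        IConnectableLayer* output = network->AddOutputLayer(2, "output layer");

        // Describe the tensors flowing through the graph, mirroring the
        // TensorInfo used in the tests above.
        TensorInfo tensorInfo{{4}, DataType::Signed32};
        input0->GetOutputSlot(0).SetTensorInfo(tensorInfo);
        input1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
        add->GetOutputSlot(0).SetTensorInfo(tensorInfo);

        // Wire output slots into input slots: input0 + input1 -> output.
        input0->GetOutputSlot(0).Connect(add->GetInputSlot(0));
        input1->GetOutputSlot(0).Connect(add->GetInputSlot(1));
        add->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        return 0;
    }

Either form builds the same two-input graph; the ElementwiseBinary variant folds the choice of operation into a descriptor instead of the layer type, which is why the tests in this diff swap LayerType::ElementwiseBinary back to LayerType::Addition and LayerType::Multiplication.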