author     Mike Kelly <mike.kelly@arm.com>           2023-03-15 15:06:23 +0000
committer  TeresaARM <teresa.charlinreyes@arm.com>   2023-03-30 13:40:17 +0000
commit     52e90bf59ecbe90d33368d8fc1fd120f07658aaf (patch)
tree       7ea7d3bb8148ce3973e0fd6abcd951437211255d /src/armnn/test
parent     41f9d2a5bc060f6c63e80621ff2264a66fb298bd (diff)
IVGCVSW-3808 Deprecation notices for old ElementwiseBinary layers
* Added deprecation notices for the old element-wise binary layers (Addition, Division, Maximum, Minimum, Multiplication and Subtraction), which are superseded by ElementwiseBinaryLayer.
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: Iebbbaff38cc9c347b25eb2f9054c914a4f931c68
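
The shape of the change is visible throughout the diff below: each call site either wraps the deprecated layer in warning-suppression macros or switches to the unified ElementwiseBinaryLayer. As a rough migration sketch for user code (assuming the INetwork additions that accompany this deprecation; check the release headers for the exact ElementwiseBinaryDescriptor overloads — AddEltwiseAdd is a hypothetical helper, not part of this patch):

    #include <armnn/Descriptors.hpp>
    #include <armnn/INetwork.hpp>

    armnn::IConnectableLayer* AddEltwiseAdd(armnn::INetwork& network)
    {
        // Before (now draws a deprecation warning):
        //     return network.AddAdditionLayer("add layer");

        // After: one layer type, parameterised by the binary operation.
        armnn::ElementwiseBinaryDescriptor desc(armnn::BinaryOperation::Add);
        return network.AddElementwiseBinaryLayer(desc, "add layer");
    }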
Diffstat (limited to 'src/armnn/test')
-rw-r--r--  src/armnn/test/OptimizerTests.cpp                      46
-rw-r--r--  src/armnn/test/RuntimeTests.cpp                        18
-rw-r--r--  src/armnn/test/ShapeInferenceTests.cpp                  2
-rw-r--r--  src/armnn/test/SubgraphViewTests.cpp                   45
-rw-r--r--  src/armnn/test/TestNameOnlyLayerVisitor.cpp            13
-rw-r--r--  src/armnn/test/optimizations/FuseActivationTests.cpp   11
6 files changed, 88 insertions, 47 deletions
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index f83900404b..ff42ab8cbb 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -339,7 +339,9 @@ TEST_CASE("InsertConvertersTest")
     armnn::Layer* head = graph.AddLayer<armnn::OutputLayer>(0, "output");
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     head = graph.InsertNewLayer<armnn::AdditionLayer>(head->GetInputSlot(0), "");
+    ARMNN_NO_DEPRECATE_WARN_END
     head->GetOutputHandler().SetTensorInfo(info);
 
     graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(1), inputId++, "")
@@ -355,14 +357,16 @@ TEST_CASE("InsertConvertersTest")
         ->GetOutputHandler().SetTensorInfo(info);
 
     // Check graph layer sequence before inserting convert layers
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     CHECK(CheckSequence(graph.cbegin(),
-                        graph.cend(),
-                        &IsLayerOfType<armnn::InputLayer>,
-                        &IsLayerOfType<armnn::InputLayer>,
-                        &IsLayerOfType<armnn::MemCopyLayer>,
-                        &IsLayerOfType<armnn::FloorLayer>,
-                        &IsLayerOfType<armnn::AdditionLayer>,
-                        &IsLayerOfType<armnn::OutputLayer>));
+                       graph.cend(),
+                       &IsLayerOfType<armnn::InputLayer>,
+                       &IsLayerOfType<armnn::InputLayer>,
+                       &IsLayerOfType<armnn::MemCopyLayer>,
+                       &IsLayerOfType<armnn::FloorLayer>,
+                       &IsLayerOfType<armnn::AdditionLayer>,
+                       &IsLayerOfType<armnn::OutputLayer>));
+    ARMNN_NO_DEPRECATE_WARN_END
 
     // Check layers have Float16 DataType
     for (auto& layer : graph)
@@ -405,19 +409,21 @@ TEST_CASE("InsertConvertersTest")
     }
 
     // Check sequence of layers after inserting convert layers
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     CHECK(CheckSequence(graph.cbegin(),
-                        graph.cend(),
-                        &IsLayerOfType<armnn::InputLayer>,
-                        &IsLayerOfType<armnn::InputLayer>,
-                        &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
-                        &IsLayerOfType<armnn::MemCopyLayer>,
-                        &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
-                        &IsLayerOfType<armnn::FloorLayer>,
-                        &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
-                        &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
-                        &IsLayerOfType<armnn::AdditionLayer>,
-                        &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
-                        &IsLayerOfType<armnn::OutputLayer>));
+                       graph.cend(),
+                       &IsLayerOfType<armnn::InputLayer>,
+                       &IsLayerOfType<armnn::InputLayer>,
+                       &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
+                       &IsLayerOfType<armnn::MemCopyLayer>,
+                       &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
+                       &IsLayerOfType<armnn::FloorLayer>,
+                       &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
+                       &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
+                       &IsLayerOfType<armnn::AdditionLayer>,
+                       &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
+                       &IsLayerOfType<armnn::OutputLayer>));
+    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 void CreateConvolution2dGraph(Graph &graph, const unsigned int* inputShape,
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index e0d3a222fe..6b3fe0f211 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -10,7 +10,6 @@
 #include <armnn/Descriptors.hpp>
 #include <armnn/IRuntime.hpp>
 #include <armnn/INetwork.hpp>
-#include <armnn/TypesUtils.hpp>
 #include <armnn/profiling/ArmNNProfiling.hpp>
 
@@ -19,9 +18,6 @@
 #include <test/ProfilingTestUtils.hpp>
 
-#include <HeapProfiling.hpp>
-#include <LeakChecking.hpp>
-
 #ifdef WITH_VALGRIND
 #include <valgrind/memcheck.h>
 #endif
@@ -76,7 +72,9 @@ TEST_CASE("RuntimePreImportInputs")
     auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
     auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     auto addLayer = testNetwork->AddAdditionLayer("add layer");
+    ARMNN_NO_DEPRECATE_WARN_END
     auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
 
     TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
@@ -1306,7 +1304,9 @@ TEST_CASE("RuntimeOptimizeImportOff_LoadNetworkImportOn")
     auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
     auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     auto addLayer = testNetwork->AddAdditionLayer("add layer");
+    ARMNN_NO_DEPRECATE_WARN_END
     auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
 
     TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
@@ -1349,7 +1349,9 @@ TEST_CASE("RuntimeOptimizeExportOff_LoadNetworkExportOn")
     auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
     auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     auto addLayer = testNetwork->AddAdditionLayer("add layer");
+    ARMNN_NO_DEPRECATE_WARN_END
     auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
 
     TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
@@ -1392,7 +1394,9 @@ TEST_CASE("RuntimeOptimizeImportOn_LoadNetworkImportOff")
     auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
     auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     auto addLayer = testNetwork->AddAdditionLayer("add layer");
+    ARMNN_NO_DEPRECATE_WARN_END
     auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
 
     TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
@@ -1435,7 +1439,9 @@ TEST_CASE("RuntimeOptimizeExportOn_LoadNetworkExportOff")
     auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
     auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     auto addLayer = testNetwork->AddAdditionLayer("add layer");
+    ARMNN_NO_DEPRECATE_WARN_END
     auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
 
     TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
@@ -1483,7 +1489,9 @@ TEST_CASE("SyncExecutePreImportInputsHappyPath")
     auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
     auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     auto addLayer = testNetwork->AddAdditionLayer("add layer");
+    ARMNN_NO_DEPRECATE_WARN_END
     auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
 
     TensorInfo tensorInfo{ { 4 }, armnn::DataType::Signed32 };
diff --git a/src/armnn/test/ShapeInferenceTests.cpp b/src/armnn/test/ShapeInferenceTests.cpp
index 7b5d73a4e5..c33b248dc1 100644
--- a/src/armnn/test/ShapeInferenceTests.cpp
+++ b/src/armnn/test/ShapeInferenceTests.cpp
@@ -181,7 +181,9 @@ TEST_CASE("AbsTest")
 
 TEST_CASE("AdditionTest")
 {
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     CreateGraphAndRunTest<AdditionLayer>({{ 5, 7, 6, 2 }, { 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "add");
+    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 TEST_CASE("ArgMinMaxTest")
diff --git a/src/armnn/test/SubgraphViewTests.cpp b/src/armnn/test/SubgraphViewTests.cpp
index e0fd5fe7c1..4fcb476fcf 100644
--- a/src/armnn/test/SubgraphViewTests.cpp
+++ b/src/armnn/test/SubgraphViewTests.cpp
@@ -1054,7 +1054,7 @@ TEST_CASE("MultiInputSingleOutput")
     auto layerX2 = graph.AddLayer<InputLayer>(1, "layerX2");
     auto layerM1 = graph.AddLayer<ActivationLayer>(activationDefaults, "layerM1");
     auto layerM2 = graph.AddLayer<ActivationLayer>(activationDefaults, "layerM2");
-    auto layerM3 = graph.AddLayer<AdditionLayer>("layerM3");
+    auto layerM3 = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "layerM3");
     auto layerX3 = graph.AddLayer<OutputLayer>(0, "layerX3");
 
     //      X1  X2
@@ -1081,7 +1081,7 @@ TEST_CASE("MultiInputSingleOutput")
         [](const Layer & l)
         {
             bool toSelect = (l.GetType() == LayerType::Activation
-                             || l.GetType() == LayerType::Addition);
+                             || l.GetType() == LayerType::ElementwiseBinary);
             return toSelect;
         });
 
@@ -1772,7 +1772,7 @@ TEST_CASE("SubgraphCycles")
     auto m0 = graph.AddLayer<ActivationLayer>(ActivationDescriptor{}, "m0");
     auto x1 = graph.AddLayer<ActivationLayer>(ActivationDescriptor{}, "x1");
     auto m1 = graph.AddLayer<ActivationLayer>(ActivationDescriptor{}, "m1");
-    auto m2 = graph.AddLayer<AdditionLayer>("m2");
+    auto m2 = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "m2");
     auto x2 = graph.AddLayer<ActivationLayer>(ActivationDescriptor{}, "x2");
 
     x0->GetOutputSlot(0).Connect(m0->GetInputSlot(0));
@@ -1872,7 +1872,7 @@ TEST_CASE("SubgraphViewWorkingCopy")
 bool ReplaceConstantMultiplicationWithDepthwise(SubgraphView& subgraph,
                                                 IConnectableLayer* layer)
 {
-    if (layer->GetType() == LayerType::Multiplication)
+    if (layer->GetType() == LayerType::ElementwiseBinary)
     {
         IInputSlot* patternSubgraphInput = &layer->GetInputSlot(0);
         IInputSlot* patternSubgraphConstant = &layer->GetInputSlot(1);
@@ -1937,12 +1937,12 @@ bool ReplaceConstantMultiplicationWithDepthwise(SubgraphView& subgraph,
 bool ReplaceTestMultiplication(SubgraphView& subgraph,
                                IConnectableLayer* layer)
 {
-    if (layer->GetType() == LayerType::Multiplication)
+    if (layer->GetType() == LayerType::ElementwiseBinary)
     {
         switch (layer->GetType())
         {
-            case LayerType::Multiplication:
+            case LayerType::ElementwiseBinary:
                 return ReplaceConstantMultiplicationWithDepthwise(subgraph, layer);
                 break;
             default:
@@ -1993,7 +1993,7 @@ TEST_CASE("SubgraphViewWorkingCopyReplacementFunc")
     auto constant = graph.AddLayer<ConstantLayer>("const");
     constant->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
 
-    IConnectableLayer* mul = graph.AddLayer<MultiplicationLayer>("mul");
+    IConnectableLayer* mul = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul, "mul");
     IConnectableLayer* output = graph.AddLayer<OutputLayer>(0, "output");
 
     // Create connections between layers
@@ -2015,7 +2015,10 @@ TEST_CASE("SubgraphViewWorkingCopyReplacementFunc")
     // Check the WorkingCopy is as expected before replacement
     CHECK(workingCopy.GetIConnectableLayers().size() == 4);
     int idx=0;
-    LayerType expectedSorted[] = {LayerType::Input, LayerType::Constant, LayerType::Multiplication, LayerType::Output};
+    LayerType expectedSorted[] = {LayerType::Input,
+                                  LayerType::Constant,
+                                  LayerType::ElementwiseBinary,
+                                  LayerType::Output};
     workingCopy.ForEachIConnectableLayer([&idx, &expectedSorted](const IConnectableLayer* l)
         {
             CHECK((expectedSorted[idx] == l->GetType()));
@@ -2209,7 +2212,7 @@ TEST_CASE("SubgraphViewWorkingCopyOptimizationViews")
     auto constant = graph.AddLayer<ConstantLayer>("const");
     constant->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
 
-    IConnectableLayer* mul = graph.AddLayer<MultiplicationLayer>("mul");
+    IConnectableLayer* mul = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul, "mul");
     IConnectableLayer* output = graph.AddLayer<OutputLayer>(0, "output");
 
     // Create connections between layers
@@ -2230,7 +2233,10 @@ TEST_CASE("SubgraphViewWorkingCopyOptimizationViews")
     // Check the WorkingCopy is as expected before replacement
     int idx=0;
-    LayerType expectedSorted[] = {LayerType::Input, LayerType::Constant, LayerType::Multiplication, LayerType::Output};
+    LayerType expectedSorted[] = {LayerType::Input,
+                                  LayerType::Constant,
+                                  LayerType::ElementwiseBinary,
+                                  LayerType::Output};
     workingCopy.ForEachIConnectableLayer([&idx, &expectedSorted](const IConnectableLayer* l)
         {
             CHECK((expectedSorted[idx] == l->GetType()));
@@ -2285,7 +2291,7 @@ TEST_CASE("SubgraphViewWorkingCopyReplaceSlots")
     auto constant = graph.AddLayer<ConstantLayer>("const");
     constant->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
 
-    IConnectableLayer* mul = graph.AddLayer<MultiplicationLayer>("mul");
+    IConnectableLayer* mul = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul, "mul");
     IConnectableLayer* output = graph.AddLayer<OutputLayer>(0, "output");
 
     // Create connections between layers
@@ -2306,7 +2312,10 @@ TEST_CASE("SubgraphViewWorkingCopyReplaceSlots")
     // Check the WorkingCopy is as expected before replacement
     CHECK(workingCopy.GetIConnectableLayers().size() == 4);
     int idx=0;
-    LayerType expectedSorted[] = {LayerType::Input, LayerType::Constant, LayerType::Multiplication, LayerType::Output};
+    LayerType expectedSorted[] = {LayerType::Input,
+                                  LayerType::Constant,
+                                  LayerType::ElementwiseBinary,
+                                  LayerType::Output};
     workingCopy.ForEachIConnectableLayer([&idx, &expectedSorted](const IConnectableLayer* l)
         {
             CHECK((expectedSorted[idx] == l->GetType()));
@@ -2346,7 +2355,7 @@ TEST_CASE("SubgraphViewWorkingCopyCloneInputAndOutputSlots")
     auto constant = graph.AddLayer<ConstantLayer>("const");
     constant->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
 
-    IConnectableLayer* mul = graph.AddLayer<MultiplicationLayer>("mul");
+    IConnectableLayer* mul = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul, "mul");
     armnn::ViewsDescriptor splitterDesc(2,4);
     IConnectableLayer* split = graph.AddLayer<SplitterLayer>(splitterDesc, "split");
     IConnectableLayer* abs = graph.AddLayer<ActivationLayer>(ActivationFunction::Abs, "abs");
@@ -2411,7 +2420,7 @@ TEST_CASE("SubgraphViewWorkingCopyCloneInputAndOutputSlots")
     CHECK(workingCopy.GetIConnectableLayers().size() == 4);
     int idx=0;
     LayerType expectedSorted[] = {LayerType::Constant,
-                                  LayerType::Multiplication,
+                                  LayerType::ElementwiseBinary,
                                   LayerType::Splitter,
                                   LayerType::Activation};
     workingCopy.ForEachIConnectableLayer([&idx, &expectedSorted](const IConnectableLayer* l)
@@ -2532,7 +2541,7 @@ TEST_CASE("MultipleInputMultipleOutputSlots_SubstituteGraph")
     Layer* convLayer = graph.AddLayer<Convolution2dLayer>(Convolution2dDescriptor(), "conv");
     Layer* reluLayer = graph.AddLayer<ActivationLayer>(ActivationDescriptor(), "activation");
     Layer* constLayer = graph.AddLayer<ConstantLayer>("const");
-    Layer* addLayer = graph.AddLayer<AdditionLayer>("add");
+    Layer* addLayer = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "add");
     Layer* outputLayer1 = graph.AddLayer<OutputLayer>(0, "output1");
     Layer* outputLayer2 = graph.AddLayer<OutputLayer>(1, "output2");
 
@@ -2583,7 +2592,7 @@ TEST_CASE("MultipleInputMultipleOutputSlots_SubstituteGraph")
         // GetWorkingCopy() has caused address pointer of convolution layer to change.
         // Finding new address pointer...
-        if (layer->GetType() == LayerType::Addition)
+        if (layer->GetType() == LayerType::ElementwiseBinary)
         {
             addCopyLayer = layer;
         }
@@ -2634,7 +2643,7 @@ TEST_CASE("MultipleInputMultipleOutputSlots_SubstituteGraphNewSlots")
     Layer* convLayer = graph.AddLayer<Convolution2dLayer>(Convolution2dDescriptor(), "conv");
     Layer* reluLayer = graph.AddLayer<ActivationLayer>(ActivationDescriptor(), "activation");
     Layer* constLayer = graph.AddLayer<ConstantLayer>("const");
-    Layer* addLayer = graph.AddLayer<AdditionLayer>("add");
+    Layer* addLayer = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "add");
     Layer* outputLayer1 = graph.AddLayer<OutputLayer>(0, "output1");
     Layer* outputLayer2 = graph.AddLayer<OutputLayer>(1, "output2");
 
@@ -2660,7 +2669,7 @@ TEST_CASE("MultipleInputMultipleOutputSlots_SubstituteGraphNewSlots")
     {
         // GetWorkingCopy() has caused address pointer of convolution layer to change.
         // Finding new address pointer...
-        if (layer->GetType() == LayerType::Addition)
+        if (layer->GetType() == LayerType::ElementwiseBinary)
         {
             addCopyLayer = layer;
         }
diff --git a/src/armnn/test/TestNameOnlyLayerVisitor.cpp b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
index 497c36b079..eb488a5bcb 100644
--- a/src/armnn/test/TestNameOnlyLayerVisitor.cpp
+++ b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2021,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -34,31 +34,40 @@ TEST_CASE(#testName) \
 
 TEST_SUITE("TestNameOnlyLayerVisitor")
 {
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Addition, CheckAdditionLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Addition, CheckAdditionLayerVisitorNameNullptr)
+ARMNN_NO_DEPRECATE_WARN_END
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Dequantize, CheckDequantizeLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Dequantize, CheckDequantizeLayerVisitorNameNullptr)
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Division, CheckDivisionLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Division, CheckDivisionLayerVisitorNameNullptr)
+ARMNN_NO_DEPRECATE_WARN_END
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Floor, CheckFloorLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Floor, CheckFloorLayerVisitorNameNullptr)
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Maximum, CheckMaximumLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Maximum, CheckMaximumLayerVisitorNameNullptr)
+ARMNN_NO_DEPRECATE_WARN_END
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Merge, CheckMergeLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Merge, CheckMergeLayerVisitorNameNullptr)
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Minimum, CheckMinimumLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Minimum, CheckMinimumLayerVisitorNameNullptr)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Multiplication, CheckMultiplicationLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Multiplication, CheckMultiplicationLayerVisitorNameNullptr)
+ARMNN_NO_DEPRECATE_WARN_END
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Prelu, CheckPreluLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Prelu, CheckPreluLayerVisitorNameNullptr)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Quantize, CheckQuantizeLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Quantize, CheckQuantizeLayerVisitorNameNullptr)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Rank, CheckRankLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Rank, CheckRankLayerVisitorNameNullptr)
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Subtraction, CheckSubtractionLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Subtraction, CheckSubtractionLayerVisitorNameNullptr)
+ARMNN_NO_DEPRECATE_WARN_END
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Switch, CheckSwitchLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Switch, CheckSwitchLayerVisitorNameNullptr)
-
 }
diff --git a/src/armnn/test/optimizations/FuseActivationTests.cpp b/src/armnn/test/optimizations/FuseActivationTests.cpp
index 3b8917192d..2ccbc9418c 100644
--- a/src/armnn/test/optimizations/FuseActivationTests.cpp
+++ b/src/armnn/test/optimizations/FuseActivationTests.cpp
@@ -1,11 +1,10 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #include "LayersFwd.hpp"
-#include <Network.hpp>
 #include <ResolveType.hpp>
 #include <armnn/INetwork.hpp>
 #include <GraphUtils.hpp>
@@ -238,6 +237,7 @@ public:
     }
 };
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 struct MultiplicationTest
 {
@@ -272,7 +272,9 @@ struct MultiplicationTest
         return {};
     }
 };
+ARMNN_NO_DEPRECATE_WARN_END
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 struct AdditionTest
 {
@@ -307,7 +309,9 @@ struct AdditionTest
         return {};
     }
 };
+ARMNN_NO_DEPRECATE_WARN_END
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 struct SubtractionTest
 {
@@ -342,7 +346,9 @@ struct SubtractionTest
         return {};
     }
 };
+ARMNN_NO_DEPRECATE_WARN_END
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 struct DivisionTest
 {
@@ -377,6 +383,7 @@ struct DivisionTest
         return {};
     }
 };
+ARMNN_NO_DEPRECATE_WARN_END
 
 template<typename LayerTest, DataType ArmnnType>
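
Where these tests must keep exercising the deprecated layers until their removal, the patch brackets each call with Arm NN's suppression macros from armnn/Deprecated.hpp, so -Wdeprecated-declarations stays in force for the rest of the translation unit. A minimal sketch of the pattern (BuildLegacyAdd is a hypothetical wrapper, not part of the patch):

    #include <armnn/Deprecated.hpp>
    #include <armnn/INetwork.hpp>

    void BuildLegacyAdd(armnn::INetwork& network)
    {
        ARMNN_NO_DEPRECATE_WARN_BEGIN
        // Deliberate use of the deprecated API; the macro pair pushes and
        // pops the compiler's diagnostic state around just this call.
        network.AddAdditionLayer("add layer");
        ARMNN_NO_DEPRECATE_WARN_END
    }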