path: root/src/armnn/test
author      Mike Kelly <mike.kelly@arm.com>  2023-03-31 18:00:00 +0100
committer   Mike Kelly <mike.kelly@arm.com>  2023-03-31 18:03:19 +0100
commit      1a05aad6d5adf3b25848ffd873a0e0e82756aa06 (patch)
tree        973583209a4eeb916b42922189dc312a4d1effa2 /src/armnn/test
parent      c4fb0dd4145e05123c546458ba5d281abfcc2b28 (diff)
download    armnn-1a05aad6d5adf3b25848ffd873a0e0e82756aa06.tar.gz
Revert "IVGCVSW-3808 Deprecation notices for old ElementwiseBinary layers"
This reverts commit 52e90bf59ecbe90d33368d8fc1fd120f07658aaf.

Change-Id: I5a0d244593d8e760ee7ba0c9d38c02377e1bdc24
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Diffstat (limited to 'src/armnn/test')
-rw-r--r--   src/armnn/test/OptimizerTests.cpp                     | 46
-rw-r--r--   src/armnn/test/RuntimeTests.cpp                       | 18
-rw-r--r--   src/armnn/test/ShapeInferenceTests.cpp                |  2
-rw-r--r--   src/armnn/test/SubgraphViewTests.cpp                  | 45
-rw-r--r--   src/armnn/test/TestNameOnlyLayerVisitor.cpp           | 13
-rw-r--r--   src/armnn/test/optimizations/FuseActivationTests.cpp  | 11
6 files changed, 47 insertions(+), 88 deletions(-)
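
For context: the reverted commit had marked the single-operation arithmetic layers (AdditionLayer, SubtractionLayer, MultiplicationLayer, DivisionLayer, MaximumLayer, MinimumLayer) as deprecated in favour of the unified ElementwiseBinaryLayer, which is why the tests below had been wrapped in ARMNN_NO_DEPRECATE_WARN_BEGIN/END guards and switched to ElementwiseBinary types. This revert restores the old layers to non-deprecated status, so the guards come out and the per-operation layer types return. The sketch below is not part of this commit; it is an illustrative contrast, written against the public INetwork API, of the two ways of adding an addition layer (the ElementwiseBinary variant assumes an Arm NN release that ships that API).

// Illustrative sketch only, not from this commit: the two INetwork calls
// that the deprecation (and this revert) toggles between.
#include <armnn/Descriptors.hpp>
#include <armnn/INetwork.hpp>

void BuildAddGraph(armnn::INetwork& net)
{
    armnn::IConnectableLayer* in0 = net.AddInputLayer(0, "input 0");
    armnn::IConnectableLayer* in1 = net.AddInputLayer(1, "input 1");

    // Per-operation layer; after this revert it is no longer deprecated,
    // so no ARMNN_NO_DEPRECATE_WARN_BEGIN/END guard is needed around it.
    armnn::IConnectableLayer* add = net.AddAdditionLayer("add");

    // Unified alternative: one layer type parameterised by the operation.
    // armnn::IConnectableLayer* add = net.AddElementwiseBinaryLayer(
    //     armnn::ElementwiseBinaryDescriptor(armnn::BinaryOperation::Add), "add");

    armnn::IConnectableLayer* out = net.AddOutputLayer(0, "output");
    in0->GetOutputSlot(0).Connect(add->GetInputSlot(0));
    in1->GetOutputSlot(0).Connect(add->GetInputSlot(1));
    add->GetOutputSlot(0).Connect(out->GetInputSlot(0));
}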
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index ff42ab8cbb..f83900404b 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -339,9 +339,7 @@ TEST_CASE("InsertConvertersTest")
armnn::Layer* head = graph.AddLayer<armnn::OutputLayer>(0, "output");
- ARMNN_NO_DEPRECATE_WARN_BEGIN
head = graph.InsertNewLayer<armnn::AdditionLayer>(head->GetInputSlot(0), "");
- ARMNN_NO_DEPRECATE_WARN_END
head->GetOutputHandler().SetTensorInfo(info);
graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(1), inputId++, "")
@@ -357,16 +355,14 @@ TEST_CASE("InsertConvertersTest")
->GetOutputHandler().SetTensorInfo(info);
// Check graph layer sequence before inserting convert layers
- ARMNN_NO_DEPRECATE_WARN_BEGIN
CHECK(CheckSequence(graph.cbegin(),
- graph.cend(),
- &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::MemCopyLayer>,
- &IsLayerOfType<armnn::FloorLayer>,
- &IsLayerOfType<armnn::AdditionLayer>,
- &IsLayerOfType<armnn::OutputLayer>));
- ARMNN_NO_DEPRECATE_WARN_END
+ graph.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::MemCopyLayer>,
+ &IsLayerOfType<armnn::FloorLayer>,
+ &IsLayerOfType<armnn::AdditionLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
// Check layers have Float16 DataType
for (auto& layer : graph)
@@ -409,21 +405,19 @@ TEST_CASE("InsertConvertersTest")
}
// Check sequence of layers after inserting convert layers
- ARMNN_NO_DEPRECATE_WARN_BEGIN
CHECK(CheckSequence(graph.cbegin(),
- graph.cend(),
- &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
- &IsLayerOfType<armnn::MemCopyLayer>,
- &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
- &IsLayerOfType<armnn::FloorLayer>,
- &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
- &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
- &IsLayerOfType<armnn::AdditionLayer>,
- &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
- &IsLayerOfType<armnn::OutputLayer>));
- ARMNN_NO_DEPRECATE_WARN_END
+ graph.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
+ &IsLayerOfType<armnn::MemCopyLayer>,
+ &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
+ &IsLayerOfType<armnn::FloorLayer>,
+ &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
+ &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
+ &IsLayerOfType<armnn::AdditionLayer>,
+ &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
+ &IsLayerOfType<armnn::OutputLayer>));
}
void CreateConvolution2dGraph(Graph &graph, const unsigned int* inputShape,
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index 6b3fe0f211..e0d3a222fe 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,6 +10,7 @@
#include <armnn/Descriptors.hpp>
#include <armnn/IRuntime.hpp>
#include <armnn/INetwork.hpp>
+#include <armnn/TypesUtils.hpp>
#include <armnn/profiling/ArmNNProfiling.hpp>
@@ -18,6 +19,9 @@
#include <test/ProfilingTestUtils.hpp>
+#include <HeapProfiling.hpp>
+#include <LeakChecking.hpp>
+
#ifdef WITH_VALGRIND
#include <valgrind/memcheck.h>
#endif
@@ -72,9 +76,7 @@ TEST_CASE("RuntimePreImportInputs")
auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
- ARMNN_NO_DEPRECATE_WARN_BEGIN
auto addLayer = testNetwork->AddAdditionLayer("add layer");
- ARMNN_NO_DEPRECATE_WARN_END
auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
@@ -1304,9 +1306,7 @@ TEST_CASE("RuntimeOptimizeImportOff_LoadNetworkImportOn")
auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
- ARMNN_NO_DEPRECATE_WARN_BEGIN
auto addLayer = testNetwork->AddAdditionLayer("add layer");
- ARMNN_NO_DEPRECATE_WARN_END
auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
@@ -1349,9 +1349,7 @@ TEST_CASE("RuntimeOptimizeExportOff_LoadNetworkExportOn")
auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
- ARMNN_NO_DEPRECATE_WARN_BEGIN
auto addLayer = testNetwork->AddAdditionLayer("add layer");
- ARMNN_NO_DEPRECATE_WARN_END
auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
@@ -1394,9 +1392,7 @@ TEST_CASE("RuntimeOptimizeImportOn_LoadNetworkImportOff")
auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
- ARMNN_NO_DEPRECATE_WARN_BEGIN
auto addLayer = testNetwork->AddAdditionLayer("add layer");
- ARMNN_NO_DEPRECATE_WARN_END
auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
@@ -1439,9 +1435,7 @@ TEST_CASE("RuntimeOptimizeExportOn_LoadNetworkExportOff")
auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
- ARMNN_NO_DEPRECATE_WARN_BEGIN
auto addLayer = testNetwork->AddAdditionLayer("add layer");
- ARMNN_NO_DEPRECATE_WARN_END
auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
@@ -1489,9 +1483,7 @@ TEST_CASE("SyncExecutePreImportInputsHappyPath")
auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
- ARMNN_NO_DEPRECATE_WARN_BEGIN
auto addLayer = testNetwork->AddAdditionLayer("add layer");
- ARMNN_NO_DEPRECATE_WARN_END
auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
TensorInfo tensorInfo{ { 4 }, armnn::DataType::Signed32 };
diff --git a/src/armnn/test/ShapeInferenceTests.cpp b/src/armnn/test/ShapeInferenceTests.cpp
index c33b248dc1..7b5d73a4e5 100644
--- a/src/armnn/test/ShapeInferenceTests.cpp
+++ b/src/armnn/test/ShapeInferenceTests.cpp
@@ -181,9 +181,7 @@ TEST_CASE("AbsTest")
TEST_CASE("AdditionTest")
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
CreateGraphAndRunTest<AdditionLayer>({{ 5, 7, 6, 2 }, { 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "add");
- ARMNN_NO_DEPRECATE_WARN_END
}
TEST_CASE("ArgMinMaxTest")
diff --git a/src/armnn/test/SubgraphViewTests.cpp b/src/armnn/test/SubgraphViewTests.cpp
index 4fcb476fcf..e0fd5fe7c1 100644
--- a/src/armnn/test/SubgraphViewTests.cpp
+++ b/src/armnn/test/SubgraphViewTests.cpp
@@ -1054,7 +1054,7 @@ TEST_CASE("MultiInputSingleOutput")
auto layerX2 = graph.AddLayer<InputLayer>(1, "layerX2");
auto layerM1 = graph.AddLayer<ActivationLayer>(activationDefaults, "layerM1");
auto layerM2 = graph.AddLayer<ActivationLayer>(activationDefaults, "layerM2");
- auto layerM3 = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "layerM3");
+ auto layerM3 = graph.AddLayer<AdditionLayer>("layerM3");
auto layerX3 = graph.AddLayer<OutputLayer>(0, "layerX3");
// X1 X2
@@ -1081,7 +1081,7 @@ TEST_CASE("MultiInputSingleOutput")
[](const Layer & l)
{
bool toSelect = (l.GetType() == LayerType::Activation
- || l.GetType() == LayerType::ElementwiseBinary);
+ || l.GetType() == LayerType::Addition);
return toSelect;
});
@@ -1772,7 +1772,7 @@ TEST_CASE("SubgraphCycles")
auto m0 = graph.AddLayer<ActivationLayer>(ActivationDescriptor{}, "m0");
auto x1 = graph.AddLayer<ActivationLayer>(ActivationDescriptor{}, "x1");
auto m1 = graph.AddLayer<ActivationLayer>(ActivationDescriptor{}, "m1");
- auto m2 = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "m2");
+ auto m2 = graph.AddLayer<AdditionLayer>("m2");
auto x2 = graph.AddLayer<ActivationLayer>(ActivationDescriptor{}, "x2");
x0->GetOutputSlot(0).Connect(m0->GetInputSlot(0));
@@ -1872,7 +1872,7 @@ TEST_CASE("SubgraphViewWorkingCopy")
bool ReplaceConstantMultiplicationWithDepthwise(SubgraphView& subgraph,
IConnectableLayer* layer)
{
- if (layer->GetType() == LayerType::ElementwiseBinary)
+ if (layer->GetType() == LayerType::Multiplication)
{
IInputSlot* patternSubgraphInput = &layer->GetInputSlot(0);
IInputSlot* patternSubgraphConstant = &layer->GetInputSlot(1);
@@ -1937,12 +1937,12 @@ bool ReplaceConstantMultiplicationWithDepthwise(SubgraphView& subgraph,
bool ReplaceTestMultiplication(SubgraphView& subgraph,
IConnectableLayer* layer)
{
- if (layer->GetType() == LayerType::ElementwiseBinary)
+ if (layer->GetType() == LayerType::Multiplication)
{
switch (layer->GetType())
{
- case LayerType::ElementwiseBinary:
+ case LayerType::Multiplication:
return ReplaceConstantMultiplicationWithDepthwise(subgraph, layer);
break;
default:
@@ -1993,7 +1993,7 @@ TEST_CASE("SubgraphViewWorkingCopyReplacementFunc")
auto constant = graph.AddLayer<ConstantLayer>("const");
constant->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
- IConnectableLayer* mul = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul, "mul");
+ IConnectableLayer* mul = graph.AddLayer<MultiplicationLayer>("mul");
IConnectableLayer* output = graph.AddLayer<OutputLayer>(0, "output");
// Create connections between layers
@@ -2015,10 +2015,7 @@ TEST_CASE("SubgraphViewWorkingCopyReplacementFunc")
// Check the WorkingCopy is as expected before replacement
CHECK(workingCopy.GetIConnectableLayers().size() == 4);
int idx=0;
- LayerType expectedSorted[] = {LayerType::Input,
- LayerType::Constant,
- LayerType::ElementwiseBinary,
- LayerType::Output};
+ LayerType expectedSorted[] = {LayerType::Input, LayerType::Constant, LayerType::Multiplication, LayerType::Output};
workingCopy.ForEachIConnectableLayer([&idx, &expectedSorted](const IConnectableLayer* l)
{
CHECK((expectedSorted[idx] == l->GetType()));
@@ -2212,7 +2209,7 @@ TEST_CASE("SubgraphViewWorkingCopyOptimizationViews")
auto constant = graph.AddLayer<ConstantLayer>("const");
constant->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
- IConnectableLayer* mul = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul, "mul");
+ IConnectableLayer* mul = graph.AddLayer<MultiplicationLayer>("mul");
IConnectableLayer* output = graph.AddLayer<OutputLayer>(0, "output");
// Create connections between layers
@@ -2233,10 +2230,7 @@ TEST_CASE("SubgraphViewWorkingCopyOptimizationViews")
// Check the WorkingCopy is as expected before replacement
int idx=0;
- LayerType expectedSorted[] = {LayerType::Input,
- LayerType::Constant,
- LayerType::ElementwiseBinary,
- LayerType::Output};
+ LayerType expectedSorted[] = {LayerType::Input, LayerType::Constant, LayerType::Multiplication, LayerType::Output};
workingCopy.ForEachIConnectableLayer([&idx, &expectedSorted](const IConnectableLayer* l)
{
CHECK((expectedSorted[idx] == l->GetType()));
@@ -2291,7 +2285,7 @@ TEST_CASE("SubgraphViewWorkingCopyReplaceSlots")
auto constant = graph.AddLayer<ConstantLayer>("const");
constant->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
- IConnectableLayer* mul = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul, "mul");
+ IConnectableLayer* mul = graph.AddLayer<MultiplicationLayer>("mul");
IConnectableLayer* output = graph.AddLayer<OutputLayer>(0, "output");
// Create connections between layers
@@ -2312,10 +2306,7 @@ TEST_CASE("SubgraphViewWorkingCopyReplaceSlots")
// Check the WorkingCopy is as expected before replacement
CHECK(workingCopy.GetIConnectableLayers().size() == 4);
int idx=0;
- LayerType expectedSorted[] = {LayerType::Input,
- LayerType::Constant,
- LayerType::ElementwiseBinary,
- LayerType::Output};
+ LayerType expectedSorted[] = {LayerType::Input, LayerType::Constant, LayerType::Multiplication, LayerType::Output};
workingCopy.ForEachIConnectableLayer([&idx, &expectedSorted](const IConnectableLayer* l)
{
CHECK((expectedSorted[idx] == l->GetType()));
@@ -2355,7 +2346,7 @@ TEST_CASE("SubgraphViewWorkingCopyCloneInputAndOutputSlots")
auto constant = graph.AddLayer<ConstantLayer>("const");
constant->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
- IConnectableLayer* mul = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul, "mul");
+ IConnectableLayer* mul = graph.AddLayer<MultiplicationLayer>("mul");
armnn::ViewsDescriptor splitterDesc(2,4);
IConnectableLayer* split = graph.AddLayer<SplitterLayer>(splitterDesc, "split");
IConnectableLayer* abs = graph.AddLayer<ActivationLayer>(ActivationFunction::Abs, "abs");
@@ -2420,7 +2411,7 @@ TEST_CASE("SubgraphViewWorkingCopyCloneInputAndOutputSlots")
CHECK(workingCopy.GetIConnectableLayers().size() == 4);
int idx=0;
LayerType expectedSorted[] = {LayerType::Constant,
- LayerType::ElementwiseBinary,
+ LayerType::Multiplication,
LayerType::Splitter,
LayerType::Activation};
workingCopy.ForEachIConnectableLayer([&idx, &expectedSorted](const IConnectableLayer* l)
@@ -2541,7 +2532,7 @@ TEST_CASE("MultipleInputMultipleOutputSlots_SubstituteGraph")
Layer* convLayer = graph.AddLayer<Convolution2dLayer>(Convolution2dDescriptor(), "conv");
Layer* reluLayer = graph.AddLayer<ActivationLayer>(ActivationDescriptor(), "activation");
Layer* constLayer = graph.AddLayer<ConstantLayer>("const");
- Layer* addLayer = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "add");
+ Layer* addLayer = graph.AddLayer<AdditionLayer>("add");
Layer* outputLayer1 = graph.AddLayer<OutputLayer>(0, "output1");
Layer* outputLayer2 = graph.AddLayer<OutputLayer>(1, "output2");
@@ -2592,7 +2583,7 @@ TEST_CASE("MultipleInputMultipleOutputSlots_SubstituteGraph")
// GetWorkingCopy() has caused address pointer of convolution layer to change.
// Finding new address pointer...
- if (layer->GetType() == LayerType::ElementwiseBinary)
+ if (layer->GetType() == LayerType::Addition)
{
addCopyLayer = layer;
}
@@ -2643,7 +2634,7 @@ TEST_CASE("MultipleInputMultipleOutputSlots_SubstituteGraphNewSlots")
Layer* convLayer = graph.AddLayer<Convolution2dLayer>(Convolution2dDescriptor(), "conv");
Layer* reluLayer = graph.AddLayer<ActivationLayer>(ActivationDescriptor(), "activation");
Layer* constLayer = graph.AddLayer<ConstantLayer>("const");
- Layer* addLayer = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "add");
+ Layer* addLayer = graph.AddLayer<AdditionLayer>("add");
Layer* outputLayer1 = graph.AddLayer<OutputLayer>(0, "output1");
Layer* outputLayer2 = graph.AddLayer<OutputLayer>(1, "output2");
@@ -2669,7 +2660,7 @@ TEST_CASE("MultipleInputMultipleOutputSlots_SubstituteGraphNewSlots")
{
// GetWorkingCopy() has caused address pointer of convolution layer to change.
// Finding new address pointer...
- if (layer->GetType() == LayerType::ElementwiseBinary)
+ if (layer->GetType() == LayerType::Addition)
{
addCopyLayer = layer;
}
diff --git a/src/armnn/test/TestNameOnlyLayerVisitor.cpp b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
index eb488a5bcb..497c36b079 100644
--- a/src/armnn/test/TestNameOnlyLayerVisitor.cpp
+++ b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017,2019-2021,2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -34,40 +34,31 @@ TEST_CASE(#testName) \
TEST_SUITE("TestNameOnlyLayerVisitor")
{
-ARMNN_NO_DEPRECATE_WARN_BEGIN
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Addition, CheckAdditionLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Addition, CheckAdditionLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_END
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Dequantize, CheckDequantizeLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Dequantize, CheckDequantizeLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Division, CheckDivisionLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Division, CheckDivisionLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_END
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Floor, CheckFloorLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Floor, CheckFloorLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Maximum, CheckMaximumLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Maximum, CheckMaximumLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_END
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Merge, CheckMergeLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Merge, CheckMergeLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Minimum, CheckMinimumLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Minimum, CheckMinimumLayerVisitorNameNullptr)
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Multiplication, CheckMultiplicationLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Multiplication, CheckMultiplicationLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_END
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Prelu, CheckPreluLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Prelu, CheckPreluLayerVisitorNameNullptr)
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Quantize, CheckQuantizeLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Quantize, CheckQuantizeLayerVisitorNameNullptr)
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Rank, CheckRankLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Rank, CheckRankLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Subtraction, CheckSubtractionLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Subtraction, CheckSubtractionLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_END
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Switch, CheckSwitchLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Switch, CheckSwitchLayerVisitorNameNullptr)
+
}
diff --git a/src/armnn/test/optimizations/FuseActivationTests.cpp b/src/armnn/test/optimizations/FuseActivationTests.cpp
index 2ccbc9418c..3b8917192d 100644
--- a/src/armnn/test/optimizations/FuseActivationTests.cpp
+++ b/src/armnn/test/optimizations/FuseActivationTests.cpp
@@ -1,10 +1,11 @@
//
-// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "LayersFwd.hpp"
+#include <Network.hpp>
#include <ResolveType.hpp>
#include <armnn/INetwork.hpp>
#include <GraphUtils.hpp>
@@ -237,7 +238,6 @@ public:
}
};
-ARMNN_NO_DEPRECATE_WARN_BEGIN
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
struct MultiplicationTest
{
@@ -272,9 +272,7 @@ struct MultiplicationTest
return {};
}
};
-ARMNN_NO_DEPRECATE_WARN_END
-ARMNN_NO_DEPRECATE_WARN_BEGIN
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
struct AdditionTest
{
@@ -309,9 +307,7 @@ struct AdditionTest
return {};
}
};
-ARMNN_NO_DEPRECATE_WARN_END
-ARMNN_NO_DEPRECATE_WARN_BEGIN
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
struct SubtractionTest
{
@@ -346,9 +342,7 @@ struct SubtractionTest
return {};
}
};
-ARMNN_NO_DEPRECATE_WARN_END
-ARMNN_NO_DEPRECATE_WARN_BEGIN
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
struct DivisionTest
{
@@ -383,7 +377,6 @@ struct DivisionTest
return {};
}
};
-ARMNN_NO_DEPRECATE_WARN_END
template<typename LayerTest,
DataType ArmnnType>