author     Mike Kelly <mike.kelly@arm.com>           2023-03-15 15:06:23 +0000
committer  TeresaARM <teresa.charlinreyes@arm.com>   2023-03-30 13:40:17 +0000
commit     52e90bf59ecbe90d33368d8fc1fd120f07658aaf (patch)
tree       7ea7d3bb8148ce3973e0fd6abcd951437211255d /src/armnn
parent     41f9d2a5bc060f6c63e80621ff2264a66fb298bd (diff)
download   armnn-52e90bf59ecbe90d33368d8fc1fd120f07658aaf.tar.gz
IVGCVSW-3808 Deprecation notices for old ElementwiseBinary layers
* Added Deprecation notices for old ElementwiseBinary layers.

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: Iebbbaff38cc9c347b25eb2f9054c914a4f931c68
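For callers migrating off the deprecated entry points, the replacement is the single ElementwiseBinary layer parameterised by a BinaryOperation. A minimal sketch of the migration (hedged: the ElementwiseBinaryDescriptor constructor taking a BinaryOperation and the slot-wiring calls are assumed from their use elsewhere in this diff and the public INetwork API):

#include <armnn/INetwork.hpp>
#include <armnn/Descriptors.hpp>

void BuildAddNetwork(armnn::INetwork& net)
{
    using namespace armnn;

    IConnectableLayer* in0 = net.AddInputLayer(0, "input 0");
    IConnectableLayer* in1 = net.AddInputLayer(1, "input 1");

    // Before (deprecated, scheduled for removal in 24.02):
    //   IConnectableLayer* add = net.AddAdditionLayer("add");
    // After: one layer type covers Add/Sub/Mul/Div/Maximum/Minimum.
    IConnectableLayer* add = net.AddElementwiseBinaryLayer(
        ElementwiseBinaryDescriptor(BinaryOperation::Add), "add");

    IConnectableLayer* out = net.AddOutputLayer(0, "output");

    in0->GetOutputSlot(0).Connect(add->GetInputSlot(0));
    in1->GetOutputSlot(0).Connect(add->GetInputSlot(1));
    add->GetOutputSlot(0).Connect(out->GetInputSlot(0));
}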
Diffstat (limited to 'src/armnn')
-rw-r--r--  src/armnn/LayersFwd.hpp                                | 10
-rw-r--r--  src/armnn/Network.cpp                                  | 24
-rw-r--r--  src/armnn/Network.hpp                                  |  7
-rw-r--r--  src/armnn/layers/AdditionLayer.cpp                     |  4
-rw-r--r--  src/armnn/layers/AdditionLayer.hpp                     |  5
-rw-r--r--  src/armnn/layers/DivisionLayer.cpp                     |  4
-rw-r--r--  src/armnn/layers/DivisionLayer.hpp                     |  5
-rw-r--r--  src/armnn/layers/ElementwiseBaseLayer.hpp              |  5
-rw-r--r--  src/armnn/layers/MaximumLayer.cpp                      |  4
-rw-r--r--  src/armnn/layers/MaximumLayer.hpp                      |  5
-rw-r--r--  src/armnn/layers/MinimumLayer.cpp                      |  4
-rw-r--r--  src/armnn/layers/MinimumLayer.hpp                      |  5
-rw-r--r--  src/armnn/layers/MultiplicationLayer.cpp               |  4
-rw-r--r--  src/armnn/layers/MultiplicationLayer.hpp               |  5
-rw-r--r--  src/armnn/layers/SubtractionLayer.cpp                  |  4
-rw-r--r--  src/armnn/layers/SubtractionLayer.hpp                  |  6
-rw-r--r--  src/armnn/test/OptimizerTests.cpp                      | 46
-rw-r--r--  src/armnn/test/RuntimeTests.cpp                        | 18
-rw-r--r--  src/armnn/test/ShapeInferenceTests.cpp                 |  2
-rw-r--r--  src/armnn/test/SubgraphViewTests.cpp                   | 45
-rw-r--r--  src/armnn/test/TestNameOnlyLayerVisitor.cpp            | 13
-rw-r--r--  src/armnn/test/optimizations/FuseActivationTests.cpp   | 11
22 files changed, 168 insertions(+), 68 deletions(-)
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index f634272316..44b1699e36 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -108,7 +108,9 @@ constexpr LayerType LayerEnumOf(const T* = nullptr);
#define DECLARE_LAYER(LayerName) DECLARE_LAYER_IMPL(, LayerName)
DECLARE_LAYER(Activation)
+ARMNN_NO_DEPRECATE_WARN_BEGIN
DECLARE_LAYER(Addition)
+ARMNN_NO_DEPRECATE_WARN_END
DECLARE_LAYER(ArgMinMax)
DECLARE_LAYER(BatchMatMul)
DECLARE_LAYER(BatchNormalization)
@@ -127,7 +129,9 @@ DECLARE_LAYER(DepthToSpace)
DECLARE_LAYER(DepthwiseConvolution2d)
DECLARE_LAYER(Dequantize)
DECLARE_LAYER(DetectionPostProcess)
+ARMNN_NO_DEPRECATE_WARN_BEGIN
DECLARE_LAYER(Division)
+ARMNN_NO_DEPRECATE_WARN_END
DECLARE_LAYER(ElementwiseBinary)
DECLARE_LAYER(ElementwiseUnary)
DECLARE_LAYER(FakeQuantization)
@@ -143,13 +147,17 @@ DECLARE_LAYER(LogicalBinary)
DECLARE_LAYER(LogSoftmax)
DECLARE_LAYER(Lstm)
DECLARE_LAYER(Map)
+ARMNN_NO_DEPRECATE_WARN_BEGIN
DECLARE_LAYER(Maximum)
+ARMNN_NO_DEPRECATE_WARN_END
DECLARE_LAYER(Mean)
DECLARE_LAYER(MemCopy)
DECLARE_LAYER(MemImport)
DECLARE_LAYER(Merge)
+ARMNN_NO_DEPRECATE_WARN_BEGIN
DECLARE_LAYER(Minimum)
DECLARE_LAYER(Multiplication)
+ARMNN_NO_DEPRECATE_WARN_END
DECLARE_LAYER(Normalization)
DECLARE_LAYER(Output)
DECLARE_LAYER(Pad)
@@ -174,7 +182,9 @@ DECLARE_LAYER(Splitter)
DECLARE_LAYER(Stack)
DECLARE_LAYER(StandIn)
DECLARE_LAYER(StridedSlice)
+ARMNN_NO_DEPRECATE_WARN_BEGIN
DECLARE_LAYER(Subtraction)
+ARMNN_NO_DEPRECATE_WARN_END
DECLARE_LAYER(Switch)
DECLARE_LAYER(Transpose)
DECLARE_LAYER(TransposeConvolution2d)
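The ARMNN_NO_DEPRECATE_WARN_BEGIN/END pairs added above let Arm NN keep declaring and using the deprecated layers internally without tripping -Werror=deprecated-declarations builds. The real macros come from Arm NN's Deprecated.hpp; a typical pragma-based expansion (an assumption for illustration, not shown in this diff) looks like:

// Hypothetical stand-ins for the real macros, GCC/Clang form:
#define EXAMPLE_NO_DEPRECATE_WARN_BEGIN \
    _Pragma("GCC diagnostic push") \
    _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
#define EXAMPLE_NO_DEPRECATE_WARN_END \
    _Pragma("GCC diagnostic pop")

Every internal use of a deprecated layer in this commit is bracketed this way, so only external callers of the old API see the warning.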
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 9ebb67b593..837b42e172 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -213,12 +213,16 @@ IConnectableLayer* INetwork::AddMergeLayer(const char* name)
IConnectableLayer* INetwork::AddAdditionLayer(const char* name)
{
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
return pNetworkImpl->AddAdditionLayer(name);
+ ARMNN_NO_DEPRECATE_WARN_END
}
IConnectableLayer* INetwork::AddMultiplicationLayer(const char* name)
{
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
return pNetworkImpl->AddMultiplicationLayer(name);
+ ARMNN_NO_DEPRECATE_WARN_END
}
IConnectableLayer* INetwork::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
@@ -308,17 +312,23 @@ IConnectableLayer* INetwork::AddLstmLayer(const LstmDescriptor& descriptor,
IConnectableLayer* INetwork::AddDivisionLayer(const char* name)
{
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
return pNetworkImpl->AddDivisionLayer(name);
+ ARMNN_NO_DEPRECATE_WARN_END
}
IConnectableLayer* INetwork::AddSubtractionLayer(const char* name)
{
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
return pNetworkImpl->AddSubtractionLayer(name);
+ ARMNN_NO_DEPRECATE_WARN_END
}
IConnectableLayer* INetwork::AddMaximumLayer(const char* name)
{
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
return pNetworkImpl->AddMaximumLayer(name);
+ ARMNN_NO_DEPRECATE_WARN_END
}
IConnectableLayer* INetwork::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
@@ -345,7 +355,9 @@ IConnectableLayer* INetwork::AddStridedSliceLayer(const StridedSliceDescriptor&
IConnectableLayer* INetwork::AddMinimumLayer(const char* name)
{
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
return pNetworkImpl->AddMinimumLayer(name);
+ ARMNN_NO_DEPRECATE_WARN_END
}
IConnectableLayer* INetwork::AddGatherLayer(const GatherDescriptor& descriptor,
@@ -1984,22 +1996,30 @@ IConnectableLayer* NetworkImpl::AddSplitterLayer(const ViewsDescriptor& splitter
IConnectableLayer* NetworkImpl::AddMaximumLayer(const char* name)
{
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
return m_Graph->AddLayer<MaximumLayer>(name);
+ ARMNN_NO_DEPRECATE_WARN_END
}
IConnectableLayer* NetworkImpl::AddMinimumLayer(const char* name)
{
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
return m_Graph->AddLayer<MinimumLayer>(name);
+ ARMNN_NO_DEPRECATE_WARN_END
}
IConnectableLayer* NetworkImpl::AddAdditionLayer(const char* name)
{
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
return m_Graph->AddLayer<AdditionLayer>(name);
+ ARMNN_NO_DEPRECATE_WARN_END
}
IConnectableLayer* NetworkImpl::AddMultiplicationLayer(const char* name)
{
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
return m_Graph->AddLayer<MultiplicationLayer>(name);
+ ARMNN_NO_DEPRECATE_WARN_END
}
IConnectableLayer* NetworkImpl::AddOutputLayer(LayerBindingId id, const char* name)
@@ -2238,12 +2258,16 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
IConnectableLayer* NetworkImpl::AddDivisionLayer(const char* name)
{
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
return m_Graph->AddLayer<DivisionLayer>(name);
+ ARMNN_NO_DEPRECATE_WARN_END
}
IConnectableLayer* NetworkImpl::AddSubtractionLayer(const char* name)
{
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
return m_Graph->AddLayer<SubtractionLayer>(name);
+ ARMNN_NO_DEPRECATE_WARN_END
}
IConnectableLayer* NetworkImpl::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 03642ce993..c6bf0859f7 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -43,7 +43,7 @@ public:
IConnectableLayer* AddActivationLayer(const ActivationDescriptor& activationDescriptor,
const char* name = nullptr);
-
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
IConnectableLayer* AddAdditionLayer(const char* name = nullptr);
IConnectableLayer* AddArgMinMaxLayer(const ArgMinMaxDescriptor& desc,
@@ -93,6 +93,7 @@ public:
const ConstTensor& anchors,
const char* name = nullptr);
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
IConnectableLayer* AddDivisionLayer(const char* name = nullptr);
IConnectableLayer* AddElementwiseBinaryLayer(const ElementwiseBinaryDescriptor& elementwiseBinaryDescriptor,
@@ -132,12 +133,15 @@ public:
const LstmInputParams& params,
const char* name = nullptr);
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
IConnectableLayer* AddMaximumLayer(const char* name = nullptr);
IConnectableLayer* AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name = nullptr);
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
IConnectableLayer* AddMinimumLayer(const char* name = nullptr);
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
IConnectableLayer* AddMultiplicationLayer(const char* name = nullptr);
IConnectableLayer* AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
@@ -208,6 +212,7 @@ public:
IConnectableLayer* AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
const char* name = nullptr);
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
IConnectableLayer* AddSubtractionLayer(const char* name = nullptr);
IConnectableLayer* AddSwitchLayer(const char* name = nullptr);
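The notices themselves come from ARMNN_DEPRECATED_MSG_REMOVAL_DATE, which carries both a migration hint and the release in which the symbol disappears. A hypothetical reduction to the standard C++14 attribute (only the macro's name and arguments are confirmed by this diff; the expansion below is assumed):

// Illustrative expansion, assuming both arguments are string literals:
#define EXAMPLE_DEPRECATED_MSG_REMOVAL_DATE(message, removedIn) \
    [[deprecated(message " This will be removed in " removedIn ".")]]

class EXAMPLE_DEPRECATED_MSG_REMOVAL_DATE("Use ElementwiseBinaryLayer instead.", "24.02")
    ExampleLayer {};
// Any use of ExampleLayer now emits -Wdeprecated-declarations at compile time.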
diff --git a/src/armnn/layers/AdditionLayer.cpp b/src/armnn/layers/AdditionLayer.cpp
index 7117c14f92..cae96ad422 100644
--- a/src/armnn/layers/AdditionLayer.cpp
+++ b/src/armnn/layers/AdditionLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -27,10 +27,12 @@ std::unique_ptr<IWorkload> AdditionLayer::CreateWorkload(const IWorkloadFactory&
return factory.CreateWorkload(LayerType::Addition, descriptor, PrepInfoAndDesc(descriptor));
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
AdditionLayer* AdditionLayer::Clone(Graph& graph) const
{
return CloneBase<AdditionLayer>(graph, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
void AdditionLayer::ExecuteStrategy(IStrategy &strategy) const
{
diff --git a/src/armnn/layers/AdditionLayer.hpp b/src/armnn/layers/AdditionLayer.hpp
index 6980677cde..cd20ff58cb 100644
--- a/src/armnn/layers/AdditionLayer.hpp
+++ b/src/armnn/layers/AdditionLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,7 +10,8 @@
namespace armnn
{
/// This layer represents an addition operation.
-class AdditionLayer : public ElementwiseBaseLayer
+class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ElementwiseBinaryLayer instead.", "24.02")
+ AdditionLayer : public ElementwiseBaseLayer
{
public:
/// Makes a workload for the Addition type.
diff --git a/src/armnn/layers/DivisionLayer.cpp b/src/armnn/layers/DivisionLayer.cpp
index e4e2a7d8b7..db9f93da6a 100644
--- a/src/armnn/layers/DivisionLayer.cpp
+++ b/src/armnn/layers/DivisionLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -27,10 +27,12 @@ std::unique_ptr<IWorkload> DivisionLayer::CreateWorkload(const IWorkloadFactory&
return factory.CreateWorkload(LayerType::Division, descriptor, PrepInfoAndDesc(descriptor));
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
DivisionLayer* DivisionLayer::Clone(Graph& graph) const
{
return CloneBase<DivisionLayer>(graph, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
void DivisionLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/DivisionLayer.hpp b/src/armnn/layers/DivisionLayer.hpp
index 398a9477fd..bad96ea3ff 100644
--- a/src/armnn/layers/DivisionLayer.hpp
+++ b/src/armnn/layers/DivisionLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -11,7 +11,8 @@ namespace armnn
{
/// This layer represents a division operation.
-class DivisionLayer : public ElementwiseBaseLayer
+class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ElementwiseBinaryLayer instead.", "24.02")
+ DivisionLayer : public ElementwiseBaseLayer
{
public:
/// Makes a workload for the Division type.
diff --git a/src/armnn/layers/ElementwiseBaseLayer.hpp b/src/armnn/layers/ElementwiseBaseLayer.hpp
index 17e8b446e0..79c49b5351 100644
--- a/src/armnn/layers/ElementwiseBaseLayer.hpp
+++ b/src/armnn/layers/ElementwiseBaseLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -13,7 +13,8 @@ namespace armnn
/// NOTE: this is an abstract class to encapsulate the element wise operations, it does not implement:
/// std::unique_ptr<IWorkload> Layer::CreateWorkload(const IWorkloadFactory& factory) const = 0;
/// Layer* Clone(Graph& graph) const = 0;
-class ElementwiseBaseLayer : public Layer
+class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ElementwiseBinaryLayer instead.", "24.02")
+ ElementwiseBaseLayer : public Layer
{
public:
/// Check if the input tensor shape(s)
diff --git a/src/armnn/layers/MaximumLayer.cpp b/src/armnn/layers/MaximumLayer.cpp
index f074cf92bd..6e180a260f 100644
--- a/src/armnn/layers/MaximumLayer.cpp
+++ b/src/armnn/layers/MaximumLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -26,10 +26,12 @@ std::unique_ptr<IWorkload> MaximumLayer::CreateWorkload(const IWorkloadFactory&
return factory.CreateWorkload(LayerType::Maximum, descriptor, PrepInfoAndDesc(descriptor));
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
MaximumLayer* MaximumLayer::Clone(Graph& graph) const
{
return CloneBase<MaximumLayer>(graph, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
void MaximumLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/MaximumLayer.hpp b/src/armnn/layers/MaximumLayer.hpp
index 2b113a428d..31b773ea94 100644
--- a/src/armnn/layers/MaximumLayer.hpp
+++ b/src/armnn/layers/MaximumLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -11,7 +11,8 @@ namespace armnn
{
/// This layer represents a maximum operation.
-class MaximumLayer : public ElementwiseBaseLayer
+class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ElementwiseBinaryLayer instead.", "24.02")
+ MaximumLayer : public ElementwiseBaseLayer
{
public:
/// Makes a workload for the Maximum type.
diff --git a/src/armnn/layers/MinimumLayer.cpp b/src/armnn/layers/MinimumLayer.cpp
index f3661f9b5b..061794c0a7 100644
--- a/src/armnn/layers/MinimumLayer.cpp
+++ b/src/armnn/layers/MinimumLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -27,10 +27,12 @@ std::unique_ptr<IWorkload> MinimumLayer::CreateWorkload(const IWorkloadFactory&
return factory.CreateWorkload(LayerType::Minimum, descriptor, PrepInfoAndDesc(descriptor));
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
MinimumLayer* MinimumLayer::Clone(Graph& graph) const
{
return CloneBase<MinimumLayer>(graph, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
void MinimumLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/MinimumLayer.hpp b/src/armnn/layers/MinimumLayer.hpp
index 17ef55ef9a..795d317959 100644
--- a/src/armnn/layers/MinimumLayer.hpp
+++ b/src/armnn/layers/MinimumLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -11,7 +11,8 @@ namespace armnn
{
/// This layer represents a minimum operation.
-class MinimumLayer : public ElementwiseBaseLayer
+class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ElementwiseBinaryLayer instead.", "24.02")
+ MinimumLayer : public ElementwiseBaseLayer
{
public:
/// Makes a workload for the Minimum type.
diff --git a/src/armnn/layers/MultiplicationLayer.cpp b/src/armnn/layers/MultiplicationLayer.cpp
index bcc77dcc51..cc669471ab 100644
--- a/src/armnn/layers/MultiplicationLayer.cpp
+++ b/src/armnn/layers/MultiplicationLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -27,10 +27,12 @@ std::unique_ptr<IWorkload> MultiplicationLayer::CreateWorkload(const IWorkloadFa
return factory.CreateWorkload(LayerType::Multiplication, descriptor, PrepInfoAndDesc(descriptor));
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
MultiplicationLayer* MultiplicationLayer::Clone(Graph& graph) const
{
return CloneBase<MultiplicationLayer>(graph, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
void MultiplicationLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/MultiplicationLayer.hpp b/src/armnn/layers/MultiplicationLayer.hpp
index 2dea82279b..c1ddb3a0cb 100644
--- a/src/armnn/layers/MultiplicationLayer.hpp
+++ b/src/armnn/layers/MultiplicationLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -11,7 +11,8 @@ namespace armnn
{
/// This layer represents a multiplication operation.
-class MultiplicationLayer : public ElementwiseBaseLayer
+class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ElementwiseBinaryLayer instead.", "24.02")
+ MultiplicationLayer : public ElementwiseBaseLayer
{
public:
/// Makes a workload for the Multiplication type.
diff --git a/src/armnn/layers/SubtractionLayer.cpp b/src/armnn/layers/SubtractionLayer.cpp
index 0e92013351..19e4d5a83e 100644
--- a/src/armnn/layers/SubtractionLayer.cpp
+++ b/src/armnn/layers/SubtractionLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -27,10 +27,12 @@ std::unique_ptr<IWorkload> SubtractionLayer::CreateWorkload(const IWorkloadFacto
return factory.CreateWorkload(LayerType::Subtraction, descriptor, PrepInfoAndDesc(descriptor));
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
SubtractionLayer* SubtractionLayer::Clone(Graph& graph) const
{
return CloneBase<SubtractionLayer>(graph, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
void SubtractionLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/SubtractionLayer.hpp b/src/armnn/layers/SubtractionLayer.hpp
index 86d5f9ea03..6d2a2c5000 100644
--- a/src/armnn/layers/SubtractionLayer.hpp
+++ b/src/armnn/layers/SubtractionLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -11,7 +11,9 @@ namespace armnn
{
/// This layer represents a subtraction operation.
-class SubtractionLayer : public ElementwiseBaseLayer
+
+class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ElementwiseBinaryLayer instead.", "24.02")
+ SubtractionLayer : public ElementwiseBaseLayer
{
public:
/// Makes a workload for the Subtraction type.
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index f83900404b..ff42ab8cbb 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -339,7 +339,9 @@ TEST_CASE("InsertConvertersTest")
armnn::Layer* head = graph.AddLayer<armnn::OutputLayer>(0, "output");
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
head = graph.InsertNewLayer<armnn::AdditionLayer>(head->GetInputSlot(0), "");
+ ARMNN_NO_DEPRECATE_WARN_END
head->GetOutputHandler().SetTensorInfo(info);
graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(1), inputId++, "")
@@ -355,14 +357,16 @@ TEST_CASE("InsertConvertersTest")
->GetOutputHandler().SetTensorInfo(info);
// Check graph layer sequence before inserting convert layers
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
CHECK(CheckSequence(graph.cbegin(),
- graph.cend(),
- &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::MemCopyLayer>,
- &IsLayerOfType<armnn::FloorLayer>,
- &IsLayerOfType<armnn::AdditionLayer>,
- &IsLayerOfType<armnn::OutputLayer>));
+ graph.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::MemCopyLayer>,
+ &IsLayerOfType<armnn::FloorLayer>,
+ &IsLayerOfType<armnn::AdditionLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
+ ARMNN_NO_DEPRECATE_WARN_END
// Check layers have Float16 DataType
for (auto& layer : graph)
@@ -405,19 +409,21 @@ TEST_CASE("InsertConvertersTest")
}
// Check sequence of layers after inserting convert layers
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
CHECK(CheckSequence(graph.cbegin(),
- graph.cend(),
- &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
- &IsLayerOfType<armnn::MemCopyLayer>,
- &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
- &IsLayerOfType<armnn::FloorLayer>,
- &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
- &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
- &IsLayerOfType<armnn::AdditionLayer>,
- &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
- &IsLayerOfType<armnn::OutputLayer>));
+ graph.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
+ &IsLayerOfType<armnn::MemCopyLayer>,
+ &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
+ &IsLayerOfType<armnn::FloorLayer>,
+ &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
+ &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
+ &IsLayerOfType<armnn::AdditionLayer>,
+ &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
+ &IsLayerOfType<armnn::OutputLayer>));
+ ARMNN_NO_DEPRECATE_WARN_END
}
void CreateConvolution2dGraph(Graph &graph, const unsigned int* inputShape,
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index e0d3a222fe..6b3fe0f211 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,7 +10,6 @@
#include <armnn/Descriptors.hpp>
#include <armnn/IRuntime.hpp>
#include <armnn/INetwork.hpp>
-#include <armnn/TypesUtils.hpp>
#include <armnn/profiling/ArmNNProfiling.hpp>
@@ -19,9 +18,6 @@
#include <test/ProfilingTestUtils.hpp>
-#include <HeapProfiling.hpp>
-#include <LeakChecking.hpp>
-
#ifdef WITH_VALGRIND
#include <valgrind/memcheck.h>
#endif
@@ -76,7 +72,9 @@ TEST_CASE("RuntimePreImportInputs")
auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
auto addLayer = testNetwork->AddAdditionLayer("add layer");
+ ARMNN_NO_DEPRECATE_WARN_END
auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
@@ -1306,7 +1304,9 @@ TEST_CASE("RuntimeOptimizeImportOff_LoadNetworkImportOn")
auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
auto addLayer = testNetwork->AddAdditionLayer("add layer");
+ ARMNN_NO_DEPRECATE_WARN_END
auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
@@ -1349,7 +1349,9 @@ TEST_CASE("RuntimeOptimizeExportOff_LoadNetworkExportOn")
auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
auto addLayer = testNetwork->AddAdditionLayer("add layer");
+ ARMNN_NO_DEPRECATE_WARN_END
auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
@@ -1392,7 +1394,9 @@ TEST_CASE("RuntimeOptimizeImportOn_LoadNetworkImportOff")
auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
auto addLayer = testNetwork->AddAdditionLayer("add layer");
+ ARMNN_NO_DEPRECATE_WARN_END
auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
@@ -1435,7 +1439,9 @@ TEST_CASE("RuntimeOptimizeExportOn_LoadNetworkExportOff")
auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
auto addLayer = testNetwork->AddAdditionLayer("add layer");
+ ARMNN_NO_DEPRECATE_WARN_END
auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
@@ -1483,7 +1489,9 @@ TEST_CASE("SyncExecutePreImportInputsHappyPath")
auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
auto addLayer = testNetwork->AddAdditionLayer("add layer");
+ ARMNN_NO_DEPRECATE_WARN_END
auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
TensorInfo tensorInfo{ { 4 }, armnn::DataType::Signed32 };
diff --git a/src/armnn/test/ShapeInferenceTests.cpp b/src/armnn/test/ShapeInferenceTests.cpp
index 7b5d73a4e5..c33b248dc1 100644
--- a/src/armnn/test/ShapeInferenceTests.cpp
+++ b/src/armnn/test/ShapeInferenceTests.cpp
@@ -181,7 +181,9 @@ TEST_CASE("AbsTest")
TEST_CASE("AdditionTest")
{
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
CreateGraphAndRunTest<AdditionLayer>({{ 5, 7, 6, 2 }, { 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "add");
+ ARMNN_NO_DEPRECATE_WARN_END
}
TEST_CASE("ArgMinMaxTest")
diff --git a/src/armnn/test/SubgraphViewTests.cpp b/src/armnn/test/SubgraphViewTests.cpp
index e0fd5fe7c1..4fcb476fcf 100644
--- a/src/armnn/test/SubgraphViewTests.cpp
+++ b/src/armnn/test/SubgraphViewTests.cpp
@@ -1054,7 +1054,7 @@ TEST_CASE("MultiInputSingleOutput")
auto layerX2 = graph.AddLayer<InputLayer>(1, "layerX2");
auto layerM1 = graph.AddLayer<ActivationLayer>(activationDefaults, "layerM1");
auto layerM2 = graph.AddLayer<ActivationLayer>(activationDefaults, "layerM2");
- auto layerM3 = graph.AddLayer<AdditionLayer>("layerM3");
+ auto layerM3 = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "layerM3");
auto layerX3 = graph.AddLayer<OutputLayer>(0, "layerX3");
// X1 X2
@@ -1081,7 +1081,7 @@ TEST_CASE("MultiInputSingleOutput")
[](const Layer & l)
{
bool toSelect = (l.GetType() == LayerType::Activation
- || l.GetType() == LayerType::Addition);
+ || l.GetType() == LayerType::ElementwiseBinary);
return toSelect;
});
@@ -1772,7 +1772,7 @@ TEST_CASE("SubgraphCycles")
auto m0 = graph.AddLayer<ActivationLayer>(ActivationDescriptor{}, "m0");
auto x1 = graph.AddLayer<ActivationLayer>(ActivationDescriptor{}, "x1");
auto m1 = graph.AddLayer<ActivationLayer>(ActivationDescriptor{}, "m1");
- auto m2 = graph.AddLayer<AdditionLayer>("m2");
+ auto m2 = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "m2");
auto x2 = graph.AddLayer<ActivationLayer>(ActivationDescriptor{}, "x2");
x0->GetOutputSlot(0).Connect(m0->GetInputSlot(0));
@@ -1872,7 +1872,7 @@ TEST_CASE("SubgraphViewWorkingCopy")
bool ReplaceConstantMultiplicationWithDepthwise(SubgraphView& subgraph,
IConnectableLayer* layer)
{
- if (layer->GetType() == LayerType::Multiplication)
+ if (layer->GetType() == LayerType::ElementwiseBinary)
{
IInputSlot* patternSubgraphInput = &layer->GetInputSlot(0);
IInputSlot* patternSubgraphConstant = &layer->GetInputSlot(1);
@@ -1937,12 +1937,12 @@ bool ReplaceConstantMultiplicationWithDepthwise(SubgraphView& subgraph,
bool ReplaceTestMultiplication(SubgraphView& subgraph,
IConnectableLayer* layer)
{
- if (layer->GetType() == LayerType::Multiplication)
+ if (layer->GetType() == LayerType::ElementwiseBinary)
{
switch (layer->GetType())
{
- case LayerType::Multiplication:
+ case LayerType::ElementwiseBinary:
return ReplaceConstantMultiplicationWithDepthwise(subgraph, layer);
break;
default:
@@ -1993,7 +1993,7 @@ TEST_CASE("SubgraphViewWorkingCopyReplacementFunc")
auto constant = graph.AddLayer<ConstantLayer>("const");
constant->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
- IConnectableLayer* mul = graph.AddLayer<MultiplicationLayer>("mul");
+ IConnectableLayer* mul = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul, "mul");
IConnectableLayer* output = graph.AddLayer<OutputLayer>(0, "output");
// Create connections between layers
@@ -2015,7 +2015,10 @@ TEST_CASE("SubgraphViewWorkingCopyReplacementFunc")
// Check the WorkingCopy is as expected before replacement
CHECK(workingCopy.GetIConnectableLayers().size() == 4);
int idx=0;
- LayerType expectedSorted[] = {LayerType::Input, LayerType::Constant, LayerType::Multiplication, LayerType::Output};
+ LayerType expectedSorted[] = {LayerType::Input,
+ LayerType::Constant,
+ LayerType::ElementwiseBinary,
+ LayerType::Output};
workingCopy.ForEachIConnectableLayer([&idx, &expectedSorted](const IConnectableLayer* l)
{
CHECK((expectedSorted[idx] == l->GetType()));
@@ -2209,7 +2212,7 @@ TEST_CASE("SubgraphViewWorkingCopyOptimizationViews")
auto constant = graph.AddLayer<ConstantLayer>("const");
constant->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
- IConnectableLayer* mul = graph.AddLayer<MultiplicationLayer>("mul");
+ IConnectableLayer* mul = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul, "mul");
IConnectableLayer* output = graph.AddLayer<OutputLayer>(0, "output");
// Create connections between layers
@@ -2230,7 +2233,10 @@ TEST_CASE("SubgraphViewWorkingCopyOptimizationViews")
// Check the WorkingCopy is as expected before replacement
int idx=0;
- LayerType expectedSorted[] = {LayerType::Input, LayerType::Constant, LayerType::Multiplication, LayerType::Output};
+ LayerType expectedSorted[] = {LayerType::Input,
+ LayerType::Constant,
+ LayerType::ElementwiseBinary,
+ LayerType::Output};
workingCopy.ForEachIConnectableLayer([&idx, &expectedSorted](const IConnectableLayer* l)
{
CHECK((expectedSorted[idx] == l->GetType()));
@@ -2285,7 +2291,7 @@ TEST_CASE("SubgraphViewWorkingCopyReplaceSlots")
auto constant = graph.AddLayer<ConstantLayer>("const");
constant->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
- IConnectableLayer* mul = graph.AddLayer<MultiplicationLayer>("mul");
+ IConnectableLayer* mul = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul, "mul");
IConnectableLayer* output = graph.AddLayer<OutputLayer>(0, "output");
// Create connections between layers
@@ -2306,7 +2312,10 @@ TEST_CASE("SubgraphViewWorkingCopyReplaceSlots")
// Check the WorkingCopy is as expected before replacement
CHECK(workingCopy.GetIConnectableLayers().size() == 4);
int idx=0;
- LayerType expectedSorted[] = {LayerType::Input, LayerType::Constant, LayerType::Multiplication, LayerType::Output};
+ LayerType expectedSorted[] = {LayerType::Input,
+ LayerType::Constant,
+ LayerType::ElementwiseBinary,
+ LayerType::Output};
workingCopy.ForEachIConnectableLayer([&idx, &expectedSorted](const IConnectableLayer* l)
{
CHECK((expectedSorted[idx] == l->GetType()));
@@ -2346,7 +2355,7 @@ TEST_CASE("SubgraphViewWorkingCopyCloneInputAndOutputSlots")
auto constant = graph.AddLayer<ConstantLayer>("const");
constant->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
- IConnectableLayer* mul = graph.AddLayer<MultiplicationLayer>("mul");
+ IConnectableLayer* mul = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul, "mul");
armnn::ViewsDescriptor splitterDesc(2,4);
IConnectableLayer* split = graph.AddLayer<SplitterLayer>(splitterDesc, "split");
IConnectableLayer* abs = graph.AddLayer<ActivationLayer>(ActivationFunction::Abs, "abs");
@@ -2411,7 +2420,7 @@ TEST_CASE("SubgraphViewWorkingCopyCloneInputAndOutputSlots")
CHECK(workingCopy.GetIConnectableLayers().size() == 4);
int idx=0;
LayerType expectedSorted[] = {LayerType::Constant,
- LayerType::Multiplication,
+ LayerType::ElementwiseBinary,
LayerType::Splitter,
LayerType::Activation};
workingCopy.ForEachIConnectableLayer([&idx, &expectedSorted](const IConnectableLayer* l)
@@ -2532,7 +2541,7 @@ TEST_CASE("MultipleInputMultipleOutputSlots_SubstituteGraph")
Layer* convLayer = graph.AddLayer<Convolution2dLayer>(Convolution2dDescriptor(), "conv");
Layer* reluLayer = graph.AddLayer<ActivationLayer>(ActivationDescriptor(), "activation");
Layer* constLayer = graph.AddLayer<ConstantLayer>("const");
- Layer* addLayer = graph.AddLayer<AdditionLayer>("add");
+ Layer* addLayer = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "add");
Layer* outputLayer1 = graph.AddLayer<OutputLayer>(0, "output1");
Layer* outputLayer2 = graph.AddLayer<OutputLayer>(1, "output2");
@@ -2583,7 +2592,7 @@ TEST_CASE("MultipleInputMultipleOutputSlots_SubstituteGraph")
// GetWorkingCopy() has caused address pointer of convolution layer to change.
// Finding new address pointer...
- if (layer->GetType() == LayerType::Addition)
+ if (layer->GetType() == LayerType::ElementwiseBinary)
{
addCopyLayer = layer;
}
@@ -2634,7 +2643,7 @@ TEST_CASE("MultipleInputMultipleOutputSlots_SubstituteGraphNewSlots")
Layer* convLayer = graph.AddLayer<Convolution2dLayer>(Convolution2dDescriptor(), "conv");
Layer* reluLayer = graph.AddLayer<ActivationLayer>(ActivationDescriptor(), "activation");
Layer* constLayer = graph.AddLayer<ConstantLayer>("const");
- Layer* addLayer = graph.AddLayer<AdditionLayer>("add");
+ Layer* addLayer = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "add");
Layer* outputLayer1 = graph.AddLayer<OutputLayer>(0, "output1");
Layer* outputLayer2 = graph.AddLayer<OutputLayer>(1, "output2");
@@ -2660,7 +2669,7 @@ TEST_CASE("MultipleInputMultipleOutputSlots_SubstituteGraphNewSlots")
{
// GetWorkingCopy() has caused address pointer of convolution layer to change.
// Finding new address pointer...
- if (layer->GetType() == LayerType::Addition)
+ if (layer->GetType() == LayerType::ElementwiseBinary)
{
addCopyLayer = layer;
}
diff --git a/src/armnn/test/TestNameOnlyLayerVisitor.cpp b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
index 497c36b079..eb488a5bcb 100644
--- a/src/armnn/test/TestNameOnlyLayerVisitor.cpp
+++ b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -34,31 +34,40 @@ TEST_CASE(#testName) \
TEST_SUITE("TestNameOnlyLayerVisitor")
{
+ARMNN_NO_DEPRECATE_WARN_BEGIN
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Addition, CheckAdditionLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Addition, CheckAdditionLayerVisitorNameNullptr)
+ARMNN_NO_DEPRECATE_WARN_END
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Dequantize, CheckDequantizeLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Dequantize, CheckDequantizeLayerVisitorNameNullptr)
+ARMNN_NO_DEPRECATE_WARN_BEGIN
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Division, CheckDivisionLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Division, CheckDivisionLayerVisitorNameNullptr)
+ARMNN_NO_DEPRECATE_WARN_END
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Floor, CheckFloorLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Floor, CheckFloorLayerVisitorNameNullptr)
+ARMNN_NO_DEPRECATE_WARN_BEGIN
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Maximum, CheckMaximumLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Maximum, CheckMaximumLayerVisitorNameNullptr)
+ARMNN_NO_DEPRECATE_WARN_END
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Merge, CheckMergeLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Merge, CheckMergeLayerVisitorNameNullptr)
+ARMNN_NO_DEPRECATE_WARN_BEGIN
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Minimum, CheckMinimumLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Minimum, CheckMinimumLayerVisitorNameNullptr)
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Multiplication, CheckMultiplicationLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Multiplication, CheckMultiplicationLayerVisitorNameNullptr)
+ARMNN_NO_DEPRECATE_WARN_END
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Prelu, CheckPreluLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Prelu, CheckPreluLayerVisitorNameNullptr)
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Quantize, CheckQuantizeLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Quantize, CheckQuantizeLayerVisitorNameNullptr)
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Rank, CheckRankLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Rank, CheckRankLayerVisitorNameNullptr)
+ARMNN_NO_DEPRECATE_WARN_BEGIN
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Subtraction, CheckSubtractionLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Subtraction, CheckSubtractionLayerVisitorNameNullptr)
+ARMNN_NO_DEPRECATE_WARN_END
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Switch, CheckSwitchLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Switch, CheckSwitchLayerVisitorNameNullptr)
-
}
diff --git a/src/armnn/test/optimizations/FuseActivationTests.cpp b/src/armnn/test/optimizations/FuseActivationTests.cpp
index 3b8917192d..2ccbc9418c 100644
--- a/src/armnn/test/optimizations/FuseActivationTests.cpp
+++ b/src/armnn/test/optimizations/FuseActivationTests.cpp
@@ -1,11 +1,10 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "LayersFwd.hpp"
-#include <Network.hpp>
#include <ResolveType.hpp>
#include <armnn/INetwork.hpp>
#include <GraphUtils.hpp>
@@ -238,6 +237,7 @@ public:
}
};
+ARMNN_NO_DEPRECATE_WARN_BEGIN
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
struct MultiplicationTest
{
@@ -272,7 +272,9 @@ struct MultiplicationTest
return {};
}
};
+ARMNN_NO_DEPRECATE_WARN_END
+ARMNN_NO_DEPRECATE_WARN_BEGIN
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
struct AdditionTest
{
@@ -307,7 +309,9 @@ struct AdditionTest
return {};
}
};
+ARMNN_NO_DEPRECATE_WARN_END
+ARMNN_NO_DEPRECATE_WARN_BEGIN
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
struct SubtractionTest
{
@@ -342,7 +346,9 @@ struct SubtractionTest
return {};
}
};
+ARMNN_NO_DEPRECATE_WARN_END
+ARMNN_NO_DEPRECATE_WARN_BEGIN
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
struct DivisionTest
{
@@ -377,6 +383,7 @@ struct DivisionTest
return {};
}
};
+ARMNN_NO_DEPRECATE_WARN_END
template<typename LayerTest,
DataType ArmnnType>