author     Mike Kelly <mike.kelly@arm.com>   2023-03-31 18:00:00 +0100
committer  Mike Kelly <mike.kelly@arm.com>   2023-03-31 18:03:19 +0100
commit     1a05aad6d5adf3b25848ffd873a0e0e82756aa06 (patch)
tree       973583209a4eeb916b42922189dc312a4d1effa2
parent     c4fb0dd4145e05123c546458ba5d281abfcc2b28 (diff)
download   armnn-1a05aad6d5adf3b25848ffd873a0e0e82756aa06.tar.gz
Revert "IVGCVSW-3808 Deprecation notices for old ElementwiseBinary layers"
This reverts commit 52e90bf59ecbe90d33368d8fc1fd120f07658aaf.

Change-Id: I5a0d244593d8e760ee7ba0c9d38c02377e1bdc24
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
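For context: the reverted change (52e90bf5) had marked the per-operation entry points such as AddAdditionLayer, AddSubtractionLayer, AddMultiplicationLayer, AddDivisionLayer, AddMaximumLayer and AddMinimumLayer as deprecated in favour of the single AddElementwiseBinaryLayer call, which takes an ElementwiseBinaryDescriptor selecting the operation. The sketch below contrasts the two styles; it mirrors samples/PreImportMemorySample.cpp, omits tensor infos and backend selection, and is illustrative only, not part of this patch.

    // Sketch: old per-operation API (restored without deprecation notices by
    // this revert) next to the unified elementwise-binary API it pointed to.
    #include <armnn/INetwork.hpp>

    int main()
    {
        armnn::INetworkPtr network(armnn::INetwork::Create());
        armnn::IConnectableLayer* input0 = network->AddInputLayer(0, "input 0");
        armnn::IConnectableLayer* input1 = network->AddInputLayer(1, "input 1");

        // Old-style entry point; after this revert it carries no deprecation attribute.
        armnn::IConnectableLayer* add = network->AddAdditionLayer("add layer");

        // Unified entry point suggested by the removed deprecation messages;
        // the descriptor selects the binary operation.
        armnn::IConnectableLayer* addUnified = network->AddElementwiseBinaryLayer(
            armnn::ElementwiseBinaryDescriptor(armnn::BinaryOperation::Add), "add layer (unified)");

        armnn::IConnectableLayer* output = network->AddOutputLayer(2, "output layer");

        input0->GetOutputSlot(0).Connect(add->GetInputSlot(0));
        input1->GetOutputSlot(0).Connect(add->GetInputSlot(1));
        add->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        (void)addUnified; // shown only for API comparison; not wired up in this sketch
        return 0;
    }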
-rw-r--r--  include/armnn/BackendHelper.hpp  6
-rw-r--r--  include/armnn/INetwork.hpp  6
-rw-r--r--  samples/PreImportMemorySample.cpp  2
-rw-r--r--  shim/sl/canonical/Converter.cpp  24
-rw-r--r--  src/armnn/LayersFwd.hpp  10
-rw-r--r--  src/armnn/Network.cpp  24
-rw-r--r--  src/armnn/Network.hpp  7
-rw-r--r--  src/armnn/layers/AdditionLayer.cpp  4
-rw-r--r--  src/armnn/layers/AdditionLayer.hpp  5
-rw-r--r--  src/armnn/layers/DivisionLayer.cpp  4
-rw-r--r--  src/armnn/layers/DivisionLayer.hpp  5
-rw-r--r--  src/armnn/layers/ElementwiseBaseLayer.hpp  5
-rw-r--r--  src/armnn/layers/MaximumLayer.cpp  4
-rw-r--r--  src/armnn/layers/MaximumLayer.hpp  5
-rw-r--r--  src/armnn/layers/MinimumLayer.cpp  4
-rw-r--r--  src/armnn/layers/MinimumLayer.hpp  5
-rw-r--r--  src/armnn/layers/MultiplicationLayer.cpp  4
-rw-r--r--  src/armnn/layers/MultiplicationLayer.hpp  5
-rw-r--r--  src/armnn/layers/SubtractionLayer.cpp  4
-rw-r--r--  src/armnn/layers/SubtractionLayer.hpp  6
-rw-r--r--  src/armnn/test/OptimizerTests.cpp  46
-rw-r--r--  src/armnn/test/RuntimeTests.cpp  18
-rw-r--r--  src/armnn/test/ShapeInferenceTests.cpp  2
-rw-r--r--  src/armnn/test/SubgraphViewTests.cpp  45
-rw-r--r--  src/armnn/test/TestNameOnlyLayerVisitor.cpp  13
-rw-r--r--  src/armnn/test/optimizations/FuseActivationTests.cpp  11
-rw-r--r--  src/armnnSerializer/test/SerializerTests.cpp  16
-rw-r--r--  src/armnnTestUtils/CreateWorkload.hpp  15
-rw-r--r--  src/backends/aclCommon/ArmComputeSubgraphUtils.hpp  8
-rw-r--r--  src/backends/backendsCommon/WorkloadFactoryBase.hpp  8
-rw-r--r--  src/backends/backendsCommon/test/AdditionEndToEndTestImpl.hpp  5
-rw-r--r--  src/backends/backendsCommon/test/EndToEndTestImpl.hpp  5
-rw-r--r--  src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp  12
-rw-r--r--  src/backends/backendsCommon/test/OptimizationViewsTests.cpp  2
-rw-r--r--  src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp  4
-rw-r--r--  src/backends/cl/ClBackend.cpp  8
-rw-r--r--  src/backends/cl/ClLayerSupport.cpp  10
-rw-r--r--  src/backends/cl/ClLayerSupport.hpp  10
-rw-r--r--  src/backends/neon/NeonBackend.cpp  8
-rw-r--r--  src/backends/neon/test/NeonCreateWorkloadTests.cpp  18
-rw-r--r--  src/backends/reference/test/RefCreateWorkloadTests.cpp  38
41 files changed, 84 insertions, 357 deletions
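The mechanics being reverted follow the same pattern in every file below: the public declaration gains an ARMNN_DEPRECATED_MSG_REMOVAL_DATE annotation, the old layer classes are annotated in the same way, and each remaining internal use is bracketed with ARMNN_NO_DEPRECATE_WARN_BEGIN / ARMNN_NO_DEPRECATE_WARN_END so the build stays warning-clean. A rough, hypothetical sketch of that pattern follows; the names ExampleNetwork, ExampleAdditionLayer and Caller are invented, the macros are armnn's own (assumed here to come from include/armnn/Deprecated.hpp), and their exact expansion is not shown.

    #include <armnn/Deprecated.hpp>

    // Hypothetical class standing in for INetwork; illustration only.
    class ExampleNetwork
    {
    public:
        // The reverted change attached a removal-dated message to each old entry point...
        ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
        void AddAdditionLayer();

        void AddElementwiseBinaryLayer();
    };

    // ...annotated the old layer classes the same way...
    class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ElementwiseBinaryLayer instead.", "24.02")
        ExampleAdditionLayer
    {
    };

    // ...and bracketed internal call sites so they compile without warnings.
    void Caller(ExampleNetwork& net)
    {
        ARMNN_NO_DEPRECATE_WARN_BEGIN
        net.AddAdditionLayer();
        ARMNN_NO_DEPRECATE_WARN_END
    }

This revert deletes all three kinds of annotation, leaving the old API and layer classes undeprecated.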
diff --git a/include/armnn/BackendHelper.hpp b/include/armnn/BackendHelper.hpp
index ddf2308da2..85aabe0dd2 100644
--- a/include/armnn/BackendHelper.hpp
+++ b/include/armnn/BackendHelper.hpp
@@ -42,7 +42,6 @@ public:
const ActivationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional());
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use IsElementwiseBinarySupported instead", "24.02")
bool IsAdditionSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
@@ -157,7 +156,6 @@ public:
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported = EmptyOptional());
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use IsElementwiseBinarySupported instead", "24.02")
bool IsDivisionSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
@@ -246,7 +244,6 @@ public:
const LstmInputParamsInfo& paramsInfo,
Optional<std::string&> reasonIfUnsupported = EmptyOptional());
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use IsElementwiseBinarySupported instead", "24.02")
bool IsMaximumSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
@@ -270,13 +267,11 @@ public:
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional());
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use IsElementwiseBinarySupported instead", "24.02")
bool IsMinimumSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional());
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use IsElementwiseBinarySupported instead", "24.02")
bool IsMultiplicationSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
@@ -405,7 +400,6 @@ public:
const StridedSliceDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional());
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use IsElementwiseBinarySupported instead", "24.02")
bool IsSubtractionSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 273753752d..4eac0cfe2a 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -481,13 +481,11 @@ public:
/// Adds an addition layer to the network.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
IConnectableLayer* AddAdditionLayer(const char* name = nullptr);
/// Adds a multiplication layer to the network.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
IConnectableLayer* AddMultiplicationLayer(const char* name = nullptr);
/// Adds a batch normalization layer to the network.
@@ -605,19 +603,16 @@ public:
/// Adds a division layer to the network.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
IConnectableLayer* AddDivisionLayer(const char* name = nullptr);
/// Adds a subtraction layer to the network.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
IConnectableLayer* AddSubtractionLayer(const char* name = nullptr);
/// Add a Maximum layer to the network.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
IConnectableLayer* AddMaximumLayer(const char* name = nullptr);
/// Add a Mean layer to the network.
@@ -650,7 +645,6 @@ public:
/// Add a Minimum layer to the network.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
IConnectableLayer* AddMinimumLayer(const char* name = nullptr);
/// Add Gather layer to the network.
diff --git a/samples/PreImportMemorySample.cpp b/samples/PreImportMemorySample.cpp
index 98f386bfdf..cf196fd20e 100644
--- a/samples/PreImportMemorySample.cpp
+++ b/samples/PreImportMemorySample.cpp
@@ -31,9 +31,7 @@ int main()
armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
- ARMNN_NO_DEPRECATE_WARN_BEGIN
auto addLayer = testNetwork->AddAdditionLayer("add layer");
- ARMNN_NO_DEPRECATE_WARN_END
auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
// Set the tensors in the network.
diff --git a/shim/sl/canonical/Converter.cpp b/shim/sl/canonical/Converter.cpp
index 90fd71154f..be052a6faa 100644
--- a/shim/sl/canonical/Converter.cpp
+++ b/shim/sl/canonical/Converter.cpp
@@ -209,7 +209,6 @@ bool Converter::ConvertAdd(const Operation& operation, const Model& model, Conve
armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsAdditionSupported,
data.m_Backends,
@@ -218,7 +217,6 @@ bool Converter::ConvertAdd(const Operation& operation, const Model& model, Conve
inputInfo0,
inputInfo1,
outputInfo);
- ARMNN_NO_DEPRECATE_WARN_END
};
if(!IsDynamicTensor(outputInfo))
@@ -235,9 +233,7 @@ bool Converter::ConvertAdd(const Operation& operation, const Model& model, Conve
return false;
}
- ARMNN_NO_DEPRECATE_WARN_BEGIN
armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
- ARMNN_NO_DEPRECATE_WARN_END
startLayer->SetBackendId(setBackend);
bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
@@ -1540,7 +1536,6 @@ bool Converter::ConvertDiv(const Operation& operation, const Model& model, Conve
armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsDivisionSupported,
data.m_Backends,
@@ -1549,7 +1544,6 @@ bool Converter::ConvertDiv(const Operation& operation, const Model& model, Conve
input0.GetTensorInfo(),
input1.GetTensorInfo(),
outputInfo);
- ARMNN_NO_DEPRECATE_WARN_END
};
if(!IsDynamicTensor(outputInfo))
@@ -1566,9 +1560,7 @@ bool Converter::ConvertDiv(const Operation& operation, const Model& model, Conve
return false;
}
- ARMNN_NO_DEPRECATE_WARN_BEGIN
armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
- ARMNN_NO_DEPRECATE_WARN_END
startLayer->SetBackendId(setBackend);
bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
@@ -3386,7 +3378,6 @@ bool Converter::ConvertMaximum(const Operation& operation, const Model& model, C
armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsMaximumSupported,
data.m_Backends,
@@ -3395,7 +3386,6 @@ bool Converter::ConvertMaximum(const Operation& operation, const Model& model, C
input0.GetTensorInfo(),
input1.GetTensorInfo(),
outInfo);
- ARMNN_NO_DEPRECATE_WARN_END
};
if(IsDynamicTensor(outInfo))
@@ -3412,9 +3402,7 @@ bool Converter::ConvertMaximum(const Operation& operation, const Model& model, C
return false;
}
- ARMNN_NO_DEPRECATE_WARN_BEGIN
IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
- ARMNN_NO_DEPRECATE_WARN_END
layer->SetBackendId(setBackend);
assert(layer != nullptr);
bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
@@ -3536,7 +3524,6 @@ bool Converter::ConvertMinimum(const Operation& operation, const Model& model, C
armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsMinimumSupported,
data.m_Backends,
@@ -3545,7 +3532,6 @@ bool Converter::ConvertMinimum(const Operation& operation, const Model& model, C
input0.GetTensorInfo(),
input1.GetTensorInfo(),
outputInfo);
- ARMNN_NO_DEPRECATE_WARN_END
};
if(IsDynamicTensor(outputInfo))
@@ -3562,9 +3548,7 @@ bool Converter::ConvertMinimum(const Operation& operation, const Model& model, C
return false;
}
- ARMNN_NO_DEPRECATE_WARN_BEGIN
IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
- ARMNN_NO_DEPRECATE_WARN_END
layer->SetBackendId(setBackend);
assert(layer != nullptr);
bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
@@ -3609,7 +3593,6 @@ bool Converter::ConvertMul(const Operation& operation, const Model& model, Conve
armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsMultiplicationSupported,
data.m_Backends,
@@ -3618,7 +3601,6 @@ bool Converter::ConvertMul(const Operation& operation, const Model& model, Conve
input0.GetTensorInfo(),
input1.GetTensorInfo(),
outputInfo);
- ARMNN_NO_DEPRECATE_WARN_END
};
if(!IsDynamicTensor(outputInfo))
@@ -3635,9 +3617,7 @@ bool Converter::ConvertMul(const Operation& operation, const Model& model, Conve
return false;
}
- ARMNN_NO_DEPRECATE_WARN_BEGIN
armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
- ARMNN_NO_DEPRECATE_WARN_END
startLayer->SetBackendId(setBackend);
bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
@@ -5360,7 +5340,6 @@ bool Converter::ConvertSub(const Operation& operation, const Model& model, Conve
armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsSubtractionSupported,
data.m_Backends,
@@ -5369,7 +5348,6 @@ bool Converter::ConvertSub(const Operation& operation, const Model& model, Conve
input0.GetTensorInfo(),
input1.GetTensorInfo(),
outputInfo);
- ARMNN_NO_DEPRECATE_WARN_END
};
if(IsDynamicTensor(outputInfo))
@@ -5386,9 +5364,7 @@ bool Converter::ConvertSub(const Operation& operation, const Model& model, Conve
return false;
}
- ARMNN_NO_DEPRECATE_WARN_BEGIN
armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
- ARMNN_NO_DEPRECATE_WARN_END
startLayer->SetBackendId(setBackend);
bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 44b1699e36..f634272316 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -108,9 +108,7 @@ constexpr LayerType LayerEnumOf(const T* = nullptr);
#define DECLARE_LAYER(LayerName) DECLARE_LAYER_IMPL(, LayerName)
DECLARE_LAYER(Activation)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
DECLARE_LAYER(Addition)
-ARMNN_NO_DEPRECATE_WARN_END
DECLARE_LAYER(ArgMinMax)
DECLARE_LAYER(BatchMatMul)
DECLARE_LAYER(BatchNormalization)
@@ -129,9 +127,7 @@ DECLARE_LAYER(DepthToSpace)
DECLARE_LAYER(DepthwiseConvolution2d)
DECLARE_LAYER(Dequantize)
DECLARE_LAYER(DetectionPostProcess)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
DECLARE_LAYER(Division)
-ARMNN_NO_DEPRECATE_WARN_END
DECLARE_LAYER(ElementwiseBinary)
DECLARE_LAYER(ElementwiseUnary)
DECLARE_LAYER(FakeQuantization)
@@ -147,17 +143,13 @@ DECLARE_LAYER(LogicalBinary)
DECLARE_LAYER(LogSoftmax)
DECLARE_LAYER(Lstm)
DECLARE_LAYER(Map)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
DECLARE_LAYER(Maximum)
-ARMNN_NO_DEPRECATE_WARN_END
DECLARE_LAYER(Mean)
DECLARE_LAYER(MemCopy)
DECLARE_LAYER(MemImport)
DECLARE_LAYER(Merge)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
DECLARE_LAYER(Minimum)
DECLARE_LAYER(Multiplication)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
DECLARE_LAYER(Normalization)
DECLARE_LAYER(Output)
DECLARE_LAYER(Pad)
@@ -182,9 +174,7 @@ DECLARE_LAYER(Splitter)
DECLARE_LAYER(Stack)
DECLARE_LAYER(StandIn)
DECLARE_LAYER(StridedSlice)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
DECLARE_LAYER(Subtraction)
-ARMNN_NO_DEPRECATE_WARN_END
DECLARE_LAYER(Switch)
DECLARE_LAYER(Transpose)
DECLARE_LAYER(TransposeConvolution2d)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 837b42e172..9ebb67b593 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -213,16 +213,12 @@ IConnectableLayer* INetwork::AddMergeLayer(const char* name)
IConnectableLayer* INetwork::AddAdditionLayer(const char* name)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
return pNetworkImpl->AddAdditionLayer(name);
- ARMNN_NO_DEPRECATE_WARN_END
}
IConnectableLayer* INetwork::AddMultiplicationLayer(const char* name)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
return pNetworkImpl->AddMultiplicationLayer(name);
- ARMNN_NO_DEPRECATE_WARN_END
}
IConnectableLayer* INetwork::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
@@ -312,23 +308,17 @@ IConnectableLayer* INetwork::AddLstmLayer(const LstmDescriptor& descriptor,
IConnectableLayer* INetwork::AddDivisionLayer(const char* name)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
return pNetworkImpl->AddDivisionLayer(name);
- ARMNN_NO_DEPRECATE_WARN_END
}
IConnectableLayer* INetwork::AddSubtractionLayer(const char* name)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
return pNetworkImpl->AddSubtractionLayer(name);
- ARMNN_NO_DEPRECATE_WARN_END
}
IConnectableLayer* INetwork::AddMaximumLayer(const char* name)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
return pNetworkImpl->AddMaximumLayer(name);
- ARMNN_NO_DEPRECATE_WARN_END
}
IConnectableLayer* INetwork::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
@@ -355,9 +345,7 @@ IConnectableLayer* INetwork::AddStridedSliceLayer(const StridedSliceDescriptor&
IConnectableLayer* INetwork::AddMinimumLayer(const char* name)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
return pNetworkImpl->AddMinimumLayer(name);
- ARMNN_NO_DEPRECATE_WARN_END
}
IConnectableLayer* INetwork::AddGatherLayer(const GatherDescriptor& descriptor,
@@ -1996,30 +1984,22 @@ IConnectableLayer* NetworkImpl::AddSplitterLayer(const ViewsDescriptor& splitter
IConnectableLayer* NetworkImpl::AddMaximumLayer(const char* name)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
return m_Graph->AddLayer<MaximumLayer>(name);
- ARMNN_NO_DEPRECATE_WARN_END
}
IConnectableLayer* NetworkImpl::AddMinimumLayer(const char* name)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
return m_Graph->AddLayer<MinimumLayer>(name);
- ARMNN_NO_DEPRECATE_WARN_END
}
IConnectableLayer* NetworkImpl::AddAdditionLayer(const char* name)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
return m_Graph->AddLayer<AdditionLayer>(name);
- ARMNN_NO_DEPRECATE_WARN_END
}
IConnectableLayer* NetworkImpl::AddMultiplicationLayer(const char* name)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
return m_Graph->AddLayer<MultiplicationLayer>(name);
- ARMNN_NO_DEPRECATE_WARN_END
}
IConnectableLayer* NetworkImpl::AddOutputLayer(LayerBindingId id, const char* name)
@@ -2258,16 +2238,12 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
IConnectableLayer* NetworkImpl::AddDivisionLayer(const char* name)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
return m_Graph->AddLayer<DivisionLayer>(name);
- ARMNN_NO_DEPRECATE_WARN_END
}
IConnectableLayer* NetworkImpl::AddSubtractionLayer(const char* name)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
return m_Graph->AddLayer<SubtractionLayer>(name);
- ARMNN_NO_DEPRECATE_WARN_END
}
IConnectableLayer* NetworkImpl::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index c6bf0859f7..03642ce993 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -43,7 +43,7 @@ public:
IConnectableLayer* AddActivationLayer(const ActivationDescriptor& activationDescriptor,
const char* name = nullptr);
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
+
IConnectableLayer* AddAdditionLayer(const char* name = nullptr);
IConnectableLayer* AddArgMinMaxLayer(const ArgMinMaxDescriptor& desc,
@@ -93,7 +93,6 @@ public:
const ConstTensor& anchors,
const char* name = nullptr);
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
IConnectableLayer* AddDivisionLayer(const char* name = nullptr);
IConnectableLayer* AddElementwiseBinaryLayer(const ElementwiseBinaryDescriptor& elementwiseBinaryDescriptor,
@@ -133,15 +132,12 @@ public:
const LstmInputParams& params,
const char* name = nullptr);
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
IConnectableLayer* AddMaximumLayer(const char* name = nullptr);
IConnectableLayer* AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name = nullptr);
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
IConnectableLayer* AddMinimumLayer(const char* name = nullptr);
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
IConnectableLayer* AddMultiplicationLayer(const char* name = nullptr);
IConnectableLayer* AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
@@ -212,7 +208,6 @@ public:
IConnectableLayer* AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
const char* name = nullptr);
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
IConnectableLayer* AddSubtractionLayer(const char* name = nullptr);
IConnectableLayer* AddSwitchLayer(const char* name = nullptr);
diff --git a/src/armnn/layers/AdditionLayer.cpp b/src/armnn/layers/AdditionLayer.cpp
index cae96ad422..7117c14f92 100644
--- a/src/armnn/layers/AdditionLayer.cpp
+++ b/src/armnn/layers/AdditionLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -27,12 +27,10 @@ std::unique_ptr<IWorkload> AdditionLayer::CreateWorkload(const IWorkloadFactory&
return factory.CreateWorkload(LayerType::Addition, descriptor, PrepInfoAndDesc(descriptor));
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
AdditionLayer* AdditionLayer::Clone(Graph& graph) const
{
return CloneBase<AdditionLayer>(graph, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
void AdditionLayer::ExecuteStrategy(IStrategy &strategy) const
{
diff --git a/src/armnn/layers/AdditionLayer.hpp b/src/armnn/layers/AdditionLayer.hpp
index cd20ff58cb..6980677cde 100644
--- a/src/armnn/layers/AdditionLayer.hpp
+++ b/src/armnn/layers/AdditionLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,8 +10,7 @@
namespace armnn
{
/// This layer represents an addition operation.
-class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ElementwiseBinaryLayer instead.", "24.02")
- AdditionLayer : public ElementwiseBaseLayer
+class AdditionLayer : public ElementwiseBaseLayer
{
public:
/// Makes a workload for the Addition type.
diff --git a/src/armnn/layers/DivisionLayer.cpp b/src/armnn/layers/DivisionLayer.cpp
index db9f93da6a..e4e2a7d8b7 100644
--- a/src/armnn/layers/DivisionLayer.cpp
+++ b/src/armnn/layers/DivisionLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -27,12 +27,10 @@ std::unique_ptr<IWorkload> DivisionLayer::CreateWorkload(const IWorkloadFactory&
return factory.CreateWorkload(LayerType::Division, descriptor, PrepInfoAndDesc(descriptor));
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
DivisionLayer* DivisionLayer::Clone(Graph& graph) const
{
return CloneBase<DivisionLayer>(graph, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
void DivisionLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/DivisionLayer.hpp b/src/armnn/layers/DivisionLayer.hpp
index bad96ea3ff..398a9477fd 100644
--- a/src/armnn/layers/DivisionLayer.hpp
+++ b/src/armnn/layers/DivisionLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -11,8 +11,7 @@ namespace armnn
{
/// This layer represents a division operation.
-class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ElementwiseBinaryLayer instead.", "24.02")
- DivisionLayer : public ElementwiseBaseLayer
+class DivisionLayer : public ElementwiseBaseLayer
{
public:
/// Makes a workload for the Division type.
diff --git a/src/armnn/layers/ElementwiseBaseLayer.hpp b/src/armnn/layers/ElementwiseBaseLayer.hpp
index 79c49b5351..17e8b446e0 100644
--- a/src/armnn/layers/ElementwiseBaseLayer.hpp
+++ b/src/armnn/layers/ElementwiseBaseLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -13,8 +13,7 @@ namespace armnn
/// NOTE: this is an abstract class to encapsulate the element wise operations, it does not implement:
/// std::unique_ptr<IWorkload> Layer::CreateWorkload(const IWorkloadFactory& factory) const = 0;
/// Layer* Clone(Graph& graph) const = 0;
-class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ElementwiseBinaryLayer instead.", "24.02")
- ElementwiseBaseLayer : public Layer
+class ElementwiseBaseLayer : public Layer
{
public:
/// Check if the input tensor shape(s)
diff --git a/src/armnn/layers/MaximumLayer.cpp b/src/armnn/layers/MaximumLayer.cpp
index 6e180a260f..f074cf92bd 100644
--- a/src/armnn/layers/MaximumLayer.cpp
+++ b/src/armnn/layers/MaximumLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -26,12 +26,10 @@ std::unique_ptr<IWorkload> MaximumLayer::CreateWorkload(const IWorkloadFactory&
return factory.CreateWorkload(LayerType::Maximum, descriptor, PrepInfoAndDesc(descriptor));
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
MaximumLayer* MaximumLayer::Clone(Graph& graph) const
{
return CloneBase<MaximumLayer>(graph, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
void MaximumLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/MaximumLayer.hpp b/src/armnn/layers/MaximumLayer.hpp
index 31b773ea94..2b113a428d 100644
--- a/src/armnn/layers/MaximumLayer.hpp
+++ b/src/armnn/layers/MaximumLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -11,8 +11,7 @@ namespace armnn
{
/// This layer represents a maximum operation.
-class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ElementwiseBinaryLayer instead.", "24.02")
- MaximumLayer : public ElementwiseBaseLayer
+class MaximumLayer : public ElementwiseBaseLayer
{
public:
/// Makes a workload for the Maximum type.
diff --git a/src/armnn/layers/MinimumLayer.cpp b/src/armnn/layers/MinimumLayer.cpp
index 061794c0a7..f3661f9b5b 100644
--- a/src/armnn/layers/MinimumLayer.cpp
+++ b/src/armnn/layers/MinimumLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -27,12 +27,10 @@ std::unique_ptr<IWorkload> MinimumLayer::CreateWorkload(const IWorkloadFactory&
return factory.CreateWorkload(LayerType::Minimum, descriptor, PrepInfoAndDesc(descriptor));
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
MinimumLayer* MinimumLayer::Clone(Graph& graph) const
{
return CloneBase<MinimumLayer>(graph, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
void MinimumLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/MinimumLayer.hpp b/src/armnn/layers/MinimumLayer.hpp
index 795d317959..17ef55ef9a 100644
--- a/src/armnn/layers/MinimumLayer.hpp
+++ b/src/armnn/layers/MinimumLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -11,8 +11,7 @@ namespace armnn
{
/// This layer represents a minimum operation.
-class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ElementwiseBinaryLayer instead.", "24.02")
- MinimumLayer : public ElementwiseBaseLayer
+class MinimumLayer : public ElementwiseBaseLayer
{
public:
/// Makes a workload for the Minimum type.
diff --git a/src/armnn/layers/MultiplicationLayer.cpp b/src/armnn/layers/MultiplicationLayer.cpp
index cc669471ab..bcc77dcc51 100644
--- a/src/armnn/layers/MultiplicationLayer.cpp
+++ b/src/armnn/layers/MultiplicationLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -27,12 +27,10 @@ std::unique_ptr<IWorkload> MultiplicationLayer::CreateWorkload(const IWorkloadFa
return factory.CreateWorkload(LayerType::Multiplication, descriptor, PrepInfoAndDesc(descriptor));
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
MultiplicationLayer* MultiplicationLayer::Clone(Graph& graph) const
{
return CloneBase<MultiplicationLayer>(graph, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
void MultiplicationLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/MultiplicationLayer.hpp b/src/armnn/layers/MultiplicationLayer.hpp
index c1ddb3a0cb..2dea82279b 100644
--- a/src/armnn/layers/MultiplicationLayer.hpp
+++ b/src/armnn/layers/MultiplicationLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -11,8 +11,7 @@ namespace armnn
{
/// This layer represents a multiplication operation.
-class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ElementwiseBinaryLayer instead.", "24.02")
- MultiplicationLayer : public ElementwiseBaseLayer
+class MultiplicationLayer : public ElementwiseBaseLayer
{
public:
/// Makes a workload for the Multiplication type.
diff --git a/src/armnn/layers/SubtractionLayer.cpp b/src/armnn/layers/SubtractionLayer.cpp
index 19e4d5a83e..0e92013351 100644
--- a/src/armnn/layers/SubtractionLayer.cpp
+++ b/src/armnn/layers/SubtractionLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -27,12 +27,10 @@ std::unique_ptr<IWorkload> SubtractionLayer::CreateWorkload(const IWorkloadFacto
return factory.CreateWorkload(LayerType::Subtraction, descriptor, PrepInfoAndDesc(descriptor));
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
SubtractionLayer* SubtractionLayer::Clone(Graph& graph) const
{
return CloneBase<SubtractionLayer>(graph, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
void SubtractionLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/SubtractionLayer.hpp b/src/armnn/layers/SubtractionLayer.hpp
index 6d2a2c5000..86d5f9ea03 100644
--- a/src/armnn/layers/SubtractionLayer.hpp
+++ b/src/armnn/layers/SubtractionLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -11,9 +11,7 @@ namespace armnn
{
/// This layer represents a subtraction operation.
-
-class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ElementwiseBinaryLayer instead.", "24.02")
- SubtractionLayer : public ElementwiseBaseLayer
+class SubtractionLayer : public ElementwiseBaseLayer
{
public:
/// Makes a workload for the Subtraction type.
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index ff42ab8cbb..f83900404b 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -339,9 +339,7 @@ TEST_CASE("InsertConvertersTest")
armnn::Layer* head = graph.AddLayer<armnn::OutputLayer>(0, "output");
- ARMNN_NO_DEPRECATE_WARN_BEGIN
head = graph.InsertNewLayer<armnn::AdditionLayer>(head->GetInputSlot(0), "");
- ARMNN_NO_DEPRECATE_WARN_END
head->GetOutputHandler().SetTensorInfo(info);
graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(1), inputId++, "")
@@ -357,16 +355,14 @@ TEST_CASE("InsertConvertersTest")
->GetOutputHandler().SetTensorInfo(info);
// Check graph layer sequence before inserting convert layers
- ARMNN_NO_DEPRECATE_WARN_BEGIN
CHECK(CheckSequence(graph.cbegin(),
- graph.cend(),
- &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::MemCopyLayer>,
- &IsLayerOfType<armnn::FloorLayer>,
- &IsLayerOfType<armnn::AdditionLayer>,
- &IsLayerOfType<armnn::OutputLayer>));
- ARMNN_NO_DEPRECATE_WARN_END
+ graph.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::MemCopyLayer>,
+ &IsLayerOfType<armnn::FloorLayer>,
+ &IsLayerOfType<armnn::AdditionLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
// Check layers have Float16 DataType
for (auto& layer : graph)
@@ -409,21 +405,19 @@ TEST_CASE("InsertConvertersTest")
}
// Check sequence of layers after inserting convert layers
- ARMNN_NO_DEPRECATE_WARN_BEGIN
CHECK(CheckSequence(graph.cbegin(),
- graph.cend(),
- &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
- &IsLayerOfType<armnn::MemCopyLayer>,
- &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
- &IsLayerOfType<armnn::FloorLayer>,
- &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
- &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
- &IsLayerOfType<armnn::AdditionLayer>,
- &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
- &IsLayerOfType<armnn::OutputLayer>));
- ARMNN_NO_DEPRECATE_WARN_END
+ graph.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
+ &IsLayerOfType<armnn::MemCopyLayer>,
+ &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
+ &IsLayerOfType<armnn::FloorLayer>,
+ &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
+ &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
+ &IsLayerOfType<armnn::AdditionLayer>,
+ &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
+ &IsLayerOfType<armnn::OutputLayer>));
}
void CreateConvolution2dGraph(Graph &graph, const unsigned int* inputShape,
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index 6b3fe0f211..e0d3a222fe 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,6 +10,7 @@
#include <armnn/Descriptors.hpp>
#include <armnn/IRuntime.hpp>
#include <armnn/INetwork.hpp>
+#include <armnn/TypesUtils.hpp>
#include <armnn/profiling/ArmNNProfiling.hpp>
@@ -18,6 +19,9 @@
#include <test/ProfilingTestUtils.hpp>
+#include <HeapProfiling.hpp>
+#include <LeakChecking.hpp>
+
#ifdef WITH_VALGRIND
#include <valgrind/memcheck.h>
#endif
@@ -72,9 +76,7 @@ TEST_CASE("RuntimePreImportInputs")
auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
- ARMNN_NO_DEPRECATE_WARN_BEGIN
auto addLayer = testNetwork->AddAdditionLayer("add layer");
- ARMNN_NO_DEPRECATE_WARN_END
auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
@@ -1304,9 +1306,7 @@ TEST_CASE("RuntimeOptimizeImportOff_LoadNetworkImportOn")
auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
- ARMNN_NO_DEPRECATE_WARN_BEGIN
auto addLayer = testNetwork->AddAdditionLayer("add layer");
- ARMNN_NO_DEPRECATE_WARN_END
auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
@@ -1349,9 +1349,7 @@ TEST_CASE("RuntimeOptimizeExportOff_LoadNetworkExportOn")
auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
- ARMNN_NO_DEPRECATE_WARN_BEGIN
auto addLayer = testNetwork->AddAdditionLayer("add layer");
- ARMNN_NO_DEPRECATE_WARN_END
auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
@@ -1394,9 +1392,7 @@ TEST_CASE("RuntimeOptimizeImportOn_LoadNetworkImportOff")
auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
- ARMNN_NO_DEPRECATE_WARN_BEGIN
auto addLayer = testNetwork->AddAdditionLayer("add layer");
- ARMNN_NO_DEPRECATE_WARN_END
auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
@@ -1439,9 +1435,7 @@ TEST_CASE("RuntimeOptimizeExportOn_LoadNetworkExportOff")
auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
- ARMNN_NO_DEPRECATE_WARN_BEGIN
auto addLayer = testNetwork->AddAdditionLayer("add layer");
- ARMNN_NO_DEPRECATE_WARN_END
auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
@@ -1489,9 +1483,7 @@ TEST_CASE("SyncExecutePreImportInputsHappyPath")
auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
- ARMNN_NO_DEPRECATE_WARN_BEGIN
auto addLayer = testNetwork->AddAdditionLayer("add layer");
- ARMNN_NO_DEPRECATE_WARN_END
auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
TensorInfo tensorInfo{ { 4 }, armnn::DataType::Signed32 };
diff --git a/src/armnn/test/ShapeInferenceTests.cpp b/src/armnn/test/ShapeInferenceTests.cpp
index c33b248dc1..7b5d73a4e5 100644
--- a/src/armnn/test/ShapeInferenceTests.cpp
+++ b/src/armnn/test/ShapeInferenceTests.cpp
@@ -181,9 +181,7 @@ TEST_CASE("AbsTest")
TEST_CASE("AdditionTest")
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
CreateGraphAndRunTest<AdditionLayer>({{ 5, 7, 6, 2 }, { 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "add");
- ARMNN_NO_DEPRECATE_WARN_END
}
TEST_CASE("ArgMinMaxTest")
diff --git a/src/armnn/test/SubgraphViewTests.cpp b/src/armnn/test/SubgraphViewTests.cpp
index 4fcb476fcf..e0fd5fe7c1 100644
--- a/src/armnn/test/SubgraphViewTests.cpp
+++ b/src/armnn/test/SubgraphViewTests.cpp
@@ -1054,7 +1054,7 @@ TEST_CASE("MultiInputSingleOutput")
auto layerX2 = graph.AddLayer<InputLayer>(1, "layerX2");
auto layerM1 = graph.AddLayer<ActivationLayer>(activationDefaults, "layerM1");
auto layerM2 = graph.AddLayer<ActivationLayer>(activationDefaults, "layerM2");
- auto layerM3 = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "layerM3");
+ auto layerM3 = graph.AddLayer<AdditionLayer>("layerM3");
auto layerX3 = graph.AddLayer<OutputLayer>(0, "layerX3");
// X1 X2
@@ -1081,7 +1081,7 @@ TEST_CASE("MultiInputSingleOutput")
[](const Layer & l)
{
bool toSelect = (l.GetType() == LayerType::Activation
- || l.GetType() == LayerType::ElementwiseBinary);
+ || l.GetType() == LayerType::Addition);
return toSelect;
});
@@ -1772,7 +1772,7 @@ TEST_CASE("SubgraphCycles")
auto m0 = graph.AddLayer<ActivationLayer>(ActivationDescriptor{}, "m0");
auto x1 = graph.AddLayer<ActivationLayer>(ActivationDescriptor{}, "x1");
auto m1 = graph.AddLayer<ActivationLayer>(ActivationDescriptor{}, "m1");
- auto m2 = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "m2");
+ auto m2 = graph.AddLayer<AdditionLayer>("m2");
auto x2 = graph.AddLayer<ActivationLayer>(ActivationDescriptor{}, "x2");
x0->GetOutputSlot(0).Connect(m0->GetInputSlot(0));
@@ -1872,7 +1872,7 @@ TEST_CASE("SubgraphViewWorkingCopy")
bool ReplaceConstantMultiplicationWithDepthwise(SubgraphView& subgraph,
IConnectableLayer* layer)
{
- if (layer->GetType() == LayerType::ElementwiseBinary)
+ if (layer->GetType() == LayerType::Multiplication)
{
IInputSlot* patternSubgraphInput = &layer->GetInputSlot(0);
IInputSlot* patternSubgraphConstant = &layer->GetInputSlot(1);
@@ -1937,12 +1937,12 @@ bool ReplaceConstantMultiplicationWithDepthwise(SubgraphView& subgraph,
bool ReplaceTestMultiplication(SubgraphView& subgraph,
IConnectableLayer* layer)
{
- if (layer->GetType() == LayerType::ElementwiseBinary)
+ if (layer->GetType() == LayerType::Multiplication)
{
switch (layer->GetType())
{
- case LayerType::ElementwiseBinary:
+ case LayerType::Multiplication:
return ReplaceConstantMultiplicationWithDepthwise(subgraph, layer);
break;
default:
@@ -1993,7 +1993,7 @@ TEST_CASE("SubgraphViewWorkingCopyReplacementFunc")
auto constant = graph.AddLayer<ConstantLayer>("const");
constant->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
- IConnectableLayer* mul = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul, "mul");
+ IConnectableLayer* mul = graph.AddLayer<MultiplicationLayer>("mul");
IConnectableLayer* output = graph.AddLayer<OutputLayer>(0, "output");
// Create connections between layers
@@ -2015,10 +2015,7 @@ TEST_CASE("SubgraphViewWorkingCopyReplacementFunc")
// Check the WorkingCopy is as expected before replacement
CHECK(workingCopy.GetIConnectableLayers().size() == 4);
int idx=0;
- LayerType expectedSorted[] = {LayerType::Input,
- LayerType::Constant,
- LayerType::ElementwiseBinary,
- LayerType::Output};
+ LayerType expectedSorted[] = {LayerType::Input, LayerType::Constant, LayerType::Multiplication, LayerType::Output};
workingCopy.ForEachIConnectableLayer([&idx, &expectedSorted](const IConnectableLayer* l)
{
CHECK((expectedSorted[idx] == l->GetType()));
@@ -2212,7 +2209,7 @@ TEST_CASE("SubgraphViewWorkingCopyOptimizationViews")
auto constant = graph.AddLayer<ConstantLayer>("const");
constant->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
- IConnectableLayer* mul = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul, "mul");
+ IConnectableLayer* mul = graph.AddLayer<MultiplicationLayer>("mul");
IConnectableLayer* output = graph.AddLayer<OutputLayer>(0, "output");
// Create connections between layers
@@ -2233,10 +2230,7 @@ TEST_CASE("SubgraphViewWorkingCopyOptimizationViews")
// Check the WorkingCopy is as expected before replacement
int idx=0;
- LayerType expectedSorted[] = {LayerType::Input,
- LayerType::Constant,
- LayerType::ElementwiseBinary,
- LayerType::Output};
+ LayerType expectedSorted[] = {LayerType::Input, LayerType::Constant, LayerType::Multiplication, LayerType::Output};
workingCopy.ForEachIConnectableLayer([&idx, &expectedSorted](const IConnectableLayer* l)
{
CHECK((expectedSorted[idx] == l->GetType()));
@@ -2291,7 +2285,7 @@ TEST_CASE("SubgraphViewWorkingCopyReplaceSlots")
auto constant = graph.AddLayer<ConstantLayer>("const");
constant->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
- IConnectableLayer* mul = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul, "mul");
+ IConnectableLayer* mul = graph.AddLayer<MultiplicationLayer>("mul");
IConnectableLayer* output = graph.AddLayer<OutputLayer>(0, "output");
// Create connections between layers
@@ -2312,10 +2306,7 @@ TEST_CASE("SubgraphViewWorkingCopyReplaceSlots")
// Check the WorkingCopy is as expected before replacement
CHECK(workingCopy.GetIConnectableLayers().size() == 4);
int idx=0;
- LayerType expectedSorted[] = {LayerType::Input,
- LayerType::Constant,
- LayerType::ElementwiseBinary,
- LayerType::Output};
+ LayerType expectedSorted[] = {LayerType::Input, LayerType::Constant, LayerType::Multiplication, LayerType::Output};
workingCopy.ForEachIConnectableLayer([&idx, &expectedSorted](const IConnectableLayer* l)
{
CHECK((expectedSorted[idx] == l->GetType()));
@@ -2355,7 +2346,7 @@ TEST_CASE("SubgraphViewWorkingCopyCloneInputAndOutputSlots")
auto constant = graph.AddLayer<ConstantLayer>("const");
constant->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
- IConnectableLayer* mul = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul, "mul");
+ IConnectableLayer* mul = graph.AddLayer<MultiplicationLayer>("mul");
armnn::ViewsDescriptor splitterDesc(2,4);
IConnectableLayer* split = graph.AddLayer<SplitterLayer>(splitterDesc, "split");
IConnectableLayer* abs = graph.AddLayer<ActivationLayer>(ActivationFunction::Abs, "abs");
@@ -2420,7 +2411,7 @@ TEST_CASE("SubgraphViewWorkingCopyCloneInputAndOutputSlots")
CHECK(workingCopy.GetIConnectableLayers().size() == 4);
int idx=0;
LayerType expectedSorted[] = {LayerType::Constant,
- LayerType::ElementwiseBinary,
+ LayerType::Multiplication,
LayerType::Splitter,
LayerType::Activation};
workingCopy.ForEachIConnectableLayer([&idx, &expectedSorted](const IConnectableLayer* l)
@@ -2541,7 +2532,7 @@ TEST_CASE("MultipleInputMultipleOutputSlots_SubstituteGraph")
Layer* convLayer = graph.AddLayer<Convolution2dLayer>(Convolution2dDescriptor(), "conv");
Layer* reluLayer = graph.AddLayer<ActivationLayer>(ActivationDescriptor(), "activation");
Layer* constLayer = graph.AddLayer<ConstantLayer>("const");
- Layer* addLayer = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "add");
+ Layer* addLayer = graph.AddLayer<AdditionLayer>("add");
Layer* outputLayer1 = graph.AddLayer<OutputLayer>(0, "output1");
Layer* outputLayer2 = graph.AddLayer<OutputLayer>(1, "output2");
@@ -2592,7 +2583,7 @@ TEST_CASE("MultipleInputMultipleOutputSlots_SubstituteGraph")
// GetWorkingCopy() has caused address pointer of convolution layer to change.
// Finding new address pointer...
- if (layer->GetType() == LayerType::ElementwiseBinary)
+ if (layer->GetType() == LayerType::Addition)
{
addCopyLayer = layer;
}
@@ -2643,7 +2634,7 @@ TEST_CASE("MultipleInputMultipleOutputSlots_SubstituteGraphNewSlots")
Layer* convLayer = graph.AddLayer<Convolution2dLayer>(Convolution2dDescriptor(), "conv");
Layer* reluLayer = graph.AddLayer<ActivationLayer>(ActivationDescriptor(), "activation");
Layer* constLayer = graph.AddLayer<ConstantLayer>("const");
- Layer* addLayer = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "add");
+ Layer* addLayer = graph.AddLayer<AdditionLayer>("add");
Layer* outputLayer1 = graph.AddLayer<OutputLayer>(0, "output1");
Layer* outputLayer2 = graph.AddLayer<OutputLayer>(1, "output2");
@@ -2669,7 +2660,7 @@ TEST_CASE("MultipleInputMultipleOutputSlots_SubstituteGraphNewSlots")
{
// GetWorkingCopy() has caused address pointer of convolution layer to change.
// Finding new address pointer...
- if (layer->GetType() == LayerType::ElementwiseBinary)
+ if (layer->GetType() == LayerType::Addition)
{
addCopyLayer = layer;
}
diff --git a/src/armnn/test/TestNameOnlyLayerVisitor.cpp b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
index eb488a5bcb..497c36b079 100644
--- a/src/armnn/test/TestNameOnlyLayerVisitor.cpp
+++ b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017,2019-2021,2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -34,40 +34,31 @@ TEST_CASE(#testName) \
TEST_SUITE("TestNameOnlyLayerVisitor")
{
-ARMNN_NO_DEPRECATE_WARN_BEGIN
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Addition, CheckAdditionLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Addition, CheckAdditionLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_END
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Dequantize, CheckDequantizeLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Dequantize, CheckDequantizeLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Division, CheckDivisionLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Division, CheckDivisionLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_END
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Floor, CheckFloorLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Floor, CheckFloorLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Maximum, CheckMaximumLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Maximum, CheckMaximumLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_END
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Merge, CheckMergeLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Merge, CheckMergeLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Minimum, CheckMinimumLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Minimum, CheckMinimumLayerVisitorNameNullptr)
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Multiplication, CheckMultiplicationLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Multiplication, CheckMultiplicationLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_END
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Prelu, CheckPreluLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Prelu, CheckPreluLayerVisitorNameNullptr)
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Quantize, CheckQuantizeLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Quantize, CheckQuantizeLayerVisitorNameNullptr)
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Rank, CheckRankLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Rank, CheckRankLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Subtraction, CheckSubtractionLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Subtraction, CheckSubtractionLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_END
TEST_CASE_CHECK_LAYER_VISITOR_NAME(Switch, CheckSwitchLayerVisitorName)
TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Switch, CheckSwitchLayerVisitorNameNullptr)
+
}
diff --git a/src/armnn/test/optimizations/FuseActivationTests.cpp b/src/armnn/test/optimizations/FuseActivationTests.cpp
index 2ccbc9418c..3b8917192d 100644
--- a/src/armnn/test/optimizations/FuseActivationTests.cpp
+++ b/src/armnn/test/optimizations/FuseActivationTests.cpp
@@ -1,10 +1,11 @@
//
-// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "LayersFwd.hpp"
+#include <Network.hpp>
#include <ResolveType.hpp>
#include <armnn/INetwork.hpp>
#include <GraphUtils.hpp>
@@ -237,7 +238,6 @@ public:
}
};
-ARMNN_NO_DEPRECATE_WARN_BEGIN
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
struct MultiplicationTest
{
@@ -272,9 +272,7 @@ struct MultiplicationTest
return {};
}
};
-ARMNN_NO_DEPRECATE_WARN_END
-ARMNN_NO_DEPRECATE_WARN_BEGIN
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
struct AdditionTest
{
@@ -309,9 +307,7 @@ struct AdditionTest
return {};
}
};
-ARMNN_NO_DEPRECATE_WARN_END
-ARMNN_NO_DEPRECATE_WARN_BEGIN
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
struct SubtractionTest
{
@@ -346,9 +342,7 @@ struct SubtractionTest
return {};
}
};
-ARMNN_NO_DEPRECATE_WARN_END
-ARMNN_NO_DEPRECATE_WARN_BEGIN
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
struct DivisionTest
{
@@ -383,7 +377,6 @@ struct DivisionTest
return {};
}
};
-ARMNN_NO_DEPRECATE_WARN_END
template<typename LayerTest,
DataType ArmnnType>
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 3998ee730d..6ddc971f36 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -32,9 +32,7 @@ TEST_CASE("SerializeAddition")
armnn::INetworkPtr network = armnn::INetwork::Create();
armnn::IConnectableLayer* const inputLayer0 = network->AddInputLayer(0);
armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(1);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
armnn::IConnectableLayer* const additionLayer = network->AddAdditionLayer(layerName.c_str());
- ARMNN_NO_DEPRECATE_WARN_END
armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
inputLayer0->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(0));
@@ -359,9 +357,7 @@ TEST_CASE("SerializeConstant")
armnn::INetworkPtr network(armnn::INetwork::Create());
armnn::IConnectableLayer* input = network->AddInputLayer(0);
armnn::IConnectableLayer* constant = network->AddConstantLayer(constTensor, layerName.c_str());
- ARMNN_NO_DEPRECATE_WARN_BEGIN
armnn::IConnectableLayer* add = network->AddAdditionLayer();
- ARMNN_NO_DEPRECATE_WARN_END
armnn::IConnectableLayer* output = network->AddOutputLayer(0);
input->GetOutputSlot(0).Connect(add->GetInputSlot(0));
@@ -931,9 +927,7 @@ TEST_CASE("SerializeDivision")
armnn::INetworkPtr network = armnn::INetwork::Create();
armnn::IConnectableLayer* const inputLayer0 = network->AddInputLayer(0);
armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(1);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
armnn::IConnectableLayer* const divisionLayer = network->AddDivisionLayer(layerName.c_str());
- ARMNN_NO_DEPRECATE_WARN_END
armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
inputLayer0->GetOutputSlot(0).Connect(divisionLayer->GetInputSlot(0));
@@ -1633,9 +1627,7 @@ TEST_CASE("SerializeMaximum")
armnn::INetworkPtr network = armnn::INetwork::Create();
armnn::IConnectableLayer* const inputLayer0 = network->AddInputLayer(0);
armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(1);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
armnn::IConnectableLayer* const maximumLayer = network->AddMaximumLayer(layerName.c_str());
- ARMNN_NO_DEPRECATE_WARN_END
armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
inputLayer0->GetOutputSlot(0).Connect(maximumLayer->GetInputSlot(0));
@@ -1860,9 +1852,7 @@ TEST_CASE("SerializeMinimum")
armnn::INetworkPtr network = armnn::INetwork::Create();
armnn::IConnectableLayer* const inputLayer0 = network->AddInputLayer(0);
armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(1);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
armnn::IConnectableLayer* const minimumLayer = network->AddMinimumLayer(layerName.c_str());
- ARMNN_NO_DEPRECATE_WARN_END
armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
inputLayer0->GetOutputSlot(0).Connect(minimumLayer->GetInputSlot(0));
@@ -1888,9 +1878,7 @@ TEST_CASE("SerializeMultiplication")
armnn::INetworkPtr network = armnn::INetwork::Create();
armnn::IConnectableLayer* const inputLayer0 = network->AddInputLayer(0);
armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(1);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
armnn::IConnectableLayer* const multiplicationLayer = network->AddMultiplicationLayer(layerName.c_str());
- ARMNN_NO_DEPRECATE_WARN_END
armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
inputLayer0->GetOutputSlot(0).Connect(multiplicationLayer->GetInputSlot(0));
@@ -2748,9 +2736,7 @@ TEST_CASE("SerializeSubtraction")
armnn::INetworkPtr network = armnn::INetwork::Create();
armnn::IConnectableLayer* const inputLayer0 = network->AddInputLayer(0);
armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(1);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
armnn::IConnectableLayer* const subtractionLayer = network->AddSubtractionLayer(layerName.c_str());
- ARMNN_NO_DEPRECATE_WARN_END
armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
inputLayer0->GetOutputSlot(0).Connect(subtractionLayer->GetInputSlot(0));
@@ -2959,9 +2945,7 @@ TEST_CASE("SerializeDeserializeNonLinearNetwork")
armnn::INetworkPtr network(armnn::INetwork::Create());
armnn::IConnectableLayer* input = network->AddInputLayer(0);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
armnn::IConnectableLayer* add = network->AddAdditionLayer();
- ARMNN_NO_DEPRECATE_WARN_END
armnn::IConnectableLayer* constant = network->AddConstantLayer(constTensor, layerName.c_str());
armnn::IConnectableLayer* output = network->AddOutputLayer(0);
diff --git a/src/armnnTestUtils/CreateWorkload.hpp b/src/armnnTestUtils/CreateWorkload.hpp
index b16f14dd00..691adbff9d 100644
--- a/src/armnnTestUtils/CreateWorkload.hpp
+++ b/src/armnnTestUtils/CreateWorkload.hpp
@@ -174,9 +174,8 @@ std::unique_ptr<WorkloadType> CreateSubtractionWithBlobWorkloadTest(armnn::IWork
armnn::Graph& graph)
{
// Creates the layer we're testing.
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- auto* const layer = graph.AddLayer<SubtractionLayer>("layer");
- ARMNN_NO_DEPRECATE_WARN_END
+ SubtractionLayer* const layer = graph.AddLayer<SubtractionLayer>("layer");
+
auto activationDesc = std::make_shared<ActivationDescriptor>();
activationDesc->m_A = 10.0f;
activationDesc->m_B = 5.0f;
@@ -234,9 +233,8 @@ std::unique_ptr<WorkloadType> CreateMultiplicationWithBlobWorkloadTest(armnn::IW
armnn::Graph& graph)
{
// Creates the layer we're testing.
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- auto* const layer = graph.AddLayer<MultiplicationLayer>("layer");
- ARMNN_NO_DEPRECATE_WARN_BEGIN
+ MultiplicationLayer* const layer = graph.AddLayer<MultiplicationLayer>("layer");
+
auto activationDesc = std::make_shared<ActivationDescriptor>();
activationDesc->m_A = 10.0f;
activationDesc->m_B = 5.0f;
@@ -291,9 +289,8 @@ std::unique_ptr<WorkloadType> CreateAdditionWithBlobWorkloadTest(armnn::IWorkloa
armnn::Graph& graph)
{
// Creates the layer we're testing.
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- auto* const layer = graph.AddLayer<AdditionLayer>("layer");
- ARMNN_NO_DEPRECATE_WARN_END
+ AdditionLayer* const layer = graph.AddLayer<AdditionLayer>("layer");
+
auto activationDesc = std::make_shared<ActivationDescriptor>();
activationDesc->m_A = 10.0f;
activationDesc->m_B = 5.0f;
diff --git a/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp b/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp
index fb7a4e1387..599d3538eb 100644
--- a/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp
@@ -77,10 +77,8 @@ LayerType* FuseAdditionLayer(OptimizationViews& optimizationViews,
ActivationDescriptor& activationDesc,
std::string name)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
IConnectableLayer* replacement = optimizationViews.GetINetwork()->AddAdditionLayer(name.c_str());
LayerType* replacementLayer = PolymorphicDowncast<LayerType*>(replacement);
- ARMNN_NO_DEPRECATE_WARN_END
FuseLayer(optimizationViews,
baseLayer,
@@ -98,10 +96,8 @@ LayerType* FuseSubtractionLayer(OptimizationViews& optimizationViews,
ActivationDescriptor& activationDesc,
std::string name)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
IConnectableLayer* replacement = optimizationViews.GetINetwork()->AddSubtractionLayer(name.c_str());
LayerType* replacementLayer = PolymorphicDowncast<LayerType*>(replacement);
- ARMNN_NO_DEPRECATE_WARN_END
FuseLayer(optimizationViews,
baseLayer,
@@ -119,10 +115,8 @@ LayerType* FuseDivisionLayer(OptimizationViews& optimizationViews,
ActivationDescriptor& activationDesc,
std::string name)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
IConnectableLayer* replacement = optimizationViews.GetINetwork()->AddDivisionLayer(name.c_str());
LayerType* replacementLayer = PolymorphicDowncast<LayerType*>(replacement);
- ARMNN_NO_DEPRECATE_WARN_END
FuseLayer(optimizationViews,
baseLayer,
@@ -140,10 +134,8 @@ LayerType* FuseMultiplicationLayer(OptimizationViews& optimizationViews,
ActivationDescriptor& activationDesc,
std::string name)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
IConnectableLayer* replacement = optimizationViews.GetINetwork()->AddMultiplicationLayer(name.c_str());
LayerType* replacementLayer = PolymorphicDowncast<LayerType*>(replacement);
- ARMNN_NO_DEPRECATE_WARN_END
FuseLayer(optimizationViews,
baseLayer,
diff --git a/src/backends/backendsCommon/WorkloadFactoryBase.hpp b/src/backends/backendsCommon/WorkloadFactoryBase.hpp
index e793b44cf4..00e549c933 100644
--- a/src/backends/backendsCommon/WorkloadFactoryBase.hpp
+++ b/src/backends/backendsCommon/WorkloadFactoryBase.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019-2023 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -43,7 +43,6 @@ public:
const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02")
std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& /*descriptor*/,
const WorkloadInfo& /*info*/) const override
{ return nullptr; }
@@ -104,7 +103,6 @@ public:
const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02")
std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& /*descriptor*/,
const WorkloadInfo& /*info*/) const override
{ return nullptr; }
@@ -154,7 +152,6 @@ public:
const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02")
std::unique_ptr<IWorkload> CreateMaximum(const MaximumQueueDescriptor& /*descriptor*/,
const WorkloadInfo& /*info*/) const override
{ return nullptr; }
@@ -175,12 +172,10 @@ public:
const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02")
std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& /*descriptor*/,
const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02")
std::unique_ptr<IWorkload> CreateMultiplication(const MultiplicationQueueDescriptor& /*descriptor*/,
const WorkloadInfo& /*info*/) const override
{ return nullptr; }
@@ -253,7 +248,6 @@ public:
const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02")
std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& /*descriptor*/,
const WorkloadInfo& /*info*/) const override
{ return nullptr; }
diff --git a/src/backends/backendsCommon/test/AdditionEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/AdditionEndToEndTestImpl.hpp
index a0d1af6ab7..f33521888f 100644
--- a/src/backends/backendsCommon/test/AdditionEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/AdditionEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -30,7 +30,8 @@ armnn::INetworkPtr CreateAdditionNetwork(const armnn::TensorShape& inputXShape,
TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset);
- IConnectableLayer* addition = network->AddElementwiseBinaryLayer(BinaryOperation::Add, "addition");
+
+ IConnectableLayer* addition = network->AddAdditionLayer("addition");
IConnectableLayer* inputX = network->AddInputLayer(0, "inputX");
IConnectableLayer* inputY = network->AddInputLayer(1, "inputY");
IConnectableLayer* output = network->AddOutputLayer(0, "output");
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index 9213f0eac9..795fc13c32 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -39,7 +39,7 @@ bool ConstantUsageTest(const std::vector<BackendId>& computeDevice,
IConnectableLayer* input = net->AddInputLayer(0);
IConnectableLayer* constant = net->AddConstantLayer(ConstTensor(commonTensorInfo, constantData));
- IConnectableLayer* add = net->AddElementwiseBinaryLayer(BinaryOperation::Add);
+ IConnectableLayer* add = net->AddAdditionLayer();
IConnectableLayer* output = net->AddOutputLayer(0);
input->GetOutputSlot(0).Connect(add->GetInputSlot(0));
@@ -176,8 +176,7 @@ void EndToEndLayerTestImpl(INetworkPtr network,
for (unsigned int i = 0; i < out.size(); ++i)
{
CHECK_MESSAGE(Compare<ArmnnOType>(it.second[i], out[i], tolerance) == true,
- "Position: " << i <<" Actual output: " << static_cast<uint32_t>(out[i]) <<
- ". Expected output:" << static_cast<uint32_t>(it.second[i]));
+ "Actual output: " << out[i] << ". Expected output:" << it.second[i]);
}
}
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 5475762a53..5b95d3cd92 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -622,9 +622,7 @@ struct LayerTypePolicy;
// Every entry in the armnn::LayerType enum must be accounted for below.
DECLARE_LAYER_POLICY_2_PARAM(Activation)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
DECLARE_LAYER_POLICY_1_PARAM(Addition)
-ARMNN_NO_DEPRECATE_WARN_END
DECLARE_LAYER_POLICY_2_PARAM(ArgMinMax)
@@ -696,21 +694,15 @@ DECLARE_LAYER_POLICY_2_PARAM(Lstm)
DECLARE_LAYER_POLICY_MAP_PARAM(Map, void)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
DECLARE_LAYER_POLICY_1_PARAM(Maximum)
-ARMNN_NO_DEPRECATE_WARN_END
DECLARE_LAYER_POLICY_2_PARAM(Mean)
DECLARE_LAYER_POLICY_1_PARAM(Merge)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
DECLARE_LAYER_POLICY_1_PARAM(Minimum)
-ARMNN_NO_DEPRECATE_WARN_END
-ARMNN_NO_DEPRECATE_WARN_BEGIN
DECLARE_LAYER_POLICY_1_PARAM(Multiplication)
-ARMNN_NO_DEPRECATE_WARN_END
DECLARE_LAYER_POLICY_2_PARAM(Normalization)
@@ -734,9 +726,7 @@ DECLARE_LAYER_POLICY_2_PARAM(QLstm)
DECLARE_LAYER_POLICY_1_PARAM(QuantizedLstm)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
DECLARE_LAYER_POLICY_1_PARAM(Division)
-ARMNN_NO_DEPRECATE_WARN_END
DECLARE_LAYER_POLICY_1_PARAM(Rank)
@@ -762,9 +752,7 @@ DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(StandIn)
DECLARE_LAYER_POLICY_2_PARAM(StridedSlice)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
DECLARE_LAYER_POLICY_1_PARAM(Subtraction)
-ARMNN_NO_DEPRECATE_WARN_END
DECLARE_LAYER_POLICY_2_PARAM(Reduce)
diff --git a/src/backends/backendsCommon/test/OptimizationViewsTests.cpp b/src/backends/backendsCommon/test/OptimizationViewsTests.cpp
index 665358b9c6..ff3217911a 100644
--- a/src/backends/backendsCommon/test/OptimizationViewsTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizationViewsTests.cpp
@@ -263,9 +263,7 @@ TEST_CASE("OptimizeViewsValidateDeviceMockBackend")
armnn::IConnectableLayer* input = net->AddInputLayer(0, "inLayer0");
armnn::IConnectableLayer* input1 = net->AddInputLayer(1, "inLayer1");
- ARMNN_NO_DEPRECATE_WARN_BEGIN
armnn::IConnectableLayer* addition = net->AddAdditionLayer("addLayer");
- ARMNN_NO_DEPRECATE_WARN_END
armnn::IConnectableLayer* output = net->AddOutputLayer(0, "outLayer");
diff --git a/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
index 7303733e17..f5a6c4217b 100644
--- a/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
@@ -133,7 +133,6 @@ Pooling2dLayer* AddPoolingLayer(Graph& graph,
}
// Convenience function to add an addition layer to a graph
-ARMNN_NO_DEPRECATE_WARN_BEGIN
AdditionLayer* AddAdditionaLayer(Graph& graph,
LayerNameToLayerMap& layersInGraph,
const std::string& layerName,
@@ -145,7 +144,6 @@ AdditionLayer* AddAdditionaLayer(Graph& graph,
layersInGraph.insert(std::make_pair(additionLayer->GetName(), additionLayer));
return additionLayer;
}
-ARMNN_NO_DEPRECATE_WARN_END
// Convenience function to check that the given substitution matches the specified expected values
void CheckSubstitution(const OptimizationViews::SubstitutionPair& substitution,
@@ -752,9 +750,7 @@ SubgraphView::SubgraphViewPtr BuildPartiallyOptimizableSubgraph2(Graph& graph, L
"conv2 layer unoptimizable", outputInfo);
Convolution2dLayer* const conv3Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
"conv3 layer", outputInfo);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
AdditionLayer* const addLayer = AddAdditionaLayer(graph, layersInGraph, "add layer", outputInfo);
- ARMNN_NO_DEPRECATE_WARN_END
Layer* const outputLayer = AddOutputLayer(graph, "output layer");
// Connect the network
diff --git a/src/backends/cl/ClBackend.cpp b/src/backends/cl/ClBackend.cpp
index 46ba9cb717..a10b6fbb43 100644
--- a/src/backends/cl/ClBackend.cpp
+++ b/src/backends/cl/ClBackend.cpp
@@ -461,7 +461,6 @@ OptimizationViews ClBackend::OptimizeSubgraphView(const SubgraphView& subgraph,
}
else if (base.GetType() == LayerType::Addition)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
AdditionLayer* baseLayer = PolymorphicDowncast<AdditionLayer*>(&base);
arm_compute::Status status = ClAdditionValidate(
@@ -480,11 +479,9 @@ OptimizationViews ClBackend::OptimizeSubgraphView(const SubgraphView& subgraph,
untouched.erase(baseLayer->GetGuid());
untouched.erase(activationLayer->GetGuid());
}
- ARMNN_NO_DEPRECATE_WARN_END
}
else if (base.GetType() == LayerType::Division)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
DivisionLayer* baseLayer = PolymorphicDowncast<DivisionLayer*>(&base);
arm_compute::Status status = ClDivisionWorkloadValidate(
@@ -503,11 +500,9 @@ OptimizationViews ClBackend::OptimizeSubgraphView(const SubgraphView& subgraph,
untouched.erase(baseLayer->GetGuid());
untouched.erase(activationLayer->GetGuid());
}
- ARMNN_NO_DEPRECATE_WARN_END
}
else if (base.GetType() == LayerType::Multiplication)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
MultiplicationLayer* baseLayer = PolymorphicDowncast<MultiplicationLayer*>(&base);
arm_compute::Status status = ClMultiplicationWorkloadValidate(
@@ -526,11 +521,9 @@ OptimizationViews ClBackend::OptimizeSubgraphView(const SubgraphView& subgraph,
untouched.erase(baseLayer->GetGuid());
untouched.erase(activationLayer->GetGuid());
}
- ARMNN_NO_DEPRECATE_WARN_END
}
else if (base.GetType() == LayerType::Subtraction)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
SubtractionLayer* baseLayer = PolymorphicDowncast<SubtractionLayer*>(&base);
arm_compute::Status status = ClSubtractionValidate(
@@ -549,7 +542,6 @@ OptimizationViews ClBackend::OptimizeSubgraphView(const SubgraphView& subgraph,
untouched.erase(baseLayer->GetGuid());
untouched.erase(activationLayer->GetGuid());
}
- ARMNN_NO_DEPRECATE_WARN_END
}
else if (base.GetType() == LayerType::ElementwiseBinary)
{
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index e1266c8299..89bcf9bc01 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -346,9 +346,7 @@ bool ClLayerSupport::IsLayerSupported(const LayerType& type,
case LayerType::Dequantize:
return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
case LayerType::Division:
- ARMNN_NO_DEPRECATE_WARN_BEGIN
return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
- ARMNN_NO_DEPRECATE_WARN_END
case LayerType::ElementwiseBinary:
{
auto desc = *(PolymorphicDowncast<const ElementwiseBinaryDescriptor *>(&descriptor));
@@ -476,22 +474,16 @@ bool ClLayerSupport::IsLayerSupported(const LayerType& type,
infos[2],
reasonIfUnsupported);
case LayerType::Maximum:
- ARMNN_NO_DEPRECATE_WARN_BEGIN
return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
- ARMNN_NO_DEPRECATE_WARN_END
case LayerType::Mean:
return IsMeanSupported(infos[0],
infos[1],
*(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
reasonIfUnsupported);
case LayerType::Minimum:
- ARMNN_NO_DEPRECATE_WARN_BEGIN
return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
- ARMNN_NO_DEPRECATE_WARN_END
case LayerType::Multiplication:
- ARMNN_NO_DEPRECATE_WARN_BEGIN
return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
- ARMNN_NO_DEPRECATE_WARN_END
case LayerType::Normalization:
return IsNormalizationSupported(infos[0],
infos[1],
@@ -612,9 +604,7 @@ bool ClLayerSupport::IsLayerSupported(const LayerType& type,
*(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
reasonIfUnsupported);
case LayerType::Subtraction:
- ARMNN_NO_DEPRECATE_WARN_BEGIN
return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
- ARMNN_NO_DEPRECATE_WARN_END
case LayerType::Transpose:
return IsTransposeSupported(infos[0],
infos[1],
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index fa28141dcb..2d784e3df8 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -30,7 +30,6 @@ public:
const ActivationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02")
bool IsAdditionSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
@@ -129,14 +128,13 @@ public:
const Optional<TensorInfo>& biases,
Optional<std::string&> reason = EmptyOptional()) const override;
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02")
bool IsDivisionSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
bool IsElementwiseUnarySupported(const TensorInfo& input,
- const TensorInfo& output,
+                                     const TensorInfo& output,
const ElementwiseUnaryDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
@@ -202,7 +200,6 @@ public:
const LstmInputParamsInfo& paramsInfo,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02")
bool IsMaximumSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
@@ -213,13 +210,11 @@ public:
const MeanDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02")
bool IsMinimumSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02")
bool IsMultiplicationSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
@@ -330,7 +325,6 @@ public:
const StridedSliceDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02")
bool IsSubtractionSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
diff --git a/src/backends/neon/NeonBackend.cpp b/src/backends/neon/NeonBackend.cpp
index c68f4ce95b..cea2aa3eba 100644
--- a/src/backends/neon/NeonBackend.cpp
+++ b/src/backends/neon/NeonBackend.cpp
@@ -313,7 +313,6 @@ OptimizationViews NeonBackend::OptimizeSubgraphView(const SubgraphView& subgraph
}
else if (base.GetType() == LayerType::Addition)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
AdditionLayer* baseLayer = PolymorphicDowncast<AdditionLayer*>(&base);
arm_compute::Status status = NeonAdditionWorkloadValidate(
@@ -332,11 +331,9 @@ OptimizationViews NeonBackend::OptimizeSubgraphView(const SubgraphView& subgraph
untouched.erase(baseLayer->GetGuid());
untouched.erase(activationLayer->GetGuid());
}
- ARMNN_NO_DEPRECATE_WARN_END
}
else if (base.GetType() == LayerType::Division)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
DivisionLayer* baseLayer = PolymorphicDowncast<DivisionLayer*>(&base);
arm_compute::Status status = NeonDivisionWorkloadValidate(
@@ -355,11 +352,9 @@ OptimizationViews NeonBackend::OptimizeSubgraphView(const SubgraphView& subgraph
untouched.erase(baseLayer->GetGuid());
untouched.erase(activationLayer->GetGuid());
}
- ARMNN_NO_DEPRECATE_WARN_END
}
else if (base.GetType() == LayerType::Multiplication)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
MultiplicationLayer* baseLayer = PolymorphicDowncast<MultiplicationLayer*>(&base);
arm_compute::Status status = NeonMultiplicationWorkloadValidate(
@@ -378,11 +373,9 @@ OptimizationViews NeonBackend::OptimizeSubgraphView(const SubgraphView& subgraph
untouched.erase(baseLayer->GetGuid());
untouched.erase(activationLayer->GetGuid());
}
- ARMNN_NO_DEPRECATE_WARN_END
}
else if (base.GetType() == LayerType::Subtraction)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
SubtractionLayer* baseLayer = PolymorphicDowncast<SubtractionLayer*>(&base);
arm_compute::Status status = NeonSubtractionWorkloadValidate(
@@ -401,7 +394,6 @@ OptimizationViews NeonBackend::OptimizeSubgraphView(const SubgraphView& subgraph
untouched.erase(baseLayer->GetGuid());
untouched.erase(activationLayer->GetGuid());
}
- ARMNN_NO_DEPRECATE_WARN_END
}
else if (base.GetType() == LayerType::ElementwiseBinary)
{
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index 19881c26a0..66718cc481 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -128,86 +128,70 @@ TEST_CASE("CreateAdditionFloat16Workload")
TEST_CASE("CreateAdditionFloatWorkload")
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
NeonCreateElementwiseWorkloadTest<NeonAdditionWorkload,
AdditionQueueDescriptor,
AdditionLayer,
DataType::Float32>();
- ARMNN_NO_DEPRECATE_WARN_END
}
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_CASE("CreateSubtractionFloat16Workload")
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
NeonCreateElementwiseWorkloadTest<NeonSubtractionWorkload,
SubtractionQueueDescriptor,
SubtractionLayer,
DataType::Float16>();
- ARMNN_NO_DEPRECATE_WARN_END
}
#endif
TEST_CASE("CreateSubtractionFloatWorkload")
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
NeonCreateElementwiseWorkloadTest<NeonSubtractionWorkload,
SubtractionQueueDescriptor,
SubtractionLayer,
DataType::Float32>();
- ARMNN_NO_DEPRECATE_WARN_END
}
TEST_CASE("CreateSubtractionUint8Workload")
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
NeonCreateElementwiseWorkloadTest<NeonSubtractionWorkload,
SubtractionQueueDescriptor,
SubtractionLayer,
DataType::QAsymmU8>();
- ARMNN_NO_DEPRECATE_WARN_END
}
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_CASE("CreateMultiplicationFloat16Workload")
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
NeonCreateElementwiseWorkloadTest<NeonMultiplicationWorkload,
MultiplicationQueueDescriptor,
MultiplicationLayer,
DataType::Float16>();
- ARMNN_NO_DEPRECATE_WARN_END
}
#endif
TEST_CASE("CreateMultiplicationFloatWorkload")
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
NeonCreateElementwiseWorkloadTest<NeonMultiplicationWorkload,
MultiplicationQueueDescriptor,
MultiplicationLayer,
DataType::Float32>();
- ARMNN_NO_DEPRECATE_WARN_END
}
TEST_CASE("CreateMultiplicationUint8Workload")
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
NeonCreateElementwiseWorkloadTest<NeonMultiplicationWorkload,
MultiplicationQueueDescriptor,
MultiplicationLayer,
DataType::QAsymmU8>();
- ARMNN_NO_DEPRECATE_WARN_END
}
TEST_CASE("CreateDivisionFloatWorkloadTest")
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
NeonCreateElementwiseWorkloadTest<NeonDivisionWorkload,
DivisionQueueDescriptor,
DivisionLayer,
armnn::DataType::Float32>();
- ARMNN_NO_DEPRECATE_WARN_END
}
template <typename BatchNormalizationWorkloadType, typename armnn::DataType DataType>
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index 3bba0b7393..c46a9e5bac 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -142,182 +142,146 @@ TEST_CASE("CreateMultiplicationWorkloadWithBlobTest")
TEST_CASE("CreateAdditionFloatWorkload")
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
RefCreateElementwiseWorkloadTest<RefAdditionWorkload<>,
AdditionQueueDescriptor,
AdditionLayer,
armnn::DataType::Float32>();
- ARMNN_NO_DEPRECATE_WARN_END
}
TEST_CASE("CreateAdditionUint8Workload")
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
RefCreateElementwiseWorkloadTest<RefAdditionWorkload<>,
AdditionQueueDescriptor,
AdditionLayer,
armnn::DataType::QAsymmU8>();
- ARMNN_NO_DEPRECATE_WARN_END
}
TEST_CASE("CreateAdditionInt16Workload")
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
RefCreateElementwiseWorkloadTest<RefAdditionWorkload<>,
AdditionQueueDescriptor,
AdditionLayer,
armnn::DataType::QSymmS16>();
- ARMNN_NO_DEPRECATE_WARN_END
}
TEST_CASE("CreateAdditionInt32Workload")
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
RefCreateElementwiseWorkloadTest<RefAdditionWorkload<int32_t>,
AdditionQueueDescriptor,
AdditionLayer,
armnn::DataType::Signed32>();
- ARMNN_NO_DEPRECATE_WARN_END
}
TEST_CASE("CreateSubtractionFloat32Workload")
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
SubtractionQueueDescriptor,
SubtractionLayer,
armnn::DataType::Float32>();
- ARMNN_NO_DEPRECATE_WARN_END
}
TEST_CASE("CreateSubtractionFloat16Workload")
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
SubtractionQueueDescriptor,
SubtractionLayer,
armnn::DataType::Float16>();
- ARMNN_NO_DEPRECATE_WARN_END
}
TEST_CASE("CreateSubtractionUint8Workload")
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
SubtractionQueueDescriptor,
SubtractionLayer,
armnn::DataType::QAsymmU8>();
- ARMNN_NO_DEPRECATE_WARN_END
}
TEST_CASE("CreateSubtractionInt16Workload")
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
SubtractionQueueDescriptor,
SubtractionLayer,
armnn::DataType::QSymmS16>();
- ARMNN_NO_DEPRECATE_WARN_END
}
TEST_CASE("CreateSubtractionInt32Workload")
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<int32_t>,
SubtractionQueueDescriptor,
SubtractionLayer,
armnn::DataType::Signed32>();
- ARMNN_NO_DEPRECATE_WARN_END
}
TEST_CASE("CreateMultiplicationFloatWorkload")
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<>,
MultiplicationQueueDescriptor,
MultiplicationLayer,
armnn::DataType::Float32>();
- ARMNN_NO_DEPRECATE_WARN_END
}
TEST_CASE("CreateMultiplicationUint8Workload")
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<>,
MultiplicationQueueDescriptor,
MultiplicationLayer,
armnn::DataType::QAsymmU8>();
- ARMNN_NO_DEPRECATE_WARN_END
}
TEST_CASE("CreateMultiplicationInt16Workload")
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<>,
MultiplicationQueueDescriptor,
MultiplicationLayer,
armnn::DataType::QSymmS16>();
- ARMNN_NO_DEPRECATE_WARN_END
}
TEST_CASE("CreateMultiplicationInt32Workload")
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<int32_t>,
MultiplicationQueueDescriptor,
MultiplicationLayer,
armnn::DataType::Signed32>();
- ARMNN_NO_DEPRECATE_WARN_END
}
TEST_CASE("CreateDivisionFloat32Workload")
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
DivisionQueueDescriptor,
DivisionLayer,
armnn::DataType::Float32>();
- ARMNN_NO_DEPRECATE_WARN_END
}
TEST_CASE("CreateDivisionFloat16Workload")
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
DivisionQueueDescriptor,
DivisionLayer,
armnn::DataType::Float16>();
- ARMNN_NO_DEPRECATE_WARN_END
}
TEST_CASE("CreateDivisionUint8Workload")
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
DivisionQueueDescriptor,
DivisionLayer,
armnn::DataType::QAsymmU8>();
- ARMNN_NO_DEPRECATE_WARN_END
}
TEST_CASE("CreateDivisionInt16Workload")
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
DivisionQueueDescriptor,
DivisionLayer,
armnn::DataType::QSymmS16>();
- ARMNN_NO_DEPRECATE_WARN_END
}
TEST_CASE("CreateDivisionInt32Workload")
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
RefCreateElementwiseWorkloadTest<RefDivisionWorkload<int32_t>,
DivisionQueueDescriptor,
DivisionLayer,
armnn::DataType::Signed32>();
- ARMNN_NO_DEPRECATE_WARN_END
}
template <typename BatchNormalizationWorkloadType, armnn::DataType DataType>