From 52e90bf59ecbe90d33368d8fc1fd120f07658aaf Mon Sep 17 00:00:00 2001 From: Mike Kelly Date: Wed, 15 Mar 2023 15:06:23 +0000 Subject: IVGCVSW-3808 Deprecation notices for old ElementwiseBinary layers * Added Deprecation notices for old ElementwiseBinary layers. Signed-off-by: Mike Kelly Change-Id: Iebbbaff38cc9c347b25eb2f9054c914a4f931c68 --- src/armnn/LayersFwd.hpp | 10 +++++ src/armnn/Network.cpp | 24 +++++++++++ src/armnn/Network.hpp | 7 +++- src/armnn/layers/AdditionLayer.cpp | 4 +- src/armnn/layers/AdditionLayer.hpp | 5 ++- src/armnn/layers/DivisionLayer.cpp | 4 +- src/armnn/layers/DivisionLayer.hpp | 5 ++- src/armnn/layers/ElementwiseBaseLayer.hpp | 5 ++- src/armnn/layers/MaximumLayer.cpp | 4 +- src/armnn/layers/MaximumLayer.hpp | 5 ++- src/armnn/layers/MinimumLayer.cpp | 4 +- src/armnn/layers/MinimumLayer.hpp | 5 ++- src/armnn/layers/MultiplicationLayer.cpp | 4 +- src/armnn/layers/MultiplicationLayer.hpp | 5 ++- src/armnn/layers/SubtractionLayer.cpp | 4 +- src/armnn/layers/SubtractionLayer.hpp | 6 ++- src/armnn/test/OptimizerTests.cpp | 46 ++++++++++++---------- src/armnn/test/RuntimeTests.cpp | 18 ++++++--- src/armnn/test/ShapeInferenceTests.cpp | 2 + src/armnn/test/SubgraphViewTests.cpp | 45 ++++++++++++--------- src/armnn/test/TestNameOnlyLayerVisitor.cpp | 13 +++++- .../test/optimizations/FuseActivationTests.cpp | 11 +++++- src/armnnSerializer/test/SerializerTests.cpp | 16 ++++++++ src/armnnTestUtils/CreateWorkload.hpp | 15 ++++--- src/backends/aclCommon/ArmComputeSubgraphUtils.hpp | 8 ++++ .../backendsCommon/WorkloadFactoryBase.hpp | 8 +++- .../test/AdditionEndToEndTestImpl.hpp | 5 +-- .../backendsCommon/test/EndToEndTestImpl.hpp | 5 ++- .../test/IsLayerSupportedTestImpl.hpp | 12 ++++++ .../backendsCommon/test/OptimizationViewsTests.cpp | 2 + .../test/OptimizeSubgraphViewTests.cpp | 4 ++ src/backends/cl/ClBackend.cpp | 8 ++++ src/backends/cl/ClLayerSupport.cpp | 10 +++++ src/backends/cl/ClLayerSupport.hpp | 10 ++++- src/backends/neon/NeonBackend.cpp | 
8 ++++ src/backends/neon/test/NeonCreateWorkloadTests.cpp | 18 ++++++++- .../reference/test/RefCreateWorkloadTests.cpp | 38 +++++++++++++++++- 37 files changed, 319 insertions(+), 84 deletions(-) (limited to 'src') diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp index f634272316..44b1699e36 100644 --- a/src/armnn/LayersFwd.hpp +++ b/src/armnn/LayersFwd.hpp @@ -108,7 +108,9 @@ constexpr LayerType LayerEnumOf(const T* = nullptr); #define DECLARE_LAYER(LayerName) DECLARE_LAYER_IMPL(, LayerName) DECLARE_LAYER(Activation) +ARMNN_NO_DEPRECATE_WARN_BEGIN DECLARE_LAYER(Addition) +ARMNN_NO_DEPRECATE_WARN_END DECLARE_LAYER(ArgMinMax) DECLARE_LAYER(BatchMatMul) DECLARE_LAYER(BatchNormalization) @@ -127,7 +129,9 @@ DECLARE_LAYER(DepthToSpace) DECLARE_LAYER(DepthwiseConvolution2d) DECLARE_LAYER(Dequantize) DECLARE_LAYER(DetectionPostProcess) +ARMNN_NO_DEPRECATE_WARN_BEGIN DECLARE_LAYER(Division) +ARMNN_NO_DEPRECATE_WARN_END DECLARE_LAYER(ElementwiseBinary) DECLARE_LAYER(ElementwiseUnary) DECLARE_LAYER(FakeQuantization) @@ -143,13 +147,17 @@ DECLARE_LAYER(LogicalBinary) DECLARE_LAYER(LogSoftmax) DECLARE_LAYER(Lstm) DECLARE_LAYER(Map) +ARMNN_NO_DEPRECATE_WARN_BEGIN DECLARE_LAYER(Maximum) +ARMNN_NO_DEPRECATE_WARN_END DECLARE_LAYER(Mean) DECLARE_LAYER(MemCopy) DECLARE_LAYER(MemImport) DECLARE_LAYER(Merge) +ARMNN_NO_DEPRECATE_WARN_BEGIN DECLARE_LAYER(Minimum) DECLARE_LAYER(Multiplication) +ARMNN_NO_DEPRECATE_WARN_BEGIN DECLARE_LAYER(Normalization) DECLARE_LAYER(Output) DECLARE_LAYER(Pad) @@ -174,7 +182,9 @@ DECLARE_LAYER(Splitter) DECLARE_LAYER(Stack) DECLARE_LAYER(StandIn) DECLARE_LAYER(StridedSlice) +ARMNN_NO_DEPRECATE_WARN_BEGIN DECLARE_LAYER(Subtraction) +ARMNN_NO_DEPRECATE_WARN_END DECLARE_LAYER(Switch) DECLARE_LAYER(Transpose) DECLARE_LAYER(TransposeConvolution2d) diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp index 9ebb67b593..837b42e172 100644 --- a/src/armnn/Network.cpp +++ b/src/armnn/Network.cpp @@ -213,12 +213,16 @@ IConnectableLayer* 
INetwork::AddMergeLayer(const char* name) IConnectableLayer* INetwork::AddAdditionLayer(const char* name) { + ARMNN_NO_DEPRECATE_WARN_BEGIN return pNetworkImpl->AddAdditionLayer(name); + ARMNN_NO_DEPRECATE_WARN_END } IConnectableLayer* INetwork::AddMultiplicationLayer(const char* name) { + ARMNN_NO_DEPRECATE_WARN_BEGIN return pNetworkImpl->AddMultiplicationLayer(name); + ARMNN_NO_DEPRECATE_WARN_END } IConnectableLayer* INetwork::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc, @@ -308,17 +312,23 @@ IConnectableLayer* INetwork::AddLstmLayer(const LstmDescriptor& descriptor, IConnectableLayer* INetwork::AddDivisionLayer(const char* name) { + ARMNN_NO_DEPRECATE_WARN_BEGIN return pNetworkImpl->AddDivisionLayer(name); + ARMNN_NO_DEPRECATE_WARN_END } IConnectableLayer* INetwork::AddSubtractionLayer(const char* name) { + ARMNN_NO_DEPRECATE_WARN_BEGIN return pNetworkImpl->AddSubtractionLayer(name); + ARMNN_NO_DEPRECATE_WARN_END } IConnectableLayer* INetwork::AddMaximumLayer(const char* name) { + ARMNN_NO_DEPRECATE_WARN_BEGIN return pNetworkImpl->AddMaximumLayer(name); + ARMNN_NO_DEPRECATE_WARN_END } IConnectableLayer* INetwork::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name) @@ -345,7 +355,9 @@ IConnectableLayer* INetwork::AddStridedSliceLayer(const StridedSliceDescriptor& IConnectableLayer* INetwork::AddMinimumLayer(const char* name) { + ARMNN_NO_DEPRECATE_WARN_BEGIN return pNetworkImpl->AddMinimumLayer(name); + ARMNN_NO_DEPRECATE_WARN_END } IConnectableLayer* INetwork::AddGatherLayer(const GatherDescriptor& descriptor, @@ -1984,22 +1996,30 @@ IConnectableLayer* NetworkImpl::AddSplitterLayer(const ViewsDescriptor& splitter IConnectableLayer* NetworkImpl::AddMaximumLayer(const char* name) { + ARMNN_NO_DEPRECATE_WARN_BEGIN return m_Graph->AddLayer(name); + ARMNN_NO_DEPRECATE_WARN_END } IConnectableLayer* NetworkImpl::AddMinimumLayer(const char* name) { + ARMNN_NO_DEPRECATE_WARN_BEGIN return m_Graph->AddLayer(name); + 
ARMNN_NO_DEPRECATE_WARN_END } IConnectableLayer* NetworkImpl::AddAdditionLayer(const char* name) { + ARMNN_NO_DEPRECATE_WARN_BEGIN return m_Graph->AddLayer(name); + ARMNN_NO_DEPRECATE_WARN_END } IConnectableLayer* NetworkImpl::AddMultiplicationLayer(const char* name) { + ARMNN_NO_DEPRECATE_WARN_BEGIN return m_Graph->AddLayer(name); + ARMNN_NO_DEPRECATE_WARN_END } IConnectableLayer* NetworkImpl::AddOutputLayer(LayerBindingId id, const char* name) @@ -2238,12 +2258,16 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor, IConnectableLayer* NetworkImpl::AddDivisionLayer(const char* name) { + ARMNN_NO_DEPRECATE_WARN_BEGIN return m_Graph->AddLayer(name); + ARMNN_NO_DEPRECATE_WARN_END } IConnectableLayer* NetworkImpl::AddSubtractionLayer(const char* name) { + ARMNN_NO_DEPRECATE_WARN_BEGIN return m_Graph->AddLayer(name); + ARMNN_NO_DEPRECATE_WARN_END } IConnectableLayer* NetworkImpl::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name) diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp index 03642ce993..c6bf0859f7 100644 --- a/src/armnn/Network.hpp +++ b/src/armnn/Network.hpp @@ -43,7 +43,7 @@ public: IConnectableLayer* AddActivationLayer(const ActivationDescriptor& activationDescriptor, const char* name = nullptr); - + ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02") IConnectableLayer* AddAdditionLayer(const char* name = nullptr); IConnectableLayer* AddArgMinMaxLayer(const ArgMinMaxDescriptor& desc, @@ -93,6 +93,7 @@ public: const ConstTensor& anchors, const char* name = nullptr); + ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02") IConnectableLayer* AddDivisionLayer(const char* name = nullptr); IConnectableLayer* AddElementwiseBinaryLayer(const ElementwiseBinaryDescriptor& elementwiseBinaryDescriptor, @@ -132,12 +133,15 @@ public: const LstmInputParams& params, const char* name = nullptr); + ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use 
AddElementwiseBinaryLayer instead", "24.02") IConnectableLayer* AddMaximumLayer(const char* name = nullptr); IConnectableLayer* AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name = nullptr); + ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02") IConnectableLayer* AddMinimumLayer(const char* name = nullptr); + ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02") IConnectableLayer* AddMultiplicationLayer(const char* name = nullptr); IConnectableLayer* AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor, @@ -208,6 +212,7 @@ public: IConnectableLayer* AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor, const char* name = nullptr); + ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02") IConnectableLayer* AddSubtractionLayer(const char* name = nullptr); IConnectableLayer* AddSwitchLayer(const char* name = nullptr); diff --git a/src/armnn/layers/AdditionLayer.cpp b/src/armnn/layers/AdditionLayer.cpp index 7117c14f92..cae96ad422 100644 --- a/src/armnn/layers/AdditionLayer.cpp +++ b/src/armnn/layers/AdditionLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -27,10 +27,12 @@ std::unique_ptr AdditionLayer::CreateWorkload(const IWorkloadFactory& return factory.CreateWorkload(LayerType::Addition, descriptor, PrepInfoAndDesc(descriptor)); } +ARMNN_NO_DEPRECATE_WARN_BEGIN AdditionLayer* AdditionLayer::Clone(Graph& graph) const { return CloneBase(graph, GetName()); } +ARMNN_NO_DEPRECATE_WARN_END void AdditionLayer::ExecuteStrategy(IStrategy &strategy) const { diff --git a/src/armnn/layers/AdditionLayer.hpp b/src/armnn/layers/AdditionLayer.hpp index 6980677cde..cd20ff58cb 100644 --- a/src/armnn/layers/AdditionLayer.hpp +++ b/src/armnn/layers/AdditionLayer.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -10,7 +10,8 @@ namespace armnn { /// This layer represents an addition operation. -class AdditionLayer : public ElementwiseBaseLayer +class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ElementwiseBinaryLayer instead.", "24.02") + AdditionLayer : public ElementwiseBaseLayer { public: /// Makes a workload for the Addition type. diff --git a/src/armnn/layers/DivisionLayer.cpp b/src/armnn/layers/DivisionLayer.cpp index e4e2a7d8b7..db9f93da6a 100644 --- a/src/armnn/layers/DivisionLayer.cpp +++ b/src/armnn/layers/DivisionLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -27,10 +27,12 @@ std::unique_ptr DivisionLayer::CreateWorkload(const IWorkloadFactory& return factory.CreateWorkload(LayerType::Division, descriptor, PrepInfoAndDesc(descriptor)); } +ARMNN_NO_DEPRECATE_WARN_BEGIN DivisionLayer* DivisionLayer::Clone(Graph& graph) const { return CloneBase(graph, GetName()); } +ARMNN_NO_DEPRECATE_WARN_END void DivisionLayer::ExecuteStrategy(IStrategy& strategy) const { diff --git a/src/armnn/layers/DivisionLayer.hpp b/src/armnn/layers/DivisionLayer.hpp index 398a9477fd..bad96ea3ff 100644 --- a/src/armnn/layers/DivisionLayer.hpp +++ b/src/armnn/layers/DivisionLayer.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -11,7 +11,8 @@ namespace armnn { /// This layer represents a division operation. -class DivisionLayer : public ElementwiseBaseLayer +class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ElementwiseBinaryLayer instead.", "24.02") + DivisionLayer : public ElementwiseBaseLayer { public: /// Makes a workload for the Division type. diff --git a/src/armnn/layers/ElementwiseBaseLayer.hpp b/src/armnn/layers/ElementwiseBaseLayer.hpp index 17e8b446e0..79c49b5351 100644 --- a/src/armnn/layers/ElementwiseBaseLayer.hpp +++ b/src/armnn/layers/ElementwiseBaseLayer.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -13,7 +13,8 @@ namespace armnn /// NOTE: this is an abstract class to encapsulate the element wise operations, it does not implement: /// std::unique_ptr Layer::CreateWorkload(const IWorkloadFactory& factory) const = 0; /// Layer* Clone(Graph& graph) const = 0; -class ElementwiseBaseLayer : public Layer +class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ElementwiseBinaryLayer instead.", "24.02") + ElementwiseBaseLayer : public Layer { public: /// Check if the input tensor shape(s) diff --git a/src/armnn/layers/MaximumLayer.cpp b/src/armnn/layers/MaximumLayer.cpp index f074cf92bd..6e180a260f 100644 --- a/src/armnn/layers/MaximumLayer.cpp +++ b/src/armnn/layers/MaximumLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -26,10 +26,12 @@ std::unique_ptr MaximumLayer::CreateWorkload(const IWorkloadFactory& return factory.CreateWorkload(LayerType::Maximum, descriptor, PrepInfoAndDesc(descriptor)); } +ARMNN_NO_DEPRECATE_WARN_BEGIN MaximumLayer* MaximumLayer::Clone(Graph& graph) const { return CloneBase(graph, GetName()); } +ARMNN_NO_DEPRECATE_WARN_END void MaximumLayer::ExecuteStrategy(IStrategy& strategy) const { diff --git a/src/armnn/layers/MaximumLayer.hpp b/src/armnn/layers/MaximumLayer.hpp index 2b113a428d..31b773ea94 100644 --- a/src/armnn/layers/MaximumLayer.hpp +++ b/src/armnn/layers/MaximumLayer.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -11,7 +11,8 @@ namespace armnn { /// This layer represents a maximum operation. 
-class MaximumLayer : public ElementwiseBaseLayer +class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ElementwiseBinaryLayer instead.", "24.02") + MaximumLayer : public ElementwiseBaseLayer { public: /// Makes a workload for the Maximum type. diff --git a/src/armnn/layers/MinimumLayer.cpp b/src/armnn/layers/MinimumLayer.cpp index f3661f9b5b..061794c0a7 100644 --- a/src/armnn/layers/MinimumLayer.cpp +++ b/src/armnn/layers/MinimumLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -27,10 +27,12 @@ std::unique_ptr MinimumLayer::CreateWorkload(const IWorkloadFactory& return factory.CreateWorkload(LayerType::Minimum, descriptor, PrepInfoAndDesc(descriptor)); } +ARMNN_NO_DEPRECATE_WARN_BEGIN MinimumLayer* MinimumLayer::Clone(Graph& graph) const { return CloneBase(graph, GetName()); } +ARMNN_NO_DEPRECATE_WARN_END void MinimumLayer::ExecuteStrategy(IStrategy& strategy) const { diff --git a/src/armnn/layers/MinimumLayer.hpp b/src/armnn/layers/MinimumLayer.hpp index 17ef55ef9a..795d317959 100644 --- a/src/armnn/layers/MinimumLayer.hpp +++ b/src/armnn/layers/MinimumLayer.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -11,7 +11,8 @@ namespace armnn { /// This layer represents a minimum operation. -class MinimumLayer : public ElementwiseBaseLayer +class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ElementwiseBinaryLayer instead.", "24.02") + MinimumLayer : public ElementwiseBaseLayer { public: /// Makes a workload for the Minimum type. 
diff --git a/src/armnn/layers/MultiplicationLayer.cpp b/src/armnn/layers/MultiplicationLayer.cpp index bcc77dcc51..cc669471ab 100644 --- a/src/armnn/layers/MultiplicationLayer.cpp +++ b/src/armnn/layers/MultiplicationLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -27,10 +27,12 @@ std::unique_ptr MultiplicationLayer::CreateWorkload(const IWorkloadFa return factory.CreateWorkload(LayerType::Multiplication, descriptor, PrepInfoAndDesc(descriptor)); } +ARMNN_NO_DEPRECATE_WARN_BEGIN MultiplicationLayer* MultiplicationLayer::Clone(Graph& graph) const { return CloneBase(graph, GetName()); } +ARMNN_NO_DEPRECATE_WARN_END void MultiplicationLayer::ExecuteStrategy(IStrategy& strategy) const { diff --git a/src/armnn/layers/MultiplicationLayer.hpp b/src/armnn/layers/MultiplicationLayer.hpp index 2dea82279b..c1ddb3a0cb 100644 --- a/src/armnn/layers/MultiplicationLayer.hpp +++ b/src/armnn/layers/MultiplicationLayer.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -11,7 +11,8 @@ namespace armnn { /// This layer represents a multiplication operation. -class MultiplicationLayer : public ElementwiseBaseLayer +class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ElementwiseBinaryLayer instead.", "24.02") + MultiplicationLayer : public ElementwiseBaseLayer { public: /// Makes a workload for the Multiplication type. diff --git a/src/armnn/layers/SubtractionLayer.cpp b/src/armnn/layers/SubtractionLayer.cpp index 0e92013351..19e4d5a83e 100644 --- a/src/armnn/layers/SubtractionLayer.cpp +++ b/src/armnn/layers/SubtractionLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -27,10 +27,12 @@ std::unique_ptr SubtractionLayer::CreateWorkload(const IWorkloadFacto return factory.CreateWorkload(LayerType::Subtraction, descriptor, PrepInfoAndDesc(descriptor)); } +ARMNN_NO_DEPRECATE_WARN_BEGIN SubtractionLayer* SubtractionLayer::Clone(Graph& graph) const { return CloneBase(graph, GetName()); } +ARMNN_NO_DEPRECATE_WARN_END void SubtractionLayer::ExecuteStrategy(IStrategy& strategy) const { diff --git a/src/armnn/layers/SubtractionLayer.hpp b/src/armnn/layers/SubtractionLayer.hpp index 86d5f9ea03..6d2a2c5000 100644 --- a/src/armnn/layers/SubtractionLayer.hpp +++ b/src/armnn/layers/SubtractionLayer.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -11,7 +11,9 @@ namespace armnn { /// This layer represents a subtraction operation. -class SubtractionLayer : public ElementwiseBaseLayer + +class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ElementwiseBinaryLayer instead.", "24.02") + SubtractionLayer : public ElementwiseBaseLayer { public: /// Makes a workload for the Subtraction type. diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp index f83900404b..ff42ab8cbb 100644 --- a/src/armnn/test/OptimizerTests.cpp +++ b/src/armnn/test/OptimizerTests.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -339,7 +339,9 @@ TEST_CASE("InsertConvertersTest") armnn::Layer* head = graph.AddLayer(0, "output"); + ARMNN_NO_DEPRECATE_WARN_BEGIN head = graph.InsertNewLayer(head->GetInputSlot(0), ""); + ARMNN_NO_DEPRECATE_WARN_END head->GetOutputHandler().SetTensorInfo(info); graph.InsertNewLayer(head->GetInputSlot(1), inputId++, "") @@ -355,14 +357,16 @@ TEST_CASE("InsertConvertersTest") ->GetOutputHandler().SetTensorInfo(info); // Check graph layer sequence before inserting convert layers + ARMNN_NO_DEPRECATE_WARN_BEGIN CHECK(CheckSequence(graph.cbegin(), - graph.cend(), - &IsLayerOfType, - &IsLayerOfType, - &IsLayerOfType, - &IsLayerOfType, - &IsLayerOfType, - &IsLayerOfType)); + graph.cend(), + &IsLayerOfType, + &IsLayerOfType, + &IsLayerOfType, + &IsLayerOfType, + &IsLayerOfType, + &IsLayerOfType)); + ARMNN_NO_DEPRECATE_WARN_END // Check layers have Float16 DataType for (auto& layer : graph) @@ -405,19 +409,21 @@ TEST_CASE("InsertConvertersTest") } // Check sequence of layers after inserting convert layers + ARMNN_NO_DEPRECATE_WARN_BEGIN CHECK(CheckSequence(graph.cbegin(), - graph.cend(), - &IsLayerOfType, - &IsLayerOfType, - &IsLayerOfType, - &IsLayerOfType, - &IsLayerOfType, - &IsLayerOfType, - &IsLayerOfType, - &IsLayerOfType, - &IsLayerOfType, - &IsLayerOfType, - &IsLayerOfType)); + graph.cend(), + &IsLayerOfType, + &IsLayerOfType, + &IsLayerOfType, + &IsLayerOfType, + &IsLayerOfType, + &IsLayerOfType, + &IsLayerOfType, + &IsLayerOfType, + &IsLayerOfType, + &IsLayerOfType, + &IsLayerOfType)); + ARMNN_NO_DEPRECATE_WARN_END } void CreateConvolution2dGraph(Graph &graph, const unsigned int* inputShape, diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp index e0d3a222fe..6b3fe0f211 100644 --- a/src/armnn/test/RuntimeTests.cpp +++ b/src/armnn/test/RuntimeTests.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2023 Arm Ltd and Contributors. 
All rights reserved. // SPDX-License-Identifier: MIT // @@ -10,7 +10,6 @@ #include #include #include -#include #include @@ -19,9 +18,6 @@ #include -#include -#include - #ifdef WITH_VALGRIND #include #endif @@ -76,7 +72,9 @@ TEST_CASE("RuntimePreImportInputs") auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer"); auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer"); + ARMNN_NO_DEPRECATE_WARN_BEGIN auto addLayer = testNetwork->AddAdditionLayer("add layer"); + ARMNN_NO_DEPRECATE_WARN_END auto outputLayer = testNetwork->AddOutputLayer(2, "output layer"); TensorInfo tensorInfo{{4}, armnn::DataType::Signed32}; @@ -1306,7 +1304,9 @@ TEST_CASE("RuntimeOptimizeImportOff_LoadNetworkImportOn") auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer"); auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer"); + ARMNN_NO_DEPRECATE_WARN_BEGIN auto addLayer = testNetwork->AddAdditionLayer("add layer"); + ARMNN_NO_DEPRECATE_WARN_END auto outputLayer = testNetwork->AddOutputLayer(2, "output layer"); TensorInfo tensorInfo{{4}, armnn::DataType::Signed32}; @@ -1349,7 +1349,9 @@ TEST_CASE("RuntimeOptimizeExportOff_LoadNetworkExportOn") auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer"); auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer"); + ARMNN_NO_DEPRECATE_WARN_BEGIN auto addLayer = testNetwork->AddAdditionLayer("add layer"); + ARMNN_NO_DEPRECATE_WARN_END auto outputLayer = testNetwork->AddOutputLayer(2, "output layer"); TensorInfo tensorInfo{{4}, armnn::DataType::Signed32}; @@ -1392,7 +1394,9 @@ TEST_CASE("RuntimeOptimizeImportOn_LoadNetworkImportOff") auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer"); auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer"); + ARMNN_NO_DEPRECATE_WARN_BEGIN auto addLayer = testNetwork->AddAdditionLayer("add layer"); + ARMNN_NO_DEPRECATE_WARN_END auto outputLayer = testNetwork->AddOutputLayer(2, "output layer"); TensorInfo tensorInfo{{4}, 
armnn::DataType::Signed32}; @@ -1435,7 +1439,9 @@ TEST_CASE("RuntimeOptimizeExportOn_LoadNetworkExportOff") auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer"); auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer"); + ARMNN_NO_DEPRECATE_WARN_BEGIN auto addLayer = testNetwork->AddAdditionLayer("add layer"); + ARMNN_NO_DEPRECATE_WARN_END auto outputLayer = testNetwork->AddOutputLayer(2, "output layer"); TensorInfo tensorInfo{{4}, armnn::DataType::Signed32}; @@ -1483,7 +1489,9 @@ TEST_CASE("SyncExecutePreImportInputsHappyPath") auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer"); auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer"); + ARMNN_NO_DEPRECATE_WARN_BEGIN auto addLayer = testNetwork->AddAdditionLayer("add layer"); + ARMNN_NO_DEPRECATE_WARN_END auto outputLayer = testNetwork->AddOutputLayer(2, "output layer"); TensorInfo tensorInfo{ { 4 }, armnn::DataType::Signed32 }; diff --git a/src/armnn/test/ShapeInferenceTests.cpp b/src/armnn/test/ShapeInferenceTests.cpp index 7b5d73a4e5..c33b248dc1 100644 --- a/src/armnn/test/ShapeInferenceTests.cpp +++ b/src/armnn/test/ShapeInferenceTests.cpp @@ -181,7 +181,9 @@ TEST_CASE("AbsTest") TEST_CASE("AdditionTest") { + ARMNN_NO_DEPRECATE_WARN_BEGIN CreateGraphAndRunTest({{ 5, 7, 6, 2 }, { 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "add"); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("ArgMinMaxTest") diff --git a/src/armnn/test/SubgraphViewTests.cpp b/src/armnn/test/SubgraphViewTests.cpp index e0fd5fe7c1..4fcb476fcf 100644 --- a/src/armnn/test/SubgraphViewTests.cpp +++ b/src/armnn/test/SubgraphViewTests.cpp @@ -1054,7 +1054,7 @@ TEST_CASE("MultiInputSingleOutput") auto layerX2 = graph.AddLayer(1, "layerX2"); auto layerM1 = graph.AddLayer(activationDefaults, "layerM1"); auto layerM2 = graph.AddLayer(activationDefaults, "layerM2"); - auto layerM3 = graph.AddLayer("layerM3"); + auto layerM3 = graph.AddLayer(BinaryOperation::Add, "layerM3"); auto layerX3 = graph.AddLayer(0, "layerX3"); // X1 
X2 @@ -1081,7 +1081,7 @@ TEST_CASE("MultiInputSingleOutput") [](const Layer & l) { bool toSelect = (l.GetType() == LayerType::Activation - || l.GetType() == LayerType::Addition); + || l.GetType() == LayerType::ElementwiseBinary); return toSelect; }); @@ -1772,7 +1772,7 @@ TEST_CASE("SubgraphCycles") auto m0 = graph.AddLayer(ActivationDescriptor{}, "m0"); auto x1 = graph.AddLayer(ActivationDescriptor{}, "x1"); auto m1 = graph.AddLayer(ActivationDescriptor{}, "m1"); - auto m2 = graph.AddLayer("m2"); + auto m2 = graph.AddLayer(BinaryOperation::Add, "m2"); auto x2 = graph.AddLayer(ActivationDescriptor{}, "x2"); x0->GetOutputSlot(0).Connect(m0->GetInputSlot(0)); @@ -1872,7 +1872,7 @@ TEST_CASE("SubgraphViewWorkingCopy") bool ReplaceConstantMultiplicationWithDepthwise(SubgraphView& subgraph, IConnectableLayer* layer) { - if (layer->GetType() == LayerType::Multiplication) + if (layer->GetType() == LayerType::ElementwiseBinary) { IInputSlot* patternSubgraphInput = &layer->GetInputSlot(0); IInputSlot* patternSubgraphConstant = &layer->GetInputSlot(1); @@ -1937,12 +1937,12 @@ bool ReplaceConstantMultiplicationWithDepthwise(SubgraphView& subgraph, bool ReplaceTestMultiplication(SubgraphView& subgraph, IConnectableLayer* layer) { - if (layer->GetType() == LayerType::Multiplication) + if (layer->GetType() == LayerType::ElementwiseBinary) { switch (layer->GetType()) { - case LayerType::Multiplication: + case LayerType::ElementwiseBinary: return ReplaceConstantMultiplicationWithDepthwise(subgraph, layer); break; default: @@ -1993,7 +1993,7 @@ TEST_CASE("SubgraphViewWorkingCopyReplacementFunc") auto constant = graph.AddLayer("const"); constant->m_LayerOutput = std::make_shared(constTensor); - IConnectableLayer* mul = graph.AddLayer("mul"); + IConnectableLayer* mul = graph.AddLayer(BinaryOperation::Mul, "mul"); IConnectableLayer* output = graph.AddLayer(0, "output"); // Create connections between layers @@ -2015,7 +2015,10 @@ TEST_CASE("SubgraphViewWorkingCopyReplacementFunc") // 
Check the WorkingCopy is as expected before replacement CHECK(workingCopy.GetIConnectableLayers().size() == 4); int idx=0; - LayerType expectedSorted[] = {LayerType::Input, LayerType::Constant, LayerType::Multiplication, LayerType::Output}; + LayerType expectedSorted[] = {LayerType::Input, + LayerType::Constant, + LayerType::ElementwiseBinary, + LayerType::Output}; workingCopy.ForEachIConnectableLayer([&idx, &expectedSorted](const IConnectableLayer* l) { CHECK((expectedSorted[idx] == l->GetType())); @@ -2209,7 +2212,7 @@ TEST_CASE("SubgraphViewWorkingCopyOptimizationViews") auto constant = graph.AddLayer("const"); constant->m_LayerOutput = std::make_shared(constTensor); - IConnectableLayer* mul = graph.AddLayer("mul"); + IConnectableLayer* mul = graph.AddLayer(BinaryOperation::Mul, "mul"); IConnectableLayer* output = graph.AddLayer(0, "output"); // Create connections between layers @@ -2230,7 +2233,10 @@ TEST_CASE("SubgraphViewWorkingCopyOptimizationViews") // Check the WorkingCopy is as expected before replacement int idx=0; - LayerType expectedSorted[] = {LayerType::Input, LayerType::Constant, LayerType::Multiplication, LayerType::Output}; + LayerType expectedSorted[] = {LayerType::Input, + LayerType::Constant, + LayerType::ElementwiseBinary, + LayerType::Output}; workingCopy.ForEachIConnectableLayer([&idx, &expectedSorted](const IConnectableLayer* l) { CHECK((expectedSorted[idx] == l->GetType())); @@ -2285,7 +2291,7 @@ TEST_CASE("SubgraphViewWorkingCopyReplaceSlots") auto constant = graph.AddLayer("const"); constant->m_LayerOutput = std::make_shared(constTensor); - IConnectableLayer* mul = graph.AddLayer("mul"); + IConnectableLayer* mul = graph.AddLayer(BinaryOperation::Mul, "mul"); IConnectableLayer* output = graph.AddLayer(0, "output"); // Create connections between layers @@ -2306,7 +2312,10 @@ TEST_CASE("SubgraphViewWorkingCopyReplaceSlots") // Check the WorkingCopy is as expected before replacement CHECK(workingCopy.GetIConnectableLayers().size() == 4); int 
idx=0; - LayerType expectedSorted[] = {LayerType::Input, LayerType::Constant, LayerType::Multiplication, LayerType::Output}; + LayerType expectedSorted[] = {LayerType::Input, + LayerType::Constant, + LayerType::ElementwiseBinary, + LayerType::Output}; workingCopy.ForEachIConnectableLayer([&idx, &expectedSorted](const IConnectableLayer* l) { CHECK((expectedSorted[idx] == l->GetType())); @@ -2346,7 +2355,7 @@ TEST_CASE("SubgraphViewWorkingCopyCloneInputAndOutputSlots") auto constant = graph.AddLayer("const"); constant->m_LayerOutput = std::make_shared(constTensor); - IConnectableLayer* mul = graph.AddLayer("mul"); + IConnectableLayer* mul = graph.AddLayer(BinaryOperation::Mul, "mul"); armnn::ViewsDescriptor splitterDesc(2,4); IConnectableLayer* split = graph.AddLayer(splitterDesc, "split"); IConnectableLayer* abs = graph.AddLayer(ActivationFunction::Abs, "abs"); @@ -2411,7 +2420,7 @@ TEST_CASE("SubgraphViewWorkingCopyCloneInputAndOutputSlots") CHECK(workingCopy.GetIConnectableLayers().size() == 4); int idx=0; LayerType expectedSorted[] = {LayerType::Constant, - LayerType::Multiplication, + LayerType::ElementwiseBinary, LayerType::Splitter, LayerType::Activation}; workingCopy.ForEachIConnectableLayer([&idx, &expectedSorted](const IConnectableLayer* l) @@ -2532,7 +2541,7 @@ TEST_CASE("MultipleInputMultipleOutputSlots_SubstituteGraph") Layer* convLayer = graph.AddLayer(Convolution2dDescriptor(), "conv"); Layer* reluLayer = graph.AddLayer(ActivationDescriptor(), "activation"); Layer* constLayer = graph.AddLayer("const"); - Layer* addLayer = graph.AddLayer("add"); + Layer* addLayer = graph.AddLayer(BinaryOperation::Add, "add"); Layer* outputLayer1 = graph.AddLayer(0, "output1"); Layer* outputLayer2 = graph.AddLayer(1, "output2"); @@ -2583,7 +2592,7 @@ TEST_CASE("MultipleInputMultipleOutputSlots_SubstituteGraph") // GetWorkingCopy() has caused address pointer of convolution layer to change. // Finding new address pointer... 
- if (layer->GetType() == LayerType::Addition) + if (layer->GetType() == LayerType::ElementwiseBinary) { addCopyLayer = layer; } @@ -2634,7 +2643,7 @@ TEST_CASE("MultipleInputMultipleOutputSlots_SubstituteGraphNewSlots") Layer* convLayer = graph.AddLayer(Convolution2dDescriptor(), "conv"); Layer* reluLayer = graph.AddLayer(ActivationDescriptor(), "activation"); Layer* constLayer = graph.AddLayer("const"); - Layer* addLayer = graph.AddLayer("add"); + Layer* addLayer = graph.AddLayer(BinaryOperation::Add, "add"); Layer* outputLayer1 = graph.AddLayer(0, "output1"); Layer* outputLayer2 = graph.AddLayer(1, "output2"); @@ -2660,7 +2669,7 @@ TEST_CASE("MultipleInputMultipleOutputSlots_SubstituteGraphNewSlots") { // GetWorkingCopy() has caused address pointer of convolution layer to change. // Finding new address pointer... - if (layer->GetType() == LayerType::Addition) + if (layer->GetType() == LayerType::ElementwiseBinary) { addCopyLayer = layer; } diff --git a/src/armnn/test/TestNameOnlyLayerVisitor.cpp b/src/armnn/test/TestNameOnlyLayerVisitor.cpp index 497c36b079..eb488a5bcb 100644 --- a/src/armnn/test/TestNameOnlyLayerVisitor.cpp +++ b/src/armnn/test/TestNameOnlyLayerVisitor.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017,2019-2021,2023 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -34,31 +34,40 @@ TEST_CASE(#testName) \ TEST_SUITE("TestNameOnlyLayerVisitor") { +ARMNN_NO_DEPRECATE_WARN_BEGIN TEST_CASE_CHECK_LAYER_VISITOR_NAME(Addition, CheckAdditionLayerVisitorName) TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Addition, CheckAdditionLayerVisitorNameNullptr) +ARMNN_NO_DEPRECATE_WARN_END TEST_CASE_CHECK_LAYER_VISITOR_NAME(Dequantize, CheckDequantizeLayerVisitorName) TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Dequantize, CheckDequantizeLayerVisitorNameNullptr) +ARMNN_NO_DEPRECATE_WARN_BEGIN TEST_CASE_CHECK_LAYER_VISITOR_NAME(Division, CheckDivisionLayerVisitorName) TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Division, CheckDivisionLayerVisitorNameNullptr) +ARMNN_NO_DEPRECATE_WARN_END TEST_CASE_CHECK_LAYER_VISITOR_NAME(Floor, CheckFloorLayerVisitorName) TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Floor, CheckFloorLayerVisitorNameNullptr) +ARMNN_NO_DEPRECATE_WARN_BEGIN TEST_CASE_CHECK_LAYER_VISITOR_NAME(Maximum, CheckMaximumLayerVisitorName) TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Maximum, CheckMaximumLayerVisitorNameNullptr) +ARMNN_NO_DEPRECATE_WARN_END TEST_CASE_CHECK_LAYER_VISITOR_NAME(Merge, CheckMergeLayerVisitorName) TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Merge, CheckMergeLayerVisitorNameNullptr) +ARMNN_NO_DEPRECATE_WARN_BEGIN TEST_CASE_CHECK_LAYER_VISITOR_NAME(Minimum, CheckMinimumLayerVisitorName) TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Minimum, CheckMinimumLayerVisitorNameNullptr) TEST_CASE_CHECK_LAYER_VISITOR_NAME(Multiplication, CheckMultiplicationLayerVisitorName) TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Multiplication, CheckMultiplicationLayerVisitorNameNullptr) +ARMNN_NO_DEPRECATE_WARN_END TEST_CASE_CHECK_LAYER_VISITOR_NAME(Prelu, CheckPreluLayerVisitorName) TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Prelu, CheckPreluLayerVisitorNameNullptr) TEST_CASE_CHECK_LAYER_VISITOR_NAME(Quantize, CheckQuantizeLayerVisitorName) TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Quantize, 
CheckQuantizeLayerVisitorNameNullptr) TEST_CASE_CHECK_LAYER_VISITOR_NAME(Rank, CheckRankLayerVisitorName) TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Rank, CheckRankLayerVisitorNameNullptr) +ARMNN_NO_DEPRECATE_WARN_BEGIN TEST_CASE_CHECK_LAYER_VISITOR_NAME(Subtraction, CheckSubtractionLayerVisitorName) TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Subtraction, CheckSubtractionLayerVisitorNameNullptr) +ARMNN_NO_DEPRECATE_WARN_END TEST_CASE_CHECK_LAYER_VISITOR_NAME(Switch, CheckSwitchLayerVisitorName) TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Switch, CheckSwitchLayerVisitorNameNullptr) - } diff --git a/src/armnn/test/optimizations/FuseActivationTests.cpp b/src/armnn/test/optimizations/FuseActivationTests.cpp index 3b8917192d..2ccbc9418c 100644 --- a/src/armnn/test/optimizations/FuseActivationTests.cpp +++ b/src/armnn/test/optimizations/FuseActivationTests.cpp @@ -1,11 +1,10 @@ // -// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "LayersFwd.hpp" -#include #include #include #include @@ -238,6 +237,7 @@ public: } }; +ARMNN_NO_DEPRECATE_WARN_BEGIN template> struct MultiplicationTest { @@ -272,7 +272,9 @@ struct MultiplicationTest return {}; } }; +ARMNN_NO_DEPRECATE_WARN_END +ARMNN_NO_DEPRECATE_WARN_BEGIN template> struct AdditionTest { @@ -307,7 +309,9 @@ struct AdditionTest return {}; } }; +ARMNN_NO_DEPRECATE_WARN_END +ARMNN_NO_DEPRECATE_WARN_BEGIN template> struct SubtractionTest { @@ -342,7 +346,9 @@ struct SubtractionTest return {}; } }; +ARMNN_NO_DEPRECATE_WARN_END +ARMNN_NO_DEPRECATE_WARN_BEGIN template> struct DivisionTest { @@ -377,6 +383,7 @@ struct DivisionTest return {}; } }; +ARMNN_NO_DEPRECATE_WARN_END template diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp index 6ddc971f36..3998ee730d 100644 --- a/src/armnnSerializer/test/SerializerTests.cpp +++ b/src/armnnSerializer/test/SerializerTests.cpp @@ -32,7 +32,9 @@ TEST_CASE("SerializeAddition") armnn::INetworkPtr network = armnn::INetwork::Create(); armnn::IConnectableLayer* const inputLayer0 = network->AddInputLayer(0); armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(1); + ARMNN_NO_DEPRECATE_WARN_BEGIN armnn::IConnectableLayer* const additionLayer = network->AddAdditionLayer(layerName.c_str()); + ARMNN_NO_DEPRECATE_WARN_END armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0); inputLayer0->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(0)); @@ -357,7 +359,9 @@ TEST_CASE("SerializeConstant") armnn::INetworkPtr network(armnn::INetwork::Create()); armnn::IConnectableLayer* input = network->AddInputLayer(0); armnn::IConnectableLayer* constant = network->AddConstantLayer(constTensor, layerName.c_str()); + ARMNN_NO_DEPRECATE_WARN_BEGIN armnn::IConnectableLayer* add = network->AddAdditionLayer(); + ARMNN_NO_DEPRECATE_WARN_END armnn::IConnectableLayer* output = network->AddOutputLayer(0); 
input->GetOutputSlot(0).Connect(add->GetInputSlot(0)); @@ -927,7 +931,9 @@ TEST_CASE("SerializeDivision") armnn::INetworkPtr network = armnn::INetwork::Create(); armnn::IConnectableLayer* const inputLayer0 = network->AddInputLayer(0); armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(1); + ARMNN_NO_DEPRECATE_WARN_BEGIN armnn::IConnectableLayer* const divisionLayer = network->AddDivisionLayer(layerName.c_str()); + ARMNN_NO_DEPRECATE_WARN_END armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0); inputLayer0->GetOutputSlot(0).Connect(divisionLayer->GetInputSlot(0)); @@ -1627,7 +1633,9 @@ TEST_CASE("SerializeMaximum") armnn::INetworkPtr network = armnn::INetwork::Create(); armnn::IConnectableLayer* const inputLayer0 = network->AddInputLayer(0); armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(1); + ARMNN_NO_DEPRECATE_WARN_BEGIN armnn::IConnectableLayer* const maximumLayer = network->AddMaximumLayer(layerName.c_str()); + ARMNN_NO_DEPRECATE_WARN_END armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0); inputLayer0->GetOutputSlot(0).Connect(maximumLayer->GetInputSlot(0)); @@ -1852,7 +1860,9 @@ TEST_CASE("SerializeMinimum") armnn::INetworkPtr network = armnn::INetwork::Create(); armnn::IConnectableLayer* const inputLayer0 = network->AddInputLayer(0); armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(1); + ARMNN_NO_DEPRECATE_WARN_BEGIN armnn::IConnectableLayer* const minimumLayer = network->AddMinimumLayer(layerName.c_str()); + ARMNN_NO_DEPRECATE_WARN_END armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0); inputLayer0->GetOutputSlot(0).Connect(minimumLayer->GetInputSlot(0)); @@ -1878,7 +1888,9 @@ TEST_CASE("SerializeMultiplication") armnn::INetworkPtr network = armnn::INetwork::Create(); armnn::IConnectableLayer* const inputLayer0 = network->AddInputLayer(0); armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(1); + 
ARMNN_NO_DEPRECATE_WARN_BEGIN armnn::IConnectableLayer* const multiplicationLayer = network->AddMultiplicationLayer(layerName.c_str()); + ARMNN_NO_DEPRECATE_WARN_END armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0); inputLayer0->GetOutputSlot(0).Connect(multiplicationLayer->GetInputSlot(0)); @@ -2736,7 +2748,9 @@ TEST_CASE("SerializeSubtraction") armnn::INetworkPtr network = armnn::INetwork::Create(); armnn::IConnectableLayer* const inputLayer0 = network->AddInputLayer(0); armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(1); + ARMNN_NO_DEPRECATE_WARN_BEGIN armnn::IConnectableLayer* const subtractionLayer = network->AddSubtractionLayer(layerName.c_str()); + ARMNN_NO_DEPRECATE_WARN_END armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0); inputLayer0->GetOutputSlot(0).Connect(subtractionLayer->GetInputSlot(0)); @@ -2945,7 +2959,9 @@ TEST_CASE("SerializeDeserializeNonLinearNetwork") armnn::INetworkPtr network(armnn::INetwork::Create()); armnn::IConnectableLayer* input = network->AddInputLayer(0); + ARMNN_NO_DEPRECATE_WARN_BEGIN armnn::IConnectableLayer* add = network->AddAdditionLayer(); + ARMNN_NO_DEPRECATE_WARN_END armnn::IConnectableLayer* constant = network->AddConstantLayer(constTensor, layerName.c_str()); armnn::IConnectableLayer* output = network->AddOutputLayer(0); diff --git a/src/armnnTestUtils/CreateWorkload.hpp b/src/armnnTestUtils/CreateWorkload.hpp index 691adbff9d..b16f14dd00 100644 --- a/src/armnnTestUtils/CreateWorkload.hpp +++ b/src/armnnTestUtils/CreateWorkload.hpp @@ -174,8 +174,9 @@ std::unique_ptr CreateSubtractionWithBlobWorkloadTest(armnn::IWork armnn::Graph& graph) { // Creates the layer we're testing. 
- SubtractionLayer* const layer = graph.AddLayer("layer"); - + ARMNN_NO_DEPRECATE_WARN_BEGIN + auto* const layer = graph.AddLayer("layer"); + ARMNN_NO_DEPRECATE_WARN_END auto activationDesc = std::make_shared(); activationDesc->m_A = 10.0f; activationDesc->m_B = 5.0f; @@ -233,8 +234,9 @@ std::unique_ptr CreateMultiplicationWithBlobWorkloadTest(armnn::IW armnn::Graph& graph) { // Creates the layer we're testing. - MultiplicationLayer* const layer = graph.AddLayer("layer"); - + ARMNN_NO_DEPRECATE_WARN_BEGIN + auto* const layer = graph.AddLayer("layer"); + ARMNN_NO_DEPRECATE_WARN_END auto activationDesc = std::make_shared(); activationDesc->m_A = 10.0f; activationDesc->m_B = 5.0f; @@ -289,8 +291,9 @@ std::unique_ptr CreateAdditionWithBlobWorkloadTest(armnn::IWorkloa armnn::Graph& graph) { // Creates the layer we're testing. - AdditionLayer* const layer = graph.AddLayer("layer"); - + ARMNN_NO_DEPRECATE_WARN_BEGIN + auto* const layer = graph.AddLayer("layer"); + ARMNN_NO_DEPRECATE_WARN_END auto activationDesc = std::make_shared(); activationDesc->m_A = 10.0f; activationDesc->m_B = 5.0f; diff --git a/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp b/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp index 599d3538eb..fb7a4e1387 100644 --- a/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp +++ b/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp @@ -77,8 +77,10 @@ LayerType* FuseAdditionLayer(OptimizationViews& optimizationViews, ActivationDescriptor& activationDesc, std::string name) { + ARMNN_NO_DEPRECATE_WARN_BEGIN IConnectableLayer* replacement = optimizationViews.GetINetwork()->AddAdditionLayer(name.c_str()); LayerType* replacementLayer = PolymorphicDowncast(replacement); + ARMNN_NO_DEPRECATE_WARN_END FuseLayer(optimizationViews, baseLayer, @@ -96,8 +98,10 @@ LayerType* FuseSubtractionLayer(OptimizationViews& optimizationViews, ActivationDescriptor& activationDesc, std::string name) { + ARMNN_NO_DEPRECATE_WARN_BEGIN IConnectableLayer* replacement =
optimizationViews.GetINetwork()->AddSubtractionLayer(name.c_str()); LayerType* replacementLayer = PolymorphicDowncast(replacement); + ARMNN_NO_DEPRECATE_WARN_END FuseLayer(optimizationViews, baseLayer, @@ -115,8 +119,10 @@ LayerType* FuseDivisionLayer(OptimizationViews& optimizationViews, ActivationDescriptor& activationDesc, std::string name) { + ARMNN_NO_DEPRECATE_WARN_BEGIN IConnectableLayer* replacement = optimizationViews.GetINetwork()->AddDivisionLayer(name.c_str()); LayerType* replacementLayer = PolymorphicDowncast(replacement); + ARMNN_NO_DEPRECATE_WARN_END FuseLayer(optimizationViews, baseLayer, @@ -134,8 +140,10 @@ LayerType* FuseMultiplicationLayer(OptimizationViews& optimizationViews, ActivationDescriptor& activationDesc, std::string name) { + ARMNN_NO_DEPRECATE_WARN_BEGIN IConnectableLayer* replacement = optimizationViews.GetINetwork()->AddMultiplicationLayer(name.c_str()); LayerType* replacementLayer = PolymorphicDowncast(replacement); + ARMNN_NO_DEPRECATE_WARN_END FuseLayer(optimizationViews, baseLayer, diff --git a/src/backends/backendsCommon/WorkloadFactoryBase.hpp b/src/backends/backendsCommon/WorkloadFactoryBase.hpp index 00e549c933..e793b44cf4 100644 --- a/src/backends/backendsCommon/WorkloadFactoryBase.hpp +++ b/src/backends/backendsCommon/WorkloadFactoryBase.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2019 Arm Ltd. All rights reserved. +// Copyright © 2019-2023 Arm Ltd. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -43,6 +43,7 @@ public: const WorkloadInfo& /*info*/) const override { return nullptr; } + ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02") std::unique_ptr CreateAddition(const AdditionQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const override { return nullptr; } @@ -103,6 +104,7 @@ public: const WorkloadInfo& /*info*/) const override { return nullptr; } + ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02") std::unique_ptr CreateDivision(const DivisionQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const override { return nullptr; } @@ -152,6 +154,7 @@ public: const WorkloadInfo& /*info*/) const override { return nullptr; } + ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02") std::unique_ptr CreateMaximum(const MaximumQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const override { return nullptr; } @@ -172,10 +175,12 @@ public: const WorkloadInfo& /*info*/) const override { return nullptr; } + ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02") std::unique_ptr CreateMinimum(const MinimumQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const override { return nullptr; } + ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02") std::unique_ptr CreateMultiplication(const MultiplicationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const override { return nullptr; } @@ -248,6 +253,7 @@ public: const WorkloadInfo& /*info*/) const override { return nullptr; } + ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02") std::unique_ptr CreateSubtraction(const SubtractionQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const override { return nullptr; } diff --git a/src/backends/backendsCommon/test/AdditionEndToEndTestImpl.hpp 
b/src/backends/backendsCommon/test/AdditionEndToEndTestImpl.hpp index f33521888f..a0d1af6ab7 100644 --- a/src/backends/backendsCommon/test/AdditionEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/AdditionEndToEndTestImpl.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #pragma once @@ -30,8 +30,7 @@ armnn::INetworkPtr CreateAdditionNetwork(const armnn::TensorShape& inputXShape, TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset); - - IConnectableLayer* addition = network->AddAdditionLayer("addition"); + IConnectableLayer* addition = network->AddElementwiseBinaryLayer(BinaryOperation::Add, "addition"); IConnectableLayer* inputX = network->AddInputLayer(0, "inputX"); IConnectableLayer* inputY = network->AddInputLayer(1, "inputY"); IConnectableLayer* output = network->AddOutputLayer(0, "output"); diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp index 795fc13c32..9213f0eac9 100644 --- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp @@ -39,7 +39,7 @@ bool ConstantUsageTest(const std::vector& computeDevice, IConnectableLayer* input = net->AddInputLayer(0); IConnectableLayer* constant = net->AddConstantLayer(ConstTensor(commonTensorInfo, constantData)); - IConnectableLayer* add = net->AddAdditionLayer(); + IConnectableLayer* add = net->AddElementwiseBinaryLayer(BinaryOperation::Add); IConnectableLayer* output = net->AddOutputLayer(0); input->GetOutputSlot(0).Connect(add->GetInputSlot(0)); @@ -176,7 +176,8 @@ void EndToEndLayerTestImpl(INetworkPtr network, for (unsigned int i = 0; i < out.size(); ++i) { CHECK_MESSAGE(Compare(it.second[i], out[i], tolerance) == true, - "Actual output: " << out[i] << ". 
Expected output:" << it.second[i]); + "Position: " << i <<" Actual output: " << static_cast(out[i]) << + ". Expected output:" << static_cast(it.second[i])); } } diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp index 5b95d3cd92..5475762a53 100644 --- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp +++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp @@ -622,7 +622,9 @@ struct LayerTypePolicy; // Every entry in the armnn::LayerType enum must be accounted for below. DECLARE_LAYER_POLICY_2_PARAM(Activation) +ARMNN_NO_DEPRECATE_WARN_BEGIN DECLARE_LAYER_POLICY_1_PARAM(Addition) +ARMNN_NO_DEPRECATE_WARN_END DECLARE_LAYER_POLICY_2_PARAM(ArgMinMax) @@ -694,15 +696,21 @@ DECLARE_LAYER_POLICY_2_PARAM(Lstm) DECLARE_LAYER_POLICY_MAP_PARAM(Map, void) +ARMNN_NO_DEPRECATE_WARN_BEGIN DECLARE_LAYER_POLICY_1_PARAM(Maximum) +ARMNN_NO_DEPRECATE_WARN_END DECLARE_LAYER_POLICY_2_PARAM(Mean) DECLARE_LAYER_POLICY_1_PARAM(Merge) +ARMNN_NO_DEPRECATE_WARN_BEGIN DECLARE_LAYER_POLICY_1_PARAM(Minimum) +ARMNN_NO_DEPRECATE_WARN_END +ARMNN_NO_DEPRECATE_WARN_BEGIN DECLARE_LAYER_POLICY_1_PARAM(Multiplication) +ARMNN_NO_DEPRECATE_WARN_END DECLARE_LAYER_POLICY_2_PARAM(Normalization) @@ -726,7 +734,9 @@ DECLARE_LAYER_POLICY_2_PARAM(QLstm) DECLARE_LAYER_POLICY_1_PARAM(QuantizedLstm) +ARMNN_NO_DEPRECATE_WARN_BEGIN DECLARE_LAYER_POLICY_1_PARAM(Division) +ARMNN_NO_DEPRECATE_WARN_END DECLARE_LAYER_POLICY_1_PARAM(Rank) @@ -752,7 +762,9 @@ DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(StandIn) DECLARE_LAYER_POLICY_2_PARAM(StridedSlice) +ARMNN_NO_DEPRECATE_WARN_BEGIN DECLARE_LAYER_POLICY_1_PARAM(Subtraction) +ARMNN_NO_DEPRECATE_WARN_END DECLARE_LAYER_POLICY_2_PARAM(Reduce) diff --git a/src/backends/backendsCommon/test/OptimizationViewsTests.cpp b/src/backends/backendsCommon/test/OptimizationViewsTests.cpp index ff3217911a..665358b9c6 100644 --- 
a/src/backends/backendsCommon/test/OptimizationViewsTests.cpp +++ b/src/backends/backendsCommon/test/OptimizationViewsTests.cpp @@ -263,7 +263,9 @@ TEST_CASE("OptimizeViewsValidateDeviceMockBackend") armnn::IConnectableLayer* input = net->AddInputLayer(0, "inLayer0"); armnn::IConnectableLayer* input1 = net->AddInputLayer(1, "inLayer1"); + ARMNN_NO_DEPRECATE_WARN_BEGIN armnn::IConnectableLayer* addition = net->AddAdditionLayer("addLayer"); + ARMNN_NO_DEPRECATE_WARN_END armnn::IConnectableLayer* output = net->AddOutputLayer(0, "outLayer"); diff --git a/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp index f5a6c4217b..7303733e17 100644 --- a/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp +++ b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp @@ -133,6 +133,7 @@ Pooling2dLayer* AddPoolingLayer(Graph& graph, } // Convenience function to add an addition layer to a graph +ARMNN_NO_DEPRECATE_WARN_BEGIN AdditionLayer* AddAdditionaLayer(Graph& graph, LayerNameToLayerMap& layersInGraph, const std::string& layerName, @@ -144,6 +145,7 @@ AdditionLayer* AddAdditionaLayer(Graph& graph, layersInGraph.insert(std::make_pair(additionLayer->GetName(), additionLayer)); return additionLayer; } +ARMNN_NO_DEPRECATE_WARN_END // Convenience function to check that the given substitution matches the specified expected values void CheckSubstitution(const OptimizationViews::SubstitutionPair& substitution, @@ -750,7 +752,9 @@ SubgraphView::SubgraphViewPtr BuildPartiallyOptimizableSubgraph2(Graph& graph, L "conv2 layer unoptimizable", outputInfo); Convolution2dLayer* const conv3Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor, "conv3 layer", outputInfo); + ARMNN_NO_DEPRECATE_WARN_BEGIN AdditionLayer* const addLayer = AddAdditionaLayer(graph, layersInGraph, "add layer", outputInfo); + ARMNN_NO_DEPRECATE_WARN_END Layer* const outputLayer = AddOutputLayer(graph, 
"output layer"); // Connect the network diff --git a/src/backends/cl/ClBackend.cpp b/src/backends/cl/ClBackend.cpp index a10b6fbb43..46ba9cb717 100644 --- a/src/backends/cl/ClBackend.cpp +++ b/src/backends/cl/ClBackend.cpp @@ -461,6 +461,7 @@ OptimizationViews ClBackend::OptimizeSubgraphView(const SubgraphView& subgraph, } else if (base.GetType() == LayerType::Addition) { + ARMNN_NO_DEPRECATE_WARN_BEGIN AdditionLayer* baseLayer = PolymorphicDowncast(&base); arm_compute::Status status = ClAdditionValidate( @@ -479,9 +480,11 @@ OptimizationViews ClBackend::OptimizeSubgraphView(const SubgraphView& subgraph, untouched.erase(baseLayer->GetGuid()); untouched.erase(activationLayer->GetGuid()); } + ARMNN_NO_DEPRECATE_WARN_END } else if (base.GetType() == LayerType::Division) { + ARMNN_NO_DEPRECATE_WARN_BEGIN DivisionLayer* baseLayer = PolymorphicDowncast(&base); arm_compute::Status status = ClDivisionWorkloadValidate( @@ -500,9 +503,11 @@ OptimizationViews ClBackend::OptimizeSubgraphView(const SubgraphView& subgraph, untouched.erase(baseLayer->GetGuid()); untouched.erase(activationLayer->GetGuid()); } + ARMNN_NO_DEPRECATE_WARN_END } else if (base.GetType() == LayerType::Multiplication) { + ARMNN_NO_DEPRECATE_WARN_BEGIN MultiplicationLayer* baseLayer = PolymorphicDowncast(&base); arm_compute::Status status = ClMultiplicationWorkloadValidate( @@ -521,9 +526,11 @@ OptimizationViews ClBackend::OptimizeSubgraphView(const SubgraphView& subgraph, untouched.erase(baseLayer->GetGuid()); untouched.erase(activationLayer->GetGuid()); } + ARMNN_NO_DEPRECATE_WARN_END } else if (base.GetType() == LayerType::Subtraction) { + ARMNN_NO_DEPRECATE_WARN_BEGIN SubtractionLayer* baseLayer = PolymorphicDowncast(&base); arm_compute::Status status = ClSubtractionValidate( @@ -542,6 +549,7 @@ OptimizationViews ClBackend::OptimizeSubgraphView(const SubgraphView& subgraph, untouched.erase(baseLayer->GetGuid()); untouched.erase(activationLayer->GetGuid()); } + ARMNN_NO_DEPRECATE_WARN_END } else if 
(base.GetType() == LayerType::ElementwiseBinary) { diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp index 89bcf9bc01..e1266c8299 100644 --- a/src/backends/cl/ClLayerSupport.cpp +++ b/src/backends/cl/ClLayerSupport.cpp @@ -346,7 +346,9 @@ bool ClLayerSupport::IsLayerSupported(const LayerType& type, case LayerType::Dequantize: return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported); case LayerType::Division: + ARMNN_NO_DEPRECATE_WARN_BEGIN return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported); + ARMNN_NO_DEPRECATE_WARN_END case LayerType::ElementwiseBinary: { auto desc = *(PolymorphicDowncast(&descriptor)); @@ -474,16 +476,22 @@ bool ClLayerSupport::IsLayerSupported(const LayerType& type, infos[2], reasonIfUnsupported); case LayerType::Maximum: + ARMNN_NO_DEPRECATE_WARN_BEGIN return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported); + ARMNN_NO_DEPRECATE_WARN_END case LayerType::Mean: return IsMeanSupported(infos[0], infos[1], *(PolymorphicDowncast(&descriptor)), reasonIfUnsupported); case LayerType::Minimum: + ARMNN_NO_DEPRECATE_WARN_BEGIN return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported); + ARMNN_NO_DEPRECATE_WARN_END case LayerType::Multiplication: + ARMNN_NO_DEPRECATE_WARN_BEGIN return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported); + ARMNN_NO_DEPRECATE_WARN_END case LayerType::Normalization: return IsNormalizationSupported(infos[0], infos[1], @@ -604,7 +612,9 @@ bool ClLayerSupport::IsLayerSupported(const LayerType& type, *(PolymorphicDowncast(&descriptor)), reasonIfUnsupported); case LayerType::Subtraction: + ARMNN_NO_DEPRECATE_WARN_BEGIN return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported); + ARMNN_NO_DEPRECATE_WARN_END case LayerType::Transpose: return IsTransposeSupported(infos[0], infos[1], diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp index 
2d784e3df8..fa28141dcb 100644 --- a/src/backends/cl/ClLayerSupport.hpp +++ b/src/backends/cl/ClLayerSupport.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #pragma once @@ -30,6 +30,7 @@ public: const ActivationDescriptor& descriptor, Optional reasonIfUnsupported = EmptyOptional()) const override; + ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02") bool IsAdditionSupported(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& output, @@ -128,13 +129,14 @@ public: const Optional& biases, Optional reason = EmptyOptional()) const override; + ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02") bool IsDivisionSupported(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const override; bool IsElementwiseUnarySupported(const TensorInfo& input, - const TensorInfo& ouput, + const TensorInfo& output, const ElementwiseUnaryDescriptor& descriptor, Optional reasonIfUnsupported = EmptyOptional()) const override; @@ -200,6 +202,7 @@ public: const LstmInputParamsInfo& paramsInfo, Optional reasonIfUnsupported = EmptyOptional()) const override; + ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02") bool IsMaximumSupported(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& output, @@ -210,11 +213,13 @@ public: const MeanDescriptor& descriptor, Optional reasonIfUnsupported = EmptyOptional()) const override; + ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02") bool IsMinimumSupported(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const override; + ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02") bool 
IsMultiplicationSupported(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& output, @@ -325,6 +330,7 @@ public: const StridedSliceDescriptor& descriptor, Optional reasonIfUnsupported = EmptyOptional()) const override; + ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02") bool IsSubtractionSupported(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& output, diff --git a/src/backends/neon/NeonBackend.cpp b/src/backends/neon/NeonBackend.cpp index cea2aa3eba..c68f4ce95b 100644 --- a/src/backends/neon/NeonBackend.cpp +++ b/src/backends/neon/NeonBackend.cpp @@ -313,6 +313,7 @@ OptimizationViews NeonBackend::OptimizeSubgraphView(const SubgraphView& subgraph } else if (base.GetType() == LayerType::Addition) { + ARMNN_NO_DEPRECATE_WARN_BEGIN AdditionLayer* baseLayer = PolymorphicDowncast(&base); arm_compute::Status status = NeonAdditionWorkloadValidate( @@ -331,9 +332,11 @@ OptimizationViews NeonBackend::OptimizeSubgraphView(const SubgraphView& subgraph untouched.erase(baseLayer->GetGuid()); untouched.erase(activationLayer->GetGuid()); } + ARMNN_NO_DEPRECATE_WARN_END } else if (base.GetType() == LayerType::Division) { + ARMNN_NO_DEPRECATE_WARN_BEGIN DivisionLayer* baseLayer = PolymorphicDowncast(&base); arm_compute::Status status = NeonDivisionWorkloadValidate( @@ -352,9 +355,11 @@ OptimizationViews NeonBackend::OptimizeSubgraphView(const SubgraphView& subgraph untouched.erase(baseLayer->GetGuid()); untouched.erase(activationLayer->GetGuid()); } + ARMNN_NO_DEPRECATE_WARN_END } else if (base.GetType() == LayerType::Multiplication) { + ARMNN_NO_DEPRECATE_WARN_BEGIN MultiplicationLayer* baseLayer = PolymorphicDowncast(&base); arm_compute::Status status = NeonMultiplicationWorkloadValidate( @@ -373,9 +378,11 @@ OptimizationViews NeonBackend::OptimizeSubgraphView(const SubgraphView& subgraph untouched.erase(baseLayer->GetGuid()); untouched.erase(activationLayer->GetGuid()); } + ARMNN_NO_DEPRECATE_WARN_END } 
else if (base.GetType() == LayerType::Subtraction) { + ARMNN_NO_DEPRECATE_WARN_BEGIN SubtractionLayer* baseLayer = PolymorphicDowncast(&base); arm_compute::Status status = NeonSubtractionWorkloadValidate( @@ -394,6 +401,7 @@ OptimizationViews NeonBackend::OptimizeSubgraphView(const SubgraphView& subgraph untouched.erase(baseLayer->GetGuid()); untouched.erase(activationLayer->GetGuid()); } + ARMNN_NO_DEPRECATE_WARN_END } else if (base.GetType() == LayerType::ElementwiseBinary) { diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp index 66718cc481..19881c26a0 100644 --- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp +++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -128,70 +128,86 @@ TEST_CASE("CreateAdditionFloat16Workload") TEST_CASE("CreateAdditionFloatWorkload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN NeonCreateElementwiseWorkloadTest(); + ARMNN_NO_DEPRECATE_WARN_END } #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC TEST_CASE("CreateSubtractionFloat16Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN NeonCreateElementwiseWorkloadTest(); + ARMNN_NO_DEPRECATE_WARN_END } #endif TEST_CASE("CreateSubtractionFloatWorkload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN NeonCreateElementwiseWorkloadTest(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateSubtractionUint8Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN NeonCreateElementwiseWorkloadTest(); + ARMNN_NO_DEPRECATE_WARN_END } #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC TEST_CASE("CreateMultiplicationFloat16Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN NeonCreateElementwiseWorkloadTest(); + ARMNN_NO_DEPRECATE_WARN_END } #endif TEST_CASE("CreateMultiplicationFloatWorkload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN NeonCreateElementwiseWorkloadTest(); + ARMNN_NO_DEPRECATE_WARN_END 
} TEST_CASE("CreateMultiplicationUint8Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN NeonCreateElementwiseWorkloadTest(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateDivisionFloatWorkloadTest") { + ARMNN_NO_DEPRECATE_WARN_BEGIN NeonCreateElementwiseWorkloadTest(); + ARMNN_NO_DEPRECATE_WARN_END } template diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp index c46a9e5bac..3bba0b7393 100644 --- a/src/backends/reference/test/RefCreateWorkloadTests.cpp +++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -142,146 +142,182 @@ TEST_CASE("CreateMultiplicationWorkloadWithBlobTest") TEST_CASE("CreateAdditionFloatWorkload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, AdditionQueueDescriptor, AdditionLayer, armnn::DataType::Float32>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateAdditionUint8Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, AdditionQueueDescriptor, AdditionLayer, armnn::DataType::QAsymmU8>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateAdditionInt16Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, AdditionQueueDescriptor, AdditionLayer, armnn::DataType::QSymmS16>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateAdditionInt32Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, AdditionQueueDescriptor, AdditionLayer, armnn::DataType::Signed32>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateSubtractionFloat32Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, SubtractionQueueDescriptor, SubtractionLayer, armnn::DataType::Float32>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateSubtractionFloat16Workload") { + 
ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, SubtractionQueueDescriptor, SubtractionLayer, armnn::DataType::Float16>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateSubtractionUint8Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, SubtractionQueueDescriptor, SubtractionLayer, armnn::DataType::QAsymmU8>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateSubtractionInt16Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, SubtractionQueueDescriptor, SubtractionLayer, armnn::DataType::QSymmS16>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateSubtractionInt32Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, SubtractionQueueDescriptor, SubtractionLayer, armnn::DataType::Signed32>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateMultiplicationFloatWorkload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, MultiplicationQueueDescriptor, MultiplicationLayer, armnn::DataType::Float32>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateMultiplicationUint8Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, MultiplicationQueueDescriptor, MultiplicationLayer, armnn::DataType::QAsymmU8>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateMultiplicationInt16Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, MultiplicationQueueDescriptor, MultiplicationLayer, armnn::DataType::QSymmS16>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateMultiplicationInt32Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, MultiplicationQueueDescriptor, MultiplicationLayer, armnn::DataType::Signed32>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateDivisionFloat32Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, DivisionQueueDescriptor, DivisionLayer, armnn::DataType::Float32>(); + ARMNN_NO_DEPRECATE_WARN_END } 
TEST_CASE("CreateDivisionFloat16Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, DivisionQueueDescriptor, DivisionLayer, armnn::DataType::Float16>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateDivisionUint8Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, DivisionQueueDescriptor, DivisionLayer, armnn::DataType::QAsymmU8>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateDivisionInt16Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, DivisionQueueDescriptor, DivisionLayer, armnn::DataType::QSymmS16>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateDivisionInt32Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, DivisionQueueDescriptor, DivisionLayer, armnn::DataType::Signed32>(); + ARMNN_NO_DEPRECATE_WARN_END } template -- cgit v1.2.1