From 52e90bf59ecbe90d33368d8fc1fd120f07658aaf Mon Sep 17 00:00:00 2001 From: Mike Kelly Date: Wed, 15 Mar 2023 15:06:23 +0000 Subject: IVGCVSW-3808 Deprecation notices for old ElementwiseBinary layers * Added Deprecation notices for old ElementwiseBinary layers. Signed-off-by: Mike Kelly Change-Id: Iebbbaff38cc9c347b25eb2f9054c914a4f931c68 --- src/backends/aclCommon/ArmComputeSubgraphUtils.hpp | 8 +++++ .../backendsCommon/WorkloadFactoryBase.hpp | 8 ++++- .../test/AdditionEndToEndTestImpl.hpp | 5 ++- .../backendsCommon/test/EndToEndTestImpl.hpp | 5 +-- .../test/IsLayerSupportedTestImpl.hpp | 12 +++++++ .../backendsCommon/test/OptimizationViewsTests.cpp | 2 ++ .../test/OptimizeSubgraphViewTests.cpp | 4 +++ src/backends/cl/ClBackend.cpp | 8 +++++ src/backends/cl/ClLayerSupport.cpp | 10 ++++++ src/backends/cl/ClLayerSupport.hpp | 10 ++++-- src/backends/neon/NeonBackend.cpp | 8 +++++ src/backends/neon/test/NeonCreateWorkloadTests.cpp | 18 +++++++++- .../reference/test/RefCreateWorkloadTests.cpp | 38 +++++++++++++++++++++- 13 files changed, 126 insertions(+), 10 deletions(-) (limited to 'src/backends') diff --git a/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp b/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp index 599d3538eb..fb7a4e1387 100644 --- a/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp +++ b/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp @@ -77,8 +77,10 @@ LayerType* FuseAdditionLayer(OptimizationViews& optimizationViews, ActivationDescriptor& activationDesc, std::string name) { + ARMNN_NO_DEPRECATE_WARN_BEGIN IConnectableLayer* replacement = optimizationViews.GetINetwork()->AddAdditionLayer(name.c_str()); LayerType* replacementLayer = PolymorphicDowncast(replacement); + ARMNN_NO_DEPRECATE_WARN_END FuseLayer(optimizationViews, baseLayer, @@ -96,8 +98,10 @@ LayerType* FuseSubtractionLayer(OptimizationViews& optimizationViews, ActivationDescriptor& activationDesc, std::string name) { + ARMNN_NO_DEPRECATE_WARN_BEGIN IConnectableLayer* replacement = optimizationViews.GetINetwork()->AddSubtractionLayer(name.c_str()); LayerType* replacementLayer = PolymorphicDowncast(replacement); + ARMNN_NO_DEPRECATE_WARN_END FuseLayer(optimizationViews, baseLayer, @@ -115,8 +119,10 @@ LayerType* FuseDivisionLayer(OptimizationViews& optimizationViews, ActivationDescriptor& activationDesc, std::string name) { + ARMNN_NO_DEPRECATE_WARN_BEGIN IConnectableLayer* replacement = optimizationViews.GetINetwork()->AddDivisionLayer(name.c_str()); LayerType* replacementLayer = PolymorphicDowncast(replacement); + ARMNN_NO_DEPRECATE_WARN_END FuseLayer(optimizationViews, baseLayer, @@ -134,8 +140,10 @@ LayerType* FuseMultiplicationLayer(OptimizationViews& optimizationViews, ActivationDescriptor& activationDesc, std::string name) { + ARMNN_NO_DEPRECATE_WARN_BEGIN IConnectableLayer* replacement = optimizationViews.GetINetwork()->AddMultiplicationLayer(name.c_str()); LayerType* replacementLayer = PolymorphicDowncast(replacement); + ARMNN_NO_DEPRECATE_WARN_END FuseLayer(optimizationViews, baseLayer, diff --git a/src/backends/backendsCommon/WorkloadFactoryBase.hpp b/src/backends/backendsCommon/WorkloadFactoryBase.hpp index 00e549c933..e793b44cf4 100644 --- a/src/backends/backendsCommon/WorkloadFactoryBase.hpp +++ b/src/backends/backendsCommon/WorkloadFactoryBase.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2019 Arm Ltd. All rights reserved. +// Copyright © 2019-2023 Arm Ltd. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -43,6 +43,7 @@ public: const WorkloadInfo& /*info*/) const override { return nullptr; } + ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02") std::unique_ptr CreateAddition(const AdditionQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const override { return nullptr; } @@ -103,6 +104,7 @@ public: const WorkloadInfo& /*info*/) const override { return nullptr; } + ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02") std::unique_ptr CreateDivision(const DivisionQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const override { return nullptr; } @@ -152,6 +154,7 @@ public: const WorkloadInfo& /*info*/) const override { return nullptr; } + ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02") std::unique_ptr CreateMaximum(const MaximumQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const override { return nullptr; } @@ -172,10 +175,12 @@ public: const WorkloadInfo& /*info*/) const override { return nullptr; } + ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02") std::unique_ptr CreateMinimum(const MinimumQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const override { return nullptr; } + ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02") std::unique_ptr CreateMultiplication(const MultiplicationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const override { return nullptr; } @@ -248,6 +253,7 @@ public: const WorkloadInfo& /*info*/) const override { return nullptr; } + ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02") std::unique_ptr CreateSubtraction(const SubtractionQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const override { return nullptr; } diff --git a/src/backends/backendsCommon/test/AdditionEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/AdditionEndToEndTestImpl.hpp index f33521888f..a0d1af6ab7 100644 --- a/src/backends/backendsCommon/test/AdditionEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/AdditionEndToEndTestImpl.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #pragma once @@ -30,8 +30,7 @@ armnn::INetworkPtr CreateAdditionNetwork(const armnn::TensorShape& inputXShape, TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset); - - IConnectableLayer* addition = network->AddAdditionLayer("addition"); + IConnectableLayer* addition = network->AddElementwiseBinaryLayer(BinaryOperation::Add, "addition"); IConnectableLayer* inputX = network->AddInputLayer(0, "inputX"); IConnectableLayer* inputY = network->AddInputLayer(1, "inputY"); IConnectableLayer* output = network->AddOutputLayer(0, "output"); diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp index 795fc13c32..9213f0eac9 100644 --- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp @@ -39,7 +39,7 @@ bool ConstantUsageTest(const std::vector& computeDevice, IConnectableLayer* input = net->AddInputLayer(0); IConnectableLayer* constant = net->AddConstantLayer(ConstTensor(commonTensorInfo, constantData)); - IConnectableLayer* add = net->AddAdditionLayer(); + IConnectableLayer* add = net->AddElementwiseBinaryLayer(BinaryOperation::Add); IConnectableLayer* output = net->AddOutputLayer(0); input->GetOutputSlot(0).Connect(add->GetInputSlot(0)); @@ -176,7 +176,8 @@ void EndToEndLayerTestImpl(INetworkPtr network, for (unsigned int i = 0; i < out.size(); ++i) { CHECK_MESSAGE(Compare(it.second[i], out[i], tolerance) == true, - "Actual output: " << out[i] << ". Expected output:" << it.second[i]); + "Position: " << i <<" Actual output: " << static_cast(out[i]) << + ". Expected output:" << static_cast(it.second[i])); } } diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp index 5b95d3cd92..5475762a53 100644 --- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp +++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp @@ -622,7 +622,9 @@ struct LayerTypePolicy; // Every entry in the armnn::LayerType enum must be accounted for below. 
DECLARE_LAYER_POLICY_2_PARAM(Activation) +ARMNN_NO_DEPRECATE_WARN_BEGIN DECLARE_LAYER_POLICY_1_PARAM(Addition) +ARMNN_NO_DEPRECATE_WARN_END DECLARE_LAYER_POLICY_2_PARAM(ArgMinMax) @@ -694,15 +696,21 @@ DECLARE_LAYER_POLICY_2_PARAM(Lstm) DECLARE_LAYER_POLICY_MAP_PARAM(Map, void) +ARMNN_NO_DEPRECATE_WARN_BEGIN DECLARE_LAYER_POLICY_1_PARAM(Maximum) +ARMNN_NO_DEPRECATE_WARN_END DECLARE_LAYER_POLICY_2_PARAM(Mean) DECLARE_LAYER_POLICY_1_PARAM(Merge) +ARMNN_NO_DEPRECATE_WARN_BEGIN DECLARE_LAYER_POLICY_1_PARAM(Minimum) +ARMNN_NO_DEPRECATE_WARN_END +ARMNN_NO_DEPRECATE_WARN_BEGIN DECLARE_LAYER_POLICY_1_PARAM(Multiplication) +ARMNN_NO_DEPRECATE_WARN_END DECLARE_LAYER_POLICY_2_PARAM(Normalization) @@ -726,7 +734,9 @@ DECLARE_LAYER_POLICY_2_PARAM(QLstm) DECLARE_LAYER_POLICY_1_PARAM(QuantizedLstm) +ARMNN_NO_DEPRECATE_WARN_BEGIN DECLARE_LAYER_POLICY_1_PARAM(Division) +ARMNN_NO_DEPRECATE_WARN_END DECLARE_LAYER_POLICY_1_PARAM(Rank) @@ -752,7 +762,9 @@ DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(StandIn) DECLARE_LAYER_POLICY_2_PARAM(StridedSlice) +ARMNN_NO_DEPRECATE_WARN_BEGIN DECLARE_LAYER_POLICY_1_PARAM(Subtraction) +ARMNN_NO_DEPRECATE_WARN_END DECLARE_LAYER_POLICY_2_PARAM(Reduce) diff --git a/src/backends/backendsCommon/test/OptimizationViewsTests.cpp b/src/backends/backendsCommon/test/OptimizationViewsTests.cpp index ff3217911a..665358b9c6 100644 --- a/src/backends/backendsCommon/test/OptimizationViewsTests.cpp +++ b/src/backends/backendsCommon/test/OptimizationViewsTests.cpp @@ -263,7 +263,9 @@ TEST_CASE("OptimizeViewsValidateDeviceMockBackend") armnn::IConnectableLayer* input = net->AddInputLayer(0, "inLayer0"); armnn::IConnectableLayer* input1 = net->AddInputLayer(1, "inLayer1"); + ARMNN_NO_DEPRECATE_WARN_BEGIN armnn::IConnectableLayer* addition = net->AddAdditionLayer("addLayer"); + ARMNN_NO_DEPRECATE_WARN_END armnn::IConnectableLayer* output = net->AddOutputLayer(0, "outLayer"); diff --git a/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp index f5a6c4217b..7303733e17 100644 --- a/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp +++ b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp @@ -133,6 +133,7 @@ Pooling2dLayer* AddPoolingLayer(Graph& graph, } // Convenience function to add an addition layer to a graph +ARMNN_NO_DEPRECATE_WARN_BEGIN AdditionLayer* AddAdditionaLayer(Graph& graph, LayerNameToLayerMap& layersInGraph, const std::string& layerName, @@ -144,6 +145,7 @@ AdditionLayer* AddAdditionaLayer(Graph& graph, layersInGraph.insert(std::make_pair(additionLayer->GetName(), additionLayer)); return additionLayer; } +ARMNN_NO_DEPRECATE_WARN_END // Convenience function to check that the given substitution matches the specified expected values void CheckSubstitution(const OptimizationViews::SubstitutionPair& substitution, @@ -750,7 +752,9 @@ SubgraphView::SubgraphViewPtr BuildPartiallyOptimizableSubgraph2(Graph& graph, L "conv2 layer unoptimizable", outputInfo); Convolution2dLayer* const conv3Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor, "conv3 layer", outputInfo); + ARMNN_NO_DEPRECATE_WARN_BEGIN AdditionLayer* const addLayer = AddAdditionaLayer(graph, layersInGraph, "add layer", outputInfo); + ARMNN_NO_DEPRECATE_WARN_END Layer* const outputLayer = AddOutputLayer(graph, "output layer"); // Connect the network diff --git a/src/backends/cl/ClBackend.cpp b/src/backends/cl/ClBackend.cpp index a10b6fbb43..46ba9cb717 100644 --- a/src/backends/cl/ClBackend.cpp +++ 
b/src/backends/cl/ClBackend.cpp @@ -461,6 +461,7 @@ OptimizationViews ClBackend::OptimizeSubgraphView(const SubgraphView& subgraph, } else if (base.GetType() == LayerType::Addition) { + ARMNN_NO_DEPRECATE_WARN_BEGIN AdditionLayer* baseLayer = PolymorphicDowncast(&base); arm_compute::Status status = ClAdditionValidate( @@ -479,9 +480,11 @@ OptimizationViews ClBackend::OptimizeSubgraphView(const SubgraphView& subgraph, untouched.erase(baseLayer->GetGuid()); untouched.erase(activationLayer->GetGuid()); } + ARMNN_NO_DEPRECATE_WARN_END } else if (base.GetType() == LayerType::Division) { + ARMNN_NO_DEPRECATE_WARN_BEGIN DivisionLayer* baseLayer = PolymorphicDowncast(&base); arm_compute::Status status = ClDivisionWorkloadValidate( @@ -500,9 +503,11 @@ OptimizationViews ClBackend::OptimizeSubgraphView(const SubgraphView& subgraph, untouched.erase(baseLayer->GetGuid()); untouched.erase(activationLayer->GetGuid()); } + ARMNN_NO_DEPRECATE_WARN_END } else if (base.GetType() == LayerType::Multiplication) { + ARMNN_NO_DEPRECATE_WARN_BEGIN MultiplicationLayer* baseLayer = PolymorphicDowncast(&base); arm_compute::Status status = ClMultiplicationWorkloadValidate( @@ -521,9 +526,11 @@ OptimizationViews ClBackend::OptimizeSubgraphView(const SubgraphView& subgraph, untouched.erase(baseLayer->GetGuid()); untouched.erase(activationLayer->GetGuid()); } + ARMNN_NO_DEPRECATE_WARN_END } else if (base.GetType() == LayerType::Subtraction) { + ARMNN_NO_DEPRECATE_WARN_BEGIN SubtractionLayer* baseLayer = PolymorphicDowncast(&base); arm_compute::Status status = ClSubtractionValidate( @@ -542,6 +549,7 @@ OptimizationViews ClBackend::OptimizeSubgraphView(const SubgraphView& subgraph, untouched.erase(baseLayer->GetGuid()); untouched.erase(activationLayer->GetGuid()); } + ARMNN_NO_DEPRECATE_WARN_END } else if (base.GetType() == LayerType::ElementwiseBinary) { diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp index 89bcf9bc01..e1266c8299 100644 --- a/src/backends/cl/ClLayerSupport.cpp +++ b/src/backends/cl/ClLayerSupport.cpp @@ -346,7 +346,9 @@ bool ClLayerSupport::IsLayerSupported(const LayerType& type, case LayerType::Dequantize: return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported); case LayerType::Division: + ARMNN_NO_DEPRECATE_WARN_BEGIN return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported); + ARMNN_NO_DEPRECATE_WARN_END case LayerType::ElementwiseBinary: { auto desc = *(PolymorphicDowncast(&descriptor)); @@ -474,16 +476,22 @@ bool ClLayerSupport::IsLayerSupported(const LayerType& type, infos[2], reasonIfUnsupported); case LayerType::Maximum: + ARMNN_NO_DEPRECATE_WARN_BEGIN return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported); + ARMNN_NO_DEPRECATE_WARN_END case LayerType::Mean: return IsMeanSupported(infos[0], infos[1], *(PolymorphicDowncast(&descriptor)), reasonIfUnsupported); case LayerType::Minimum: + ARMNN_NO_DEPRECATE_WARN_BEGIN return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported); + ARMNN_NO_DEPRECATE_WARN_END case LayerType::Multiplication: + ARMNN_NO_DEPRECATE_WARN_BEGIN return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported); + ARMNN_NO_DEPRECATE_WARN_END case LayerType::Normalization: return IsNormalizationSupported(infos[0], infos[1], @@ -604,7 +612,9 @@ bool ClLayerSupport::IsLayerSupported(const LayerType& type, *(PolymorphicDowncast(&descriptor)), reasonIfUnsupported); case LayerType::Subtraction: + ARMNN_NO_DEPRECATE_WARN_BEGIN return 
IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported); + ARMNN_NO_DEPRECATE_WARN_END case LayerType::Transpose: return IsTransposeSupported(infos[0], infos[1], diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp index 2d784e3df8..fa28141dcb 100644 --- a/src/backends/cl/ClLayerSupport.hpp +++ b/src/backends/cl/ClLayerSupport.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #pragma once @@ -30,6 +30,7 @@ public: const ActivationDescriptor& descriptor, Optional reasonIfUnsupported = EmptyOptional()) const override; + ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02") bool IsAdditionSupported(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& output, @@ -128,13 +129,14 @@ public: const Optional& biases, Optional reason = EmptyOptional()) const override; + ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02") bool IsDivisionSupported(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const override; bool IsElementwiseUnarySupported(const TensorInfo& input, - const TensorInfo& ouput, + const TensorInfo& output, const ElementwiseUnaryDescriptor& descriptor, Optional reasonIfUnsupported = EmptyOptional()) const override; @@ -200,6 +202,7 @@ public: const LstmInputParamsInfo& paramsInfo, Optional reasonIfUnsupported = EmptyOptional()) const override; + ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02") bool IsMaximumSupported(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& output, @@ -210,11 +213,13 @@ public: const MeanDescriptor& descriptor, Optional reasonIfUnsupported = EmptyOptional()) const override; + ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02") bool IsMinimumSupported(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const override; + ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02") bool IsMultiplicationSupported(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& output, @@ -325,6 +330,7 @@ public: const StridedSliceDescriptor& descriptor, Optional reasonIfUnsupported = EmptyOptional()) const override; + ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02") bool IsSubtractionSupported(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& output, diff --git a/src/backends/neon/NeonBackend.cpp b/src/backends/neon/NeonBackend.cpp index cea2aa3eba..c68f4ce95b 100644 --- a/src/backends/neon/NeonBackend.cpp +++ b/src/backends/neon/NeonBackend.cpp @@ -313,6 +313,7 @@ OptimizationViews NeonBackend::OptimizeSubgraphView(const SubgraphView& subgraph } else if (base.GetType() == LayerType::Addition) { + ARMNN_NO_DEPRECATE_WARN_BEGIN AdditionLayer* baseLayer = PolymorphicDowncast(&base); arm_compute::Status status = NeonAdditionWorkloadValidate( @@ -331,9 +332,11 @@ OptimizationViews NeonBackend::OptimizeSubgraphView(const SubgraphView& subgraph untouched.erase(baseLayer->GetGuid()); untouched.erase(activationLayer->GetGuid()); } + ARMNN_NO_DEPRECATE_WARN_END } else if (base.GetType() == LayerType::Division) { + ARMNN_NO_DEPRECATE_WARN_BEGIN DivisionLayer* baseLayer = PolymorphicDowncast(&base); 
arm_compute::Status status = NeonDivisionWorkloadValidate( @@ -352,9 +355,11 @@ OptimizationViews NeonBackend::OptimizeSubgraphView(const SubgraphView& subgraph untouched.erase(baseLayer->GetGuid()); untouched.erase(activationLayer->GetGuid()); } + ARMNN_NO_DEPRECATE_WARN_END } else if (base.GetType() == LayerType::Multiplication) { + ARMNN_NO_DEPRECATE_WARN_BEGIN MultiplicationLayer* baseLayer = PolymorphicDowncast(&base); arm_compute::Status status = NeonMultiplicationWorkloadValidate( @@ -373,9 +378,11 @@ OptimizationViews NeonBackend::OptimizeSubgraphView(const SubgraphView& subgraph untouched.erase(baseLayer->GetGuid()); untouched.erase(activationLayer->GetGuid()); } + ARMNN_NO_DEPRECATE_WARN_END } else if (base.GetType() == LayerType::Subtraction) { + ARMNN_NO_DEPRECATE_WARN_BEGIN SubtractionLayer* baseLayer = PolymorphicDowncast(&base); arm_compute::Status status = NeonSubtractionWorkloadValidate( @@ -394,6 +401,7 @@ OptimizationViews NeonBackend::OptimizeSubgraphView(const SubgraphView& subgraph untouched.erase(baseLayer->GetGuid()); untouched.erase(activationLayer->GetGuid()); } + ARMNN_NO_DEPRECATE_WARN_END } else if (base.GetType() == LayerType::ElementwiseBinary) { diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp index 66718cc481..19881c26a0 100644 --- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp +++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -128,70 +128,86 @@ TEST_CASE("CreateAdditionFloat16Workload") TEST_CASE("CreateAdditionFloatWorkload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN NeonCreateElementwiseWorkloadTest(); + ARMNN_NO_DEPRECATE_WARN_END } #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC TEST_CASE("CreateSubtractionFloat16Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN NeonCreateElementwiseWorkloadTest(); + ARMNN_NO_DEPRECATE_WARN_END } #endif TEST_CASE("CreateSubtractionFloatWorkload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN NeonCreateElementwiseWorkloadTest(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateSubtractionUint8Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN NeonCreateElementwiseWorkloadTest(); + ARMNN_NO_DEPRECATE_WARN_END } #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC TEST_CASE("CreateMultiplicationFloat16Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN NeonCreateElementwiseWorkloadTest(); + ARMNN_NO_DEPRECATE_WARN_END } #endif TEST_CASE("CreateMultiplicationFloatWorkload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN NeonCreateElementwiseWorkloadTest(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateMultiplicationUint8Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN NeonCreateElementwiseWorkloadTest(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateDivisionFloatWorkloadTest") { + ARMNN_NO_DEPRECATE_WARN_BEGIN NeonCreateElementwiseWorkloadTest(); + ARMNN_NO_DEPRECATE_WARN_END } template diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp index c46a9e5bac..3bba0b7393 100644 --- a/src/backends/reference/test/RefCreateWorkloadTests.cpp +++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -142,146 +142,182 @@ TEST_CASE("CreateMultiplicationWorkloadWithBlobTest") TEST_CASE("CreateAdditionFloatWorkload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, AdditionQueueDescriptor, AdditionLayer, armnn::DataType::Float32>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateAdditionUint8Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, AdditionQueueDescriptor, AdditionLayer, armnn::DataType::QAsymmU8>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateAdditionInt16Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, AdditionQueueDescriptor, AdditionLayer, armnn::DataType::QSymmS16>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateAdditionInt32Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, AdditionQueueDescriptor, AdditionLayer, armnn::DataType::Signed32>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateSubtractionFloat32Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, SubtractionQueueDescriptor, SubtractionLayer, armnn::DataType::Float32>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateSubtractionFloat16Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, SubtractionQueueDescriptor, SubtractionLayer, armnn::DataType::Float16>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateSubtractionUint8Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, SubtractionQueueDescriptor, SubtractionLayer, armnn::DataType::QAsymmU8>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateSubtractionInt16Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, SubtractionQueueDescriptor, SubtractionLayer, armnn::DataType::QSymmS16>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateSubtractionInt32Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, SubtractionQueueDescriptor, SubtractionLayer, armnn::DataType::Signed32>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateMultiplicationFloatWorkload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, MultiplicationQueueDescriptor, MultiplicationLayer, armnn::DataType::Float32>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateMultiplicationUint8Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, MultiplicationQueueDescriptor, MultiplicationLayer, armnn::DataType::QAsymmU8>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateMultiplicationInt16Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, MultiplicationQueueDescriptor, MultiplicationLayer, armnn::DataType::QSymmS16>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateMultiplicationInt32Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, MultiplicationQueueDescriptor, MultiplicationLayer, armnn::DataType::Signed32>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateDivisionFloat32Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, DivisionQueueDescriptor, DivisionLayer, armnn::DataType::Float32>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateDivisionFloat16Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, DivisionQueueDescriptor, DivisionLayer, armnn::DataType::Float16>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateDivisionUint8Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, DivisionQueueDescriptor, DivisionLayer, 
armnn::DataType::QAsymmU8>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateDivisionInt16Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, DivisionQueueDescriptor, DivisionLayer, armnn::DataType::QSymmS16>(); + ARMNN_NO_DEPRECATE_WARN_END } TEST_CASE("CreateDivisionInt32Workload") { + ARMNN_NO_DEPRECATE_WARN_BEGIN RefCreateElementwiseWorkloadTest, DivisionQueueDescriptor, DivisionLayer, armnn::DataType::Signed32>(); + ARMNN_NO_DEPRECATE_WARN_END } template -- cgit v1.2.1
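
Migration note (not part of the patch): the layers deprecated here (AddAdditionLayer, AddSubtractionLayer, AddMultiplicationLayer, AddDivisionLayer, AddMaximumLayer, AddMinimumLayer) are superseded by the single AddElementwiseBinaryLayer entry point, as the changes to AdditionEndToEndTestImpl.hpp and EndToEndTestImpl.hpp above show. A minimal sketch of the new-style call follows; the layer names and the exact headers are illustrative assumptions, not taken from this patch:

    #include <armnn/INetwork.hpp>
    #include <armnn/Types.hpp>

    // Minimal sketch: build an elementwise Add with the unified API that
    // replaces the deprecated AddAdditionLayer (removal planned for 24.02).
    armnn::INetworkPtr BuildAddNetwork()
    {
        using namespace armnn;
        INetworkPtr net = INetwork::Create();

        IConnectableLayer* inputX = net->AddInputLayer(0, "inputX");
        IConnectableLayer* inputY = net->AddInputLayer(1, "inputY");
        IConnectableLayer* add    = net->AddElementwiseBinaryLayer(BinaryOperation::Add, "add");
        IConnectableLayer* output = net->AddOutputLayer(0, "output");

        // Wire inputs into the binary layer and the result into the output,
        // mirroring the connection pattern used in AdditionEndToEndTestImpl.hpp.
        inputX->GetOutputSlot(0).Connect(add->GetInputSlot(0));
        inputY->GetOutputSlot(0).Connect(add->GetInputSlot(1));
        add->GetOutputSlot(0).Connect(output->GetInputSlot(0));
        return net;
    }

The BinaryOperation overload mirrors the replacement made in this patch; where the operation is chosen at runtime, an ElementwiseBinaryDescriptor (as referenced in the ClLayerSupport.cpp hunk above) can be constructed and passed instead.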
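
Where existing code has to keep calling the deprecated entry points until their removal in 24.02 (the backend fusion paths and legacy unit tests above do exactly this), the warning is silenced locally with the ARMNN_NO_DEPRECATE_WARN_BEGIN / ARMNN_NO_DEPRECATE_WARN_END pair rather than globally. A short sketch of that pattern, assuming the macros come from armnn's Deprecated.hpp header; the helper name is hypothetical:

    #include <armnn/Deprecated.hpp>
    #include <armnn/INetwork.hpp>

    // Hypothetical helper that deliberately keeps using the deprecated API
    // until 24.02, suppressing the deprecation warning for this call only.
    armnn::IConnectableLayer* AddLegacyAddition(armnn::INetwork& net)
    {
        ARMNN_NO_DEPRECATE_WARN_BEGIN
        armnn::IConnectableLayer* add = net.AddAdditionLayer("legacy-add");
        ARMNN_NO_DEPRECATE_WARN_END
        return add;
    }

The declarations themselves are marked with ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02"), which attaches both the suggested replacement and the planned removal release to the compiler diagnostic.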