aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorNikhil Raj <nikhil.raj@arm.com>2022-05-30 11:08:52 +0100
committerNikhil Raj Arm <nikhil.raj@arm.com>2022-06-27 16:48:53 +0000
commit4d2eec0436f75d526c2ec25623ad73c8d1ee9ac3 (patch)
tree927ab42094125d813b6acf01329a44db0e39c23a
parent3cb65014be65f54b55f3e7ceb2f0b7fd0d27cf5c (diff)
downloadarmnn-4d2eec0436f75d526c2ec25623ad73c8d1ee9ac3.tar.gz
IVGCVSW-6981 Remove deprecated code 22.05 [Post Release]
Signed-off-by: Nikhil Raj <nikhil.raj@arm.com> Change-Id: I9ccaefbe28ea572e9e2b4a2168574804667f7460
-rw-r--r--CMakeLists.txt2
-rw-r--r--delegate/src/MultiLayerFacade.hpp16
-rw-r--r--include/armnn/BackendHelper.hpp4
-rw-r--r--include/armnn/Descriptors.hpp6
-rw-r--r--include/armnn/ILayerVisitor.hpp534
-rw-r--r--include/armnn/INetwork.hpp24
-rw-r--r--include/armnn/IStrategy.hpp5
-rw-r--r--include/armnn/LayerVisitorBase.hpp271
-rw-r--r--include/armnn/backends/CMakeLists.txt1
-rw-r--r--include/armnn/backends/CpuTensorHandleFwd.hpp20
-rw-r--r--include/armnn/backends/IBackendInternal.hpp4
-rw-r--r--include/armnn/backends/TensorHandle.hpp13
-rw-r--r--src/armnn/BackendHelper.cpp16
-rw-r--r--src/armnn/Descriptors.cpp5
-rw-r--r--src/armnn/Layer.hpp2
-rw-r--r--src/armnn/Network.cpp17
-rw-r--r--src/armnn/Network.hpp4
-rw-r--r--src/armnn/layers/AbsLayer.cpp6
-rw-r--r--src/armnn/layers/AbsLayer.hpp5
-rw-r--r--src/armnn/layers/ActivationLayer.cpp6
-rw-r--r--src/armnn/layers/ActivationLayer.hpp5
-rw-r--r--src/armnn/layers/AdditionLayer.cpp6
-rw-r--r--src/armnn/layers/AdditionLayer.hpp4
-rw-r--r--src/armnn/layers/ArgMinMaxLayer.cpp6
-rw-r--r--src/armnn/layers/ArgMinMaxLayer.hpp4
-rw-r--r--src/armnn/layers/BatchNormalizationLayer.cpp18
-rw-r--r--src/armnn/layers/BatchNormalizationLayer.hpp4
-rw-r--r--src/armnn/layers/BatchToSpaceNdLayer.cpp6
-rw-r--r--src/armnn/layers/BatchToSpaceNdLayer.hpp4
-rw-r--r--src/armnn/layers/CastLayer.cpp8
-rw-r--r--src/armnn/layers/CastLayer.hpp4
-rw-r--r--src/armnn/layers/ChannelShuffleLayer.cpp7
-rw-r--r--src/armnn/layers/ChannelShuffleLayer.hpp3
-rw-r--r--src/armnn/layers/ComparisonLayer.cpp6
-rw-r--r--src/armnn/layers/ComparisonLayer.hpp4
-rw-r--r--src/armnn/layers/ConcatLayer.cpp6
-rw-r--r--src/armnn/layers/ConcatLayer.hpp4
-rw-r--r--src/armnn/layers/ConstantLayer.cpp9
-rw-r--r--src/armnn/layers/ConstantLayer.hpp4
-rw-r--r--src/armnn/layers/ConvertBf16ToFp32Layer.cpp6
-rw-r--r--src/armnn/layers/ConvertBf16ToFp32Layer.hpp4
-rw-r--r--src/armnn/layers/ConvertFp16ToFp32Layer.cpp6
-rw-r--r--src/armnn/layers/ConvertFp16ToFp32Layer.hpp4
-rw-r--r--src/armnn/layers/ConvertFp32ToBf16Layer.cpp6
-rw-r--r--src/armnn/layers/ConvertFp32ToBf16Layer.hpp4
-rw-r--r--src/armnn/layers/ConvertFp32ToFp16Layer.cpp6
-rw-r--r--src/armnn/layers/ConvertFp32ToFp16Layer.hpp4
-rw-r--r--src/armnn/layers/Convolution2dLayer.cpp9
-rw-r--r--src/armnn/layers/Convolution2dLayer.hpp3
-rw-r--r--src/armnn/layers/Convolution3dLayer.cpp8
-rw-r--r--src/armnn/layers/Convolution3dLayer.hpp4
-rw-r--r--src/armnn/layers/DebugLayer.cpp6
-rw-r--r--src/armnn/layers/DebugLayer.hpp4
-rw-r--r--src/armnn/layers/DepthToSpaceLayer.cpp6
-rw-r--r--src/armnn/layers/DepthToSpaceLayer.hpp4
-rw-r--r--src/armnn/layers/DepthwiseConvolution2dLayer.cpp7
-rw-r--r--src/armnn/layers/DepthwiseConvolution2dLayer.hpp4
-rw-r--r--src/armnn/layers/DequantizeLayer.cpp6
-rw-r--r--src/armnn/layers/DequantizeLayer.hpp4
-rw-r--r--src/armnn/layers/DetectionPostProcessLayer.cpp10
-rw-r--r--src/armnn/layers/DetectionPostProcessLayer.hpp4
-rw-r--r--src/armnn/layers/DivisionLayer.cpp6
-rw-r--r--src/armnn/layers/DivisionLayer.hpp4
-rw-r--r--src/armnn/layers/ElementwiseUnaryLayer.cpp6
-rw-r--r--src/armnn/layers/ElementwiseUnaryLayer.hpp4
-rw-r--r--src/armnn/layers/FakeQuantizationLayer.cpp8
-rw-r--r--src/armnn/layers/FakeQuantizationLayer.hpp3
-rw-r--r--src/armnn/layers/FillLayer.cpp6
-rw-r--r--src/armnn/layers/FillLayer.hpp4
-rw-r--r--src/armnn/layers/FloorLayer.cpp6
-rw-r--r--src/armnn/layers/FloorLayer.hpp4
-rw-r--r--src/armnn/layers/FullyConnectedLayer.cpp7
-rw-r--r--src/armnn/layers/FullyConnectedLayer.hpp4
-rw-r--r--src/armnn/layers/GatherLayer.cpp6
-rw-r--r--src/armnn/layers/GatherLayer.hpp4
-rw-r--r--src/armnn/layers/GatherNdLayer.cpp8
-rw-r--r--src/armnn/layers/GatherNdLayer.hpp4
-rw-r--r--src/armnn/layers/InputLayer.cpp6
-rw-r--r--src/armnn/layers/InputLayer.hpp4
-rw-r--r--src/armnn/layers/InstanceNormalizationLayer.cpp6
-rw-r--r--src/armnn/layers/InstanceNormalizationLayer.hpp4
-rw-r--r--src/armnn/layers/L2NormalizationLayer.cpp6
-rw-r--r--src/armnn/layers/L2NormalizationLayer.hpp4
-rw-r--r--src/armnn/layers/LogSoftmaxLayer.cpp6
-rw-r--r--src/armnn/layers/LogSoftmaxLayer.hpp4
-rw-r--r--src/armnn/layers/LogicalBinaryLayer.cpp6
-rw-r--r--src/armnn/layers/LogicalBinaryLayer.hpp4
-rw-r--r--src/armnn/layers/LstmLayer.cpp212
-rw-r--r--src/armnn/layers/LstmLayer.hpp4
-rw-r--r--src/armnn/layers/MapLayer.cpp6
-rw-r--r--src/armnn/layers/MapLayer.hpp4
-rw-r--r--src/armnn/layers/MaximumLayer.cpp6
-rw-r--r--src/armnn/layers/MaximumLayer.hpp4
-rw-r--r--src/armnn/layers/MeanLayer.cpp6
-rw-r--r--src/armnn/layers/MeanLayer.hpp4
-rw-r--r--src/armnn/layers/MemCopyLayer.cpp8
-rw-r--r--src/armnn/layers/MemCopyLayer.hpp4
-rw-r--r--src/armnn/layers/MemImportLayer.cpp8
-rw-r--r--src/armnn/layers/MemImportLayer.hpp4
-rw-r--r--src/armnn/layers/MergeLayer.cpp6
-rw-r--r--src/armnn/layers/MergeLayer.hpp4
-rw-r--r--src/armnn/layers/MinimumLayer.cpp6
-rw-r--r--src/armnn/layers/MinimumLayer.hpp4
-rw-r--r--src/armnn/layers/MultiplicationLayer.cpp6
-rw-r--r--src/armnn/layers/MultiplicationLayer.hpp4
-rw-r--r--src/armnn/layers/NormalizationLayer.cpp6
-rw-r--r--src/armnn/layers/NormalizationLayer.hpp4
-rw-r--r--src/armnn/layers/OutputLayer.cpp6
-rw-r--r--src/armnn/layers/OutputLayer.hpp4
-rw-r--r--src/armnn/layers/PadLayer.cpp6
-rw-r--r--src/armnn/layers/PadLayer.hpp5
-rw-r--r--src/armnn/layers/PermuteLayer.cpp6
-rw-r--r--src/armnn/layers/PermuteLayer.hpp4
-rw-r--r--src/armnn/layers/Pooling2dLayer.cpp6
-rw-r--r--src/armnn/layers/Pooling2dLayer.hpp4
-rw-r--r--src/armnn/layers/Pooling3dLayer.cpp6
-rw-r--r--src/armnn/layers/Pooling3dLayer.hpp4
-rw-r--r--src/armnn/layers/PreCompiledLayer.cpp8
-rw-r--r--src/armnn/layers/PreCompiledLayer.hpp5
-rw-r--r--src/armnn/layers/PreluLayer.cpp6
-rw-r--r--src/armnn/layers/PreluLayer.hpp4
-rw-r--r--src/armnn/layers/QLstmLayer.cpp232
-rw-r--r--src/armnn/layers/QLstmLayer.hpp5
-rw-r--r--src/armnn/layers/QuantizeLayer.cpp8
-rw-r--r--src/armnn/layers/QuantizeLayer.hpp5
-rw-r--r--src/armnn/layers/QuantizedLstmLayer.cpp139
-rw-r--r--src/armnn/layers/QuantizedLstmLayer.hpp5
-rw-r--r--src/armnn/layers/RankLayer.cpp7
-rw-r--r--src/armnn/layers/RankLayer.hpp5
-rw-r--r--src/armnn/layers/ReduceLayer.cpp6
-rw-r--r--src/armnn/layers/ReduceLayer.hpp4
-rw-r--r--src/armnn/layers/ReshapeLayer.cpp6
-rw-r--r--src/armnn/layers/ReshapeLayer.hpp4
-rw-r--r--src/armnn/layers/ResizeLayer.cpp6
-rw-r--r--src/armnn/layers/ResizeLayer.hpp4
-rw-r--r--src/armnn/layers/RsqrtLayer.cpp6
-rw-r--r--src/armnn/layers/RsqrtLayer.hpp4
-rw-r--r--src/armnn/layers/ShapeLayer.cpp7
-rw-r--r--src/armnn/layers/ShapeLayer.hpp5
-rw-r--r--src/armnn/layers/SliceLayer.cpp6
-rw-r--r--src/armnn/layers/SliceLayer.hpp4
-rw-r--r--src/armnn/layers/SoftmaxLayer.cpp6
-rw-r--r--src/armnn/layers/SoftmaxLayer.hpp4
-rw-r--r--src/armnn/layers/SpaceToBatchNdLayer.cpp6
-rw-r--r--src/armnn/layers/SpaceToBatchNdLayer.hpp4
-rw-r--r--src/armnn/layers/SpaceToDepthLayer.cpp6
-rw-r--r--src/armnn/layers/SpaceToDepthLayer.hpp4
-rw-r--r--src/armnn/layers/SplitterLayer.cpp6
-rw-r--r--src/armnn/layers/SplitterLayer.hpp4
-rw-r--r--src/armnn/layers/StackLayer.cpp6
-rw-r--r--src/armnn/layers/StackLayer.hpp4
-rw-r--r--src/armnn/layers/StandInLayer.cpp7
-rw-r--r--src/armnn/layers/StandInLayer.hpp6
-rw-r--r--src/armnn/layers/StridedSliceLayer.cpp6
-rw-r--r--src/armnn/layers/StridedSliceLayer.hpp4
-rw-r--r--src/armnn/layers/SubtractionLayer.cpp6
-rw-r--r--src/armnn/layers/SubtractionLayer.hpp4
-rw-r--r--src/armnn/layers/SwitchLayer.cpp6
-rw-r--r--src/armnn/layers/SwitchLayer.hpp5
-rw-r--r--src/armnn/layers/TransposeConvolution2dLayer.cpp18
-rw-r--r--src/armnn/layers/TransposeConvolution2dLayer.hpp4
-rw-r--r--src/armnn/layers/TransposeLayer.cpp6
-rw-r--r--src/armnn/layers/TransposeLayer.hpp4
-rw-r--r--src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp8
-rw-r--r--src/armnn/layers/UnidirectionalSequenceLstmLayer.hpp4
-rw-r--r--src/armnn/layers/UnmapLayer.cpp6
-rw-r--r--src/armnn/layers/UnmapLayer.hpp4
-rw-r--r--src/armnn/test/NetworkTests.cpp1
-rw-r--r--src/armnn/test/TensorHandleStrategyTest.cpp1
-rw-r--r--src/armnnSerializer/Serializer.hpp2
170 files changed, 171 insertions, 2159 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 959675db85..1fcadb0475 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -160,13 +160,11 @@ list(APPEND armnn_sources
include/armnn/backends/ILayerSupport.hpp
include/armnn/backends/ICustomAllocator.hpp
include/armnn/IAsyncExecutionCallback.hpp
- include/armnn/ILayerVisitor.hpp
include/armnn/INetwork.hpp
include/armnn/IProfiler.hpp
include/armnn/IRuntime.hpp
include/armnn/IStrategy.hpp
include/armnn/IWorkingMemHandle.hpp
- include/armnn/LayerVisitorBase.hpp
include/armnn/Logging.hpp
include/armnn/LstmParams.hpp
include/armnn/MemorySources.hpp
diff --git a/delegate/src/MultiLayerFacade.hpp b/delegate/src/MultiLayerFacade.hpp
index c0df47bac9..2fdfc7082a 100644
--- a/delegate/src/MultiLayerFacade.hpp
+++ b/delegate/src/MultiLayerFacade.hpp
@@ -96,26 +96,12 @@ public:
return m_FirstLayer->GetGuid();
}
- // The Accept function needs to be wrapped in a no warn macro to avoid deprecation warnings from
- // the deprecated ILayerVisitor which is used in the function.
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Accept is deprecated. The ILayerVisitor that works in conjunction with this "
- "Accept function is deprecated. Use IStrategy in combination with "
- "ExecuteStrategy instead, which is an ABI/API stable version of the "
- "visitor pattern.",
- "22.05")
- virtual void Accept(armnn::ILayerVisitor& visitor) const override
- {
- // Do not expect this function to be used so not providing an implementation
- }
- ARMNN_NO_DEPRECATE_WARN_END
-
virtual void ExecuteStrategy(armnn::IStrategy& strategy) const override
{
// Do not expect this function to be used so not providing an implementation
// if an implementation is required and the chain contains more than two operators
// would have to provide a way to record the intermediate layers so they could be
- // visited... the same applies to the Accept method above and the BackendSelectionHint
+ // visited... the same applies to the BackendSelectionHint
// below.
}
diff --git a/include/armnn/BackendHelper.hpp b/include/armnn/BackendHelper.hpp
index 87b99f03f0..09c7385d5c 100644
--- a/include/armnn/BackendHelper.hpp
+++ b/include/armnn/BackendHelper.hpp
@@ -467,10 +467,6 @@ Optional<const BackendOptions::BackendOption> GetCapability(const std::string& b
Optional<const BackendOptions::BackendOption> GetCapability(const std::string& backendCapabilityName,
const armnn::BackendId& backend);
-/// Convenience function to check a capability on a backend
-ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This function has been deprecated in favour of GetBackendCapability", "22.05")
-bool IsCapabilitySupported(const armnn::BackendId& backend, armnn::BackendCapability capability);
-
/// Returns the number of cached files if backend supports caching
unsigned int GetNumberOfCacheFiles(const armnn::BackendId& backend);
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index 7f46c6a79d..628d045529 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -480,11 +480,7 @@ struct FullyConnectedDescriptor : BaseDescriptor
&& m_ConstantWeights == rhs.m_ConstantWeights;
}
- /// Get the number of views/inputs.
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use GetNumInputs instead", "22.05")
- uint32_t GetNumViews() const;
-
- /// Get the number of views/inputs.
+ /// Get the number of inputs.
uint32_t GetNumInputs() const;
/// Enable/disable bias.
diff --git a/include/armnn/ILayerVisitor.hpp b/include/armnn/ILayerVisitor.hpp
deleted file mode 100644
index a0c782e66e..0000000000
--- a/include/armnn/ILayerVisitor.hpp
+++ /dev/null
@@ -1,534 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#pragma once
-
-#include <armnn/Deprecated.hpp>
-#include <armnn/DescriptorsFwd.hpp>
-#include <armnn/NetworkFwd.hpp>
-#include <armnn/Optional.hpp>
-#include <armnn/TensorFwd.hpp>
-#include <armnn/Types.hpp>
-
-namespace armnn
-{
-class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable IStrategy instead.", "22.05") ILayerVisitor
-{
-protected:
- ILayerVisitor() {}
- virtual ~ILayerVisitor() {}
-
-public:
-
- /// Function that an activation layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param activationDescriptor - ActivationDescriptor to configure the activation.
- /// @param name - Optional name for the layer.
- virtual void VisitActivationLayer(const IConnectableLayer* layer,
- const ActivationDescriptor& activationDescriptor,
- const char* name = nullptr) = 0;
-
- /// Function that an addition layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param name - Optional name for the layer.
- virtual void VisitAdditionLayer(const IConnectableLayer* layer,
- const char* name = nullptr) = 0;
-
- /// Function that an arg min max layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param argMinMaxDescriptor - ArgMinMaxDescriptor to configure the activation.
- /// @param name - Optional name for the layer.
- virtual void VisitArgMinMaxLayer(const IConnectableLayer* layer,
- const ArgMinMaxDescriptor& argMinMaxDescriptor,
- const char* name = nullptr) = 0;
-
- /// Function that a batch normalization layer should call back to when its Accept(ILayerVisitor&)
- /// function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param mean - Pre-calculated mean for each channel.
- /// @param variance - Pre-calculated variance for each channel.
- /// @param beta - Per-channel additive factor.
- /// @param gamma - Per-channel multiplicative factor.
- /// @param name - Optional name for the layer.
- virtual void VisitBatchNormalizationLayer(const IConnectableLayer* layer,
- const BatchNormalizationDescriptor& desc,
- const ConstTensor& mean,
- const ConstTensor& variance,
- const ConstTensor& beta,
- const ConstTensor& gamma,
- const char* name = nullptr) = 0;
-
- /// Function that a batch to space ND layer should call back to when its Accept(ILayerVisitor&)
- /// function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param batchToSpaceNdDescriptor - Description of the layer.
- /// @param name - Optional name for the layer.
- virtual void VisitBatchToSpaceNdLayer(const IConnectableLayer* layer,
- const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
- const char* name = nullptr) = 0;
-
- /// Function a Comparison layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param comparisonDescriptor - Description of the layer.
- /// @param name - Optional name for the layer.
- virtual void VisitComparisonLayer(const IConnectableLayer* layer,
- const ComparisonDescriptor& comparisonDescriptor,
- const char* name = nullptr) = 0;
-
- /// Function that a concat layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param concatDescriptor - ConcatDescriptor (synonym for OriginsDescriptor) to configure the concatenation
- /// process. Number of Views must be equal to the number of inputs, and their order
- /// must match - e.g. first view corresponds to the first input, second view to the
- /// second input, etc....
- /// @param name - Optional name for the layer.
- virtual void VisitConcatLayer(const IConnectableLayer* layer,
- const OriginsDescriptor& concatDescriptor,
- const char* name = nullptr) = 0;
-
- /// Function a layer with no inputs and a single output, which always corresponds to
- /// the passed in constant tensor should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param input - Tensor to be provided as the only output of the layer. The layer will maintain
- /// its own copy of the tensor data, meaning the memory referenced by @a input can
- /// be freed or reused after this function is called.
- /// @param name - Optional name for the layer.
- virtual void VisitConstantLayer(const IConnectableLayer* layer,
- const ConstTensor& input,
- const char* name = nullptr) = 0;
-
- /// Function that a 2D convolution layer should call back to when its Accept(ILayerVisitor&)
- /// function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param convolution2dDescriptor - Description of the 2D convolution layer.
- /// @param name - Optional name for the layer.
- virtual void VisitConvolution2dLayer(const IConnectableLayer* layer,
- const Convolution2dDescriptor& convolution2dDescriptor,
- const char* name = nullptr) = 0;
-
- /// Function that a 2D convolution layer should call back to when its Accept(ILayerVisitor&)
- /// function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param convolution2dDescriptor - Description of the 2D convolution layer.
- /// @param weights - Tensor for the weights data.
- /// @param biases - Optional tensor for the bias data. If specified, must match the output tensor shape.
- /// @param name - Optional name for the layer.
- ARMNN_DEPRECATED_MSG("Use VisitConvolution2dLayer without ConstTensors")
- virtual void VisitConvolution2dLayer(const IConnectableLayer* layer,
- const Convolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char* name = nullptr) = 0;
-
- /// Function a depth to space layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param depthToSpaceDescriptor - Parameters for the depth to space operation.
- /// @param name - Optional name for the layer.
- virtual void VisitDepthToSpaceLayer(const IConnectableLayer* layer,
- const DepthToSpaceDescriptor& depthToSpaceDescriptor,
- const char* name = nullptr) = 0;
-
- /// Function that a 2D depthwise convolution layer with biases should call back to when its
- /// Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param convolution2dDescriptor - Description of the 2D depthwise convolution layer.
- /// @param name - Optional name for the layer.
- virtual void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const char* name = nullptr) = 0;
-
- /// Function that a 2D depthwise convolution layer with biases should call back to when its
- /// Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param convolution2dDescriptor - Description of the 2D depthwise convolution layer.
- /// @param weights - Tensor for the weights. Expected format: [channelMultiplier, inputChannels, height, width].
- /// @param biases - Optional tensor for the bias data. If specified, must match the output tensor shape.
- /// @param name - Optional name for the layer.
- ARMNN_DEPRECATED_MSG("Use VisitDepthwiseConvolution2dLayer without ConstTensors")
- virtual void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char* name = nullptr) = 0;
-
- /// Function that a Dequantize layer should call back to when its
- /// Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param name - Optional name for the layer.
- virtual void VisitDequantizeLayer(const IConnectableLayer* layer,
- const char* name = nullptr) = 0;
-
- /// Function that a Detection PostProcess layer should call back to when its
- /// Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param descriptor - Description of the Detection PostProcess layer.
- /// @param anchors - Tensor for the anchors.
- /// @param name - Optional name for the layer.
- virtual void VisitDetectionPostProcessLayer(const IConnectableLayer* layer,
- const DetectionPostProcessDescriptor& descriptor,
- const ConstTensor& anchors,
- const char* name = nullptr) = 0;
-
- /// Function a division layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param name - Optional name for the layer.
- virtual void VisitDivisionLayer(const IConnectableLayer* layer,
- const char* name = nullptr) = 0;
-
- /// Function a ElementwiseUnary layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param elementwiseUnaryDescriptor - Description of the layer.
- /// @param name - Optional name for the layer.
- virtual void VisitElementwiseUnaryLayer(const IConnectableLayer* layer,
- const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
- const char* name = nullptr) = 0;
-
- /// Function a fill layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param fillDescriptor - Description of the layer
- /// @param name - Optional name for the layer.
- virtual void VisitFillLayer(const IConnectableLayer* layer,
- const FillDescriptor& fillDescriptor,
- const char* name = nullptr) = 0;
-
- /// Function a floor layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param name - Optional name for the layer.
- virtual void VisitFloorLayer(const IConnectableLayer* layer,
- const char* name = nullptr) = 0;
-
-
- /// Function that a fully connected layer should call back to when its Accept(ILayerVisitor&)
- /// function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param fullyConnectedDescriptor - Description of the fully connected layer.
- /// @param name - Optional name for the layer.
- virtual void VisitFullyConnectedLayer(const IConnectableLayer* layer,
- const FullyConnectedDescriptor& fullyConnectedDescriptor,
- const char* name = nullptr) = 0;
-
- /// Function that a fully connected layer should call back to when its Accept(ILayerVisitor&)
- /// function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param fullyConnectedDescriptor - Description of the fully connected layer.
- /// @param weights - Tensor for the weights data.
- /// @param biases - Optional tensor for the bias data.
- /// @param name - Optional name for the layer.
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use VisitFullyConnectedLayer without ConstTensors", "22.05")
- virtual void VisitFullyConnectedLayer(const IConnectableLayer* layer,
- const FullyConnectedDescriptor& fullyConnectedDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char* name = nullptr) = 0;
-
- /// Function a Gather layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param gatherDescriptor - Parameters for the gather operation.
- /// @param name - Optional name for the layer.
- virtual void VisitGatherLayer(const IConnectableLayer* layer,
- const GatherDescriptor& gatherDescriptor,
- const char* name = nullptr) = 0;
-
- /// Function that an InputLayer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param id - User generated id to uniquely identify a particular input. The same id needs to be specified
- /// when passing the inputs to the IRuntime::EnqueueWorkload() function.
- /// @param name - Optional name for the layer.
- virtual void VisitInputLayer(const IConnectableLayer* layer,
- LayerBindingId id,
- const char* name = nullptr) = 0;
-
- /// Function that an instance normalization layer should call back to when its Accept(ILayerVisitor&)
- /// function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param desc - Parameters for the instance normalization operation.
- /// @param name - Optional name for the layer.
- virtual void VisitInstanceNormalizationLayer(const IConnectableLayer* layer,
- const InstanceNormalizationDescriptor& desc,
- const char* name = nullptr) = 0;
-
- /// Function that an L2 normalization layer should call back to when its Accept(ILayerVisitor&)
- /// function is invoked. Normalization is performed along dimension 1, but requires a 4d input.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param desc - Parameters for the L2 normalization operation.
- /// @param name - Optional name for the layer.
- virtual void VisitL2NormalizationLayer(const IConnectableLayer* layer,
- const L2NormalizationDescriptor& desc,
- const char* name = nullptr) = 0;
-
- /// Function that a log softmax layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param logSoftmaxDescriptor - LogSoftmaxDescriptor to configure the log softmax.
- /// @param name - Optional name for the layer.
- virtual void VisitLogSoftmaxLayer(const IConnectableLayer* layer,
- const LogSoftmaxDescriptor& logSoftmaxDescriptor,
- const char* name = nullptr) = 0;
-
- /// Function that a logical binary layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param logicalBinaryDescriptor - LogicalBinaryDescriptor to configure the logical unary layer.
- /// @param name - Optional name for the layer.
- virtual void VisitLogicalBinaryLayer(const IConnectableLayer* layer,
- const LogicalBinaryDescriptor& logicalBinaryDescriptor,
- const char* name = nullptr) = 0;
-
- /// Function an Lstm layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param descriptor - Parameters controlling the operation of the Lstm operation.
- /// @param params - The weights and biases for the LSTM cell.
- /// @param name - Optional name for the layer.
- virtual void VisitLstmLayer(const IConnectableLayer* layer,
- const LstmDescriptor& descriptor,
- const LstmInputParams& params,
- const char* name = nullptr) = 0;
-
- /// Function a Maximum layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param name - Optional name for the layer.
- virtual void VisitMaximumLayer(const IConnectableLayer* layer,
- const char* name = nullptr) = 0;
-
- /// Function a Mean layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param meanDescriptor - Parameters for the mean operation.
- /// @param name - Optional name for the layer.
- virtual void VisitMeanLayer(const IConnectableLayer* layer,
- const MeanDescriptor& meanDescriptor,
- const char* name = nullptr) = 0;
-
- /// Function that a merge layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param name - Optional name for the layer.
- virtual void VisitMergeLayer(const IConnectableLayer* layer,
- const char* name = nullptr) = 0;
-
- /// Function a Minimum layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param name - Optional name for the layer.
- virtual void VisitMinimumLayer(const IConnectableLayer* layer,
- const char* name = nullptr) = 0;
-
- /// Function that a multiplication layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param name - Optional name for the layer.
- virtual void VisitMultiplicationLayer(const IConnectableLayer* layer,
- const char* name = nullptr) = 0;
-
- /// Function that a normalization layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param normalizationDescriptor - NormalizationDescriptor to configure the normalization.
- /// @param name - Optional name for the layer.
- virtual void VisitNormalizationLayer(const IConnectableLayer* layer,
- const NormalizationDescriptor& normalizationDescriptor,
- const char* name = nullptr) = 0;
-
- /// Function an output layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param id - User generated id to uniquely identify a particular output. The same id needs to be specified
- /// when passing the outputs to the IRuntime::EnqueueWorkload() function.
- /// @param name - Optional name for the layer.
- virtual void VisitOutputLayer(const IConnectableLayer* layer,
- LayerBindingId id,
- const char* name = nullptr) = 0;
-
- /// Function a pad layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param paddings - n by 2 tensor, where n is the rank of the input tensor,
- /// such that paddings[i,0] indicates the amount of padding to add in front of dimension i, and
- /// paddings[i,1] indicates the amount of padding to add after the end of dimension i
- /// @param name - Optional name for the layer.
- virtual void VisitPadLayer(const IConnectableLayer* layer,
- const PadDescriptor& padDescriptor,
- const char* name = nullptr) = 0;
-
- /// Function that a permute layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param permuteDescriptor - PermuteDescriptor to configure the permute.
- /// @param name - Optional name for the layer.
- virtual void VisitPermuteLayer(const IConnectableLayer* layer,
- const PermuteDescriptor& permuteDescriptor,
- const char* name = nullptr) = 0;
-
- /// Function that a pooling layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param pooling2dDescriptor - Pooling2dDescriptor to configure the pooling.
- /// @param name - Optional name for the layer.
- virtual void VisitPooling2dLayer(const IConnectableLayer* layer,
- const Pooling2dDescriptor& pooling2dDescriptor,
- const char* name = nullptr) = 0;
-
- /// Function that a pooling layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param pooling3dDescriptor - Pooling3dDescriptor to configure the pooling.
- /// @param name - Optional name for the layer.
- virtual void VisitPooling3dLayer(const IConnectableLayer* layer,
- const Pooling3dDescriptor& pooling3dDescriptor,
- const char* name = nullptr) = 0;
-
- /// Function that a PReLU activation layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param name - Optional name for the layer.
- virtual void VisitPreluLayer(const IConnectableLayer* layer,
- const char* name = nullptr) = 0;
-
- /// Function a quantize layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param name - Optional name for the layer.
- virtual void VisitQuantizeLayer(const IConnectableLayer* layer,
- const char* name = nullptr) = 0;
-
- /// Function a QLstm layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param descriptor - Parameters controlling the operation of the QLstm operation.
- /// @param params - The weights and biases for the layer
- /// @param name - Optional name for the layer.
- virtual void VisitQLstmLayer(const IConnectableLayer* layer,
- const QLstmDescriptor& descriptor,
- const LstmInputParams& params,
- const char* name = nullptr) = 0;
-
- /// Function a QuantizedLstm layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param params - The weights and biases for the Quantized LSTM cell
- /// @param name - Optional name for the layer.
- virtual void VisitQuantizedLstmLayer(const IConnectableLayer* layer,
- const QuantizedLstmInputParams& params,
- const char* name = nullptr) = 0;
-
- /// Function a rank layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param name - Optional name for the layer.
- virtual void VisitRankLayer(const IConnectableLayer* layer,
- const char* name = nullptr) = 0;
-
- /// Function that a reduce layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param ReduceDescriptor - Parameters for the reduce max operation.
- /// @param name - Optional name for the layer.
- virtual void VisitReduceLayer(const IConnectableLayer* layer,
- const ReduceDescriptor& reduceDescriptor,
- const char* name = nullptr) = 0;
-
- /// Function a reshape layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param reshapeDescriptor - Parameters for the reshape operation.
- /// @param name - Optional name for the layer.
- virtual void VisitReshapeLayer(const IConnectableLayer* layer,
- const ReshapeDescriptor& reshapeDescriptor,
- const char* name = nullptr) = 0;
-
- /// Function that a resize layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param resizeDescriptor - Parameters for the resize operation.
- /// @param name - Optional name for the layer.
- virtual void VisitResizeLayer(const IConnectableLayer* layer,
- const ResizeDescriptor& resizeDescriptor,
- const char* name = nullptr) = 0;
-
- /// Function that a slice layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param sliceDescriptor - SliceDescriptor to configure the slice operation.
- /// @param name - Optional name for the layer.
- virtual void VisitSliceLayer(const IConnectableLayer* layer,
- const SliceDescriptor& sliceDescriptor,
- const char* name = nullptr) = 0;
-
-
- /// Function that a softmax layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param softmaxDescriptor - SoftmaxDescriptor to configure the softmax.
- /// @param name - Optional name for the layer.
- virtual void VisitSoftmaxLayer(const IConnectableLayer* layer,
- const SoftmaxDescriptor& softmaxDescriptor,
- const char* name = nullptr) = 0;
-
- /// Function a space to batch layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param spaceToBatchNdDescriptor - Parameters for the space to batch operation.
- /// @param name - Optional name for the layer.
- virtual void VisitSpaceToBatchNdLayer(const IConnectableLayer* layer,
- const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
- const char* name = nullptr) = 0;
-
- /// Function a space to depth layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param spaceToDepthDescriptor - Parameters for the space to depth operation.
- /// @param name - Optional name for the layer.
- virtual void VisitSpaceToDepthLayer(const IConnectableLayer* layer,
- const SpaceToDepthDescriptor& spaceToDepthDescriptor,
- const char* name = nullptr) = 0;
-
- /// Function that a splitter layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param splitterDescriptor - ViewsDescriptor to configure the splitting process.
- /// Number of Views must be equal to the number of outputs,
- /// and their order must match - e.g. first view corresponds to
- /// the first output, second view to the second output, etc....
- /// @param name - Optional name for the layer.
- virtual void VisitSplitterLayer(const IConnectableLayer* layer,
- const ViewsDescriptor& splitterDescriptor,
- const char* name = nullptr) = 0;
-
- /// Function a stack layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param stackDescriptor - Parameters for the stack operation.
- /// @param name - Optional name for the layer.
- virtual void VisitStackLayer(const IConnectableLayer* layer,
- const StackDescriptor& stackDescriptor,
- const char* name = nullptr) = 0;
-
- /// Function a StandInLayer should call back to when its Accept(ILaterVisitor&) function is invoked
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param standInDescriptor - Parameters for the stand-in layer.
- /// @param name - Optional name for the layer.
- virtual void VisitStandInLayer(const IConnectableLayer* layer,
- const StandInDescriptor& standInDescriptor,
- const char* name = nullptr) = 0;
-
- /// Function a strided slice layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param stridedSliceDescriptor - Parameters for the strided slice operation.
- /// @param name - Optional name for the layer.
- virtual void VisitStridedSliceLayer(const IConnectableLayer* layer,
- const StridedSliceDescriptor& stridedSliceDescriptor,
- const char* name = nullptr) = 0;
-
- /// Function a subtraction layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param name - Optional name for the layer.
- virtual void VisitSubtractionLayer(const IConnectableLayer* layer,
- const char* name = nullptr) = 0;
-
- /// Function a switch layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param name - Optional name for the layer.
- virtual void VisitSwitchLayer(const IConnectableLayer* layer,
- const char* name = nullptr) = 0;
-
- /// Function that a 2D transpose convolution layer should call back to when its Accept(ILayerVisitor&)
- /// function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param descriptor - Description of the 2D transpose convolution layer.
- /// @param weights - Tensor for the weights data.
- /// @param biases - Optional tensor for the bias data.
- /// @param name - Optional name for the layer.
- virtual void VisitTransposeConvolution2dLayer(const IConnectableLayer* layer,
- const TransposeConvolution2dDescriptor& descriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char* name = nullptr) = 0;
-
- /// Function that a transpose layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param transposeDescriptor - TransposeDescriptor to configure the transpose.
- /// @param name - Optional name for the layer.
- virtual void VisitTransposeLayer(const IConnectableLayer* layer,
- const TransposeDescriptor& transposeDescriptor,
- const char* name = nullptr) = 0;
-
- virtual void StartVisit() {}
- virtual void FinishVisit() {}
-
-};
-} // namespace armnn
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 1ed560040f..94afbf346a 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -7,7 +7,6 @@
#include <armnn/BackendOptions.hpp>
#include <armnn/Deprecated.hpp>
#include <armnn/DescriptorsFwd.hpp>
-#include <armnn/ILayerVisitor.hpp>
#include <armnn/IStrategy.hpp>
#include <armnn/NetworkFwd.hpp>
#include <armnn/Optional.hpp>
@@ -95,18 +94,6 @@ public:
/// Returns the unique id of the layer
virtual LayerGuid GetGuid() const = 0;
- // The Accept function needs to be wrapped in a no warn macro to avoid deprecation warnings from
- // the deprecated ILayerVisitor which is used in the function.
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- /// Apply a visitor to this layer
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Accept is deprecated. The ILayerVisitor that works in conjunction with this "
- "Accept function is deprecated. Use IStrategy in combination with "
- "ExecuteStrategy instead, which is an ABI/API stable version of the "
- "visitor pattern.",
- "22.05")
- virtual void Accept(ILayerVisitor& visitor) const = 0;
- ARMNN_NO_DEPRECATE_WARN_END
-
/// Apply a visitor to this layer
virtual void ExecuteStrategy(IStrategy& strategy) const = 0;
@@ -764,17 +751,6 @@ public:
IConnectableLayer* AddChannelShuffleLayer(const ChannelShuffleDescriptor& descriptor,
const char* name = nullptr);
- // The Accept function needs to be wrapped in a no warn macro to avoid deprecation warnings from
- // the deprecated ILayerVisitor which is used in the function.
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- /// Apply a visitor to this layer
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Accept is deprecated. The ILayerVisitor that works in conjunction with this "
- "Accept function is deprecated. Use IStrategy in combination with "
- "ExecuteStrategy instead, which is an ABI/API stable version of the "
- "visitor pattern.",
- "22.05")
- void Accept(ILayerVisitor& visitor) const;
- ARMNN_NO_DEPRECATE_WARN_END
void ExecuteStrategy(IStrategy& strategy) const;
diff --git a/include/armnn/IStrategy.hpp b/include/armnn/IStrategy.hpp
index 8d29565dcc..aed881c0dd 100644
--- a/include/armnn/IStrategy.hpp
+++ b/include/armnn/IStrategy.hpp
@@ -7,6 +7,9 @@
#include <armnn/DescriptorsFwd.hpp>
#include <armnn/Types.hpp>
+#include <armnn/NetworkFwd.hpp>
+#include <armnn/TensorFwd.hpp>
+
namespace armnn
{
@@ -17,7 +20,7 @@ IStrategy() {}
virtual ~IStrategy() {}
public:
-virtual void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+virtual void ExecuteStrategy(const IConnectableLayer* layer,
const armnn::BaseDescriptor& descriptor,
const std::vector<armnn::ConstTensor>& constants,
const char* name,
diff --git a/include/armnn/LayerVisitorBase.hpp b/include/armnn/LayerVisitorBase.hpp
deleted file mode 100644
index 025fca7eb0..0000000000
--- a/include/armnn/LayerVisitorBase.hpp
+++ /dev/null
@@ -1,271 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn/ILayerVisitor.hpp>
-
-namespace armnn
-{
-
-struct VisitorThrowingPolicy
-{
- static void Apply(const std::string& errorMessage = "") { throw UnimplementedException(errorMessage); }
-};
-
-struct VisitorNoThrowPolicy
-{
- static void Apply(const std::string&) {}
-};
-
-/// Visitor base class with empty implementations.
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-template<typename DefaultPolicy>
-class LayerVisitorBase : public ILayerVisitor
-{
-protected:
- LayerVisitorBase() {}
- virtual ~LayerVisitorBase() {}
-
-public:
-
- void VisitActivationLayer(const IConnectableLayer*,
- const ActivationDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitAdditionLayer(const IConnectableLayer*,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitArgMinMaxLayer(const IConnectableLayer*,
- const ArgMinMaxDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitBatchNormalizationLayer(const IConnectableLayer*,
- const BatchNormalizationDescriptor&,
- const ConstTensor&,
- const ConstTensor&,
- const ConstTensor&,
- const ConstTensor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitBatchToSpaceNdLayer(const IConnectableLayer*,
- const BatchToSpaceNdDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitComparisonLayer(const IConnectableLayer*,
- const ComparisonDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitConcatLayer(const IConnectableLayer*,
- const ConcatDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitConstantLayer(const IConnectableLayer*,
- const ConstTensor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitConvolution2dLayer(const IConnectableLayer*,
- const Convolution2dDescriptor&,
- const ConstTensor&,
- const Optional<ConstTensor>&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitConvolution2dLayer(const IConnectableLayer*,
- const Convolution2dDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitDepthToSpaceLayer(const IConnectableLayer*,
- const DepthToSpaceDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitDepthwiseConvolution2dLayer(const IConnectableLayer*,
- const DepthwiseConvolution2dDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitDepthwiseConvolution2dLayer(const IConnectableLayer*,
- const DepthwiseConvolution2dDescriptor&,
- const ConstTensor&,
- const Optional<ConstTensor>&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitDequantizeLayer(const IConnectableLayer*,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitDetectionPostProcessLayer(const IConnectableLayer*,
- const DetectionPostProcessDescriptor&,
- const ConstTensor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitDivisionLayer(const IConnectableLayer*,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitElementwiseUnaryLayer(const IConnectableLayer*,
- const ElementwiseUnaryDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitFillLayer(const IConnectableLayer*,
- const FillDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitFloorLayer(const IConnectableLayer*,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitFullyConnectedLayer(const IConnectableLayer*,
- const FullyConnectedDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitFullyConnectedLayer(const IConnectableLayer*,
- const FullyConnectedDescriptor&,
- const ConstTensor&,
- const Optional<ConstTensor>&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitGatherLayer(const IConnectableLayer*,
- const GatherDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitInputLayer(const IConnectableLayer*,
- LayerBindingId,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitInstanceNormalizationLayer(const IConnectableLayer*,
- const InstanceNormalizationDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitL2NormalizationLayer(const IConnectableLayer*,
- const L2NormalizationDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitLogSoftmaxLayer(const IConnectableLayer*,
- const LogSoftmaxDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitLogicalBinaryLayer(const IConnectableLayer*,
- const LogicalBinaryDescriptor&,
- const char*) override {DefaultPolicy::Apply(__func__); }
-
- void VisitLstmLayer(const IConnectableLayer*,
- const LstmDescriptor&,
- const LstmInputParams&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitMaximumLayer(const IConnectableLayer*,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitMeanLayer(const IConnectableLayer*,
- const MeanDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitMergeLayer(const IConnectableLayer*,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitMinimumLayer(const IConnectableLayer*,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitMultiplicationLayer(const IConnectableLayer*,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitNormalizationLayer(const IConnectableLayer*,
- const NormalizationDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitOutputLayer(const IConnectableLayer*,
- LayerBindingId,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitPadLayer(const IConnectableLayer*,
- const PadDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitPermuteLayer(const IConnectableLayer*,
- const PermuteDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitPooling2dLayer(const IConnectableLayer*,
- const Pooling2dDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitPreluLayer(const IConnectableLayer*,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitQuantizeLayer(const IConnectableLayer*,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitQLstmLayer(const IConnectableLayer*,
- const QLstmDescriptor&,
- const LstmInputParams&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitQuantizedLstmLayer(const IConnectableLayer*,
- const QuantizedLstmInputParams&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitRankLayer(const IConnectableLayer*,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitReduceLayer(const IConnectableLayer*,
- const ReduceDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitReshapeLayer(const IConnectableLayer*,
- const ReshapeDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitResizeLayer(const IConnectableLayer*,
- const ResizeDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitSliceLayer(const IConnectableLayer*,
- const SliceDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitSoftmaxLayer(const IConnectableLayer*,
- const SoftmaxDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitSpaceToBatchNdLayer(const IConnectableLayer*,
- const SpaceToBatchNdDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitSpaceToDepthLayer(const IConnectableLayer*,
- const SpaceToDepthDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitSplitterLayer(const IConnectableLayer*,
- const ViewsDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitStackLayer(const IConnectableLayer*,
- const StackDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitStandInLayer(const IConnectableLayer*,
- const StandInDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitStridedSliceLayer(const IConnectableLayer*,
- const StridedSliceDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitSubtractionLayer(const IConnectableLayer*,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitSwitchLayer(const IConnectableLayer*,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitTransposeConvolution2dLayer(const IConnectableLayer*,
- const TransposeConvolution2dDescriptor&,
- const ConstTensor&,
- const Optional<ConstTensor>&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
- void VisitTransposeLayer(const IConnectableLayer*,
- const TransposeDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
-};
-ARMNN_NO_DEPRECATE_WARN_END
-
-} // namespace armnn
diff --git a/include/armnn/backends/CMakeLists.txt b/include/armnn/backends/CMakeLists.txt
index 19046ed977..978916e2b0 100644
--- a/include/armnn/backends/CMakeLists.txt
+++ b/include/armnn/backends/CMakeLists.txt
@@ -4,7 +4,6 @@
#
list(APPEND armnnBackendsAPI_sources
- CpuTensorHandleFwd.hpp
TensorHandleFwd.hpp
DynamicBackend.hpp
IBackendInternal.hpp
diff --git a/include/armnn/backends/CpuTensorHandleFwd.hpp b/include/armnn/backends/CpuTensorHandleFwd.hpp
deleted file mode 100644
index a5a28d8135..0000000000
--- a/include/armnn/backends/CpuTensorHandleFwd.hpp
+++ /dev/null
@@ -1,20 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-// This file is deprecated and will be removed soon.
-// Please use the new file include/armnn/TensorHandleFwd.hpp instead.
-
-#pragma once
-
-namespace armnn
-{
-
-class ConstCpuTensorHandle;
-class CpuTensorHandle;
-class ScopedCpuTensorHandle;
-class PassthroughCpuTensorHandle;
-class ConstPassthroughCpuTensorHandle;
-
-} // namespace armnn
diff --git a/include/armnn/backends/IBackendInternal.hpp b/include/armnn/backends/IBackendInternal.hpp
index 98f0eaacd7..e393a7e1c5 100644
--- a/include/armnn/backends/IBackendInternal.hpp
+++ b/include/armnn/backends/IBackendInternal.hpp
@@ -172,10 +172,6 @@ public:
return BackendCapabilities("IBackendInternal NullCapabilities");
};
- /// Returns true if backend support the capability false otherwise
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This function has been deprecated in favour of GetCapability", "22.05")
- virtual bool HasCapability(BackendCapability /*capabilityClass*/) const { return false; }
-
/// Signals the backend to use a custom memory allocator provided by the user
///
/// \param allocator - a pointer to the provided ICustomAllocator to use with this backend
diff --git a/include/armnn/backends/TensorHandle.hpp b/include/armnn/backends/TensorHandle.hpp
index 2e6c8485d1..c69f7c8083 100644
--- a/include/armnn/backends/TensorHandle.hpp
+++ b/include/armnn/backends/TensorHandle.hpp
@@ -251,17 +251,4 @@ private:
std::shared_ptr<ConstTensorHandle> m_TensorHandle;
};
-using ConstCpuTensorHandle ARMNN_DEPRECATED_MSG_REMOVAL_DATE("ConstCpuTensorHandle is deprecated, "
- "use ConstTensorHandle instead", "22.05") = ConstTensorHandle;
-using CpuTensorHandle ARMNN_DEPRECATED_MSG_REMOVAL_DATE("CpuTensorHandle is deprecated, "
- "use TensorHandle instead", "22.05") = TensorHandle;
-using ScopedCpuTensorHandle ARMNN_DEPRECATED_MSG_REMOVAL_DATE("ScopedCpuTensorHandle is deprecated, "
- "use ScopedTensorHandle instead", "22.05") = ScopedTensorHandle;
-using PassthroughCpuTensorHandle ARMNN_DEPRECATED_MSG_REMOVAL_DATE("PassthroughCpuTensorHandle is deprecated, use "
- "PassthroughTensorHandle instead",
- "22.05") = PassthroughTensorHandle;
-using ConstPassthroughCpuTensorHandle ARMNN_DEPRECATED_MSG_REMOVAL_DATE("ConstPassthroughCpuTensorHandle is "
- "deprecated, use ConstPassthroughTensorHandle "
- "instead", "22.05") = ConstPassthroughTensorHandle;
-
} // namespace armnn
diff --git a/src/armnn/BackendHelper.cpp b/src/armnn/BackendHelper.cpp
index a5278eb21c..5b5bece783 100644
--- a/src/armnn/BackendHelper.cpp
+++ b/src/armnn/BackendHelper.cpp
@@ -110,22 +110,6 @@ bool HasCapability(const BackendOptions::BackendOption& backendOption, const arm
return false;
}
-/// Convenience function to check a capability on a backend
-bool IsCapabilitySupported(const armnn::BackendId& backend, armnn::BackendCapability capability)
-{
- bool hasCapability = false;
- auto const& backendRegistry = armnn::BackendRegistryInstance();
- if (backendRegistry.IsBackendRegistered(backend))
- {
- auto factoryFunc = backendRegistry.GetFactory(backend);
- auto backendObject = factoryFunc();
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- hasCapability = backendObject->HasCapability(capability);
- ARMNN_NO_DEPRECATE_WARN_END
- }
- return hasCapability;
-}
-
unsigned int GetNumberOfCacheFiles(const armnn::BackendId& backend)
{
auto const& backendRegistry = armnn::BackendRegistryInstance();
diff --git a/src/armnn/Descriptors.cpp b/src/armnn/Descriptors.cpp
index 4eb875e03d..c740fd03ad 100644
--- a/src/armnn/Descriptors.cpp
+++ b/src/armnn/Descriptors.cpp
@@ -450,11 +450,6 @@ uint32_t FullyConnectedDescriptor::GetNumInputs() const
return armnn::GetNumInputs(m_BiasEnabled);
}
-uint32_t FullyConnectedDescriptor::GetNumViews() const
-{
- return armnn::GetNumInputs(m_BiasEnabled);
-}
-
uint32_t DepthwiseConvolution2dDescriptor::GetNumInputs() const
{
return armnn::GetNumInputs(m_BiasEnabled);
diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp
index 5edf66cabb..12c782c965 100644
--- a/src/armnn/Layer.hpp
+++ b/src/armnn/Layer.hpp
@@ -476,4 +476,4 @@ private:
LayerBindingId m_Id;
};
-}
+} //namespace armnn
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 9520c1399e..8fe4445dcf 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -456,13 +456,6 @@ IConnectableLayer* INetwork::AddChannelShuffleLayer(const ChannelShuffleDescript
return pNetworkImpl->AddChannelShuffleLayer(descriptor, name);
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void INetwork::Accept(ILayerVisitor& visitor) const
-{
- return pNetworkImpl->Accept(visitor);
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
void INetwork::ExecuteStrategy(IStrategy& strategy) const
{
return pNetworkImpl->ExecuteStrategy(strategy);
@@ -2909,16 +2902,6 @@ IConnectableLayer* NetworkImpl::AddPrecompiledLayer(const PreCompiledDescriptor&
return layer;
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void NetworkImpl::Accept(ILayerVisitor& visitor) const
-{
- for (auto layer : GetGraph())
- {
- layer->Accept(visitor);
- };
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
void NetworkImpl::ExecuteStrategy(IStrategy& strategy) const
{
for (auto layer : GetGraph())
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 2d34cfc3e2..a4387e65c0 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -253,10 +253,6 @@ public:
IConnectableLayer* AddConvertFp32ToFp16Layer(const char* name = nullptr);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const;
- ARMNN_NO_DEPRECATE_WARN_END
-
void ExecuteStrategy(IStrategy& strategy) const;
private:
diff --git a/src/armnn/layers/AbsLayer.cpp b/src/armnn/layers/AbsLayer.cpp
index 13fa24aacf..072d13ce93 100644
--- a/src/armnn/layers/AbsLayer.cpp
+++ b/src/armnn/layers/AbsLayer.cpp
@@ -47,11 +47,9 @@ void AbsLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "AbsLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void AbsLayer::Accept(ILayerVisitor& visitor) const
+void AbsLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitAbsLayer(this, GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn \ No newline at end of file
diff --git a/src/armnn/layers/AbsLayer.hpp b/src/armnn/layers/AbsLayer.hpp
index 9ab66624f6..250bd8a689 100644
--- a/src/armnn/layers/AbsLayer.hpp
+++ b/src/armnn/layers/AbsLayer.hpp
@@ -28,10 +28,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
-
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
/// Constructor to create an AbsLayer.
diff --git a/src/armnn/layers/ActivationLayer.cpp b/src/armnn/layers/ActivationLayer.cpp
index 63c98a93f6..eea18d71e4 100644
--- a/src/armnn/layers/ActivationLayer.cpp
+++ b/src/armnn/layers/ActivationLayer.cpp
@@ -45,11 +45,9 @@ void ActivationLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ActivationLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void ActivationLayer::Accept(ILayerVisitor& visitor) const
+void ActivationLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitActivationLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ActivationLayer.hpp b/src/armnn/layers/ActivationLayer.hpp
index 47b7f66280..8d1196ffce 100644
--- a/src/armnn/layers/ActivationLayer.hpp
+++ b/src/armnn/layers/ActivationLayer.hpp
@@ -26,10 +26,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
-
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
/// Constructor to create an ActivationLayer.
diff --git a/src/armnn/layers/AdditionLayer.cpp b/src/armnn/layers/AdditionLayer.cpp
index f55bb55edd..7117c14f92 100644
--- a/src/armnn/layers/AdditionLayer.cpp
+++ b/src/armnn/layers/AdditionLayer.cpp
@@ -32,11 +32,9 @@ AdditionLayer* AdditionLayer::Clone(Graph& graph) const
return CloneBase<AdditionLayer>(graph, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void AdditionLayer::Accept(ILayerVisitor& visitor) const
+void AdditionLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitAdditionLayer(this, GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/AdditionLayer.hpp b/src/armnn/layers/AdditionLayer.hpp
index 71a8553078..6980677cde 100644
--- a/src/armnn/layers/AdditionLayer.hpp
+++ b/src/armnn/layers/AdditionLayer.hpp
@@ -23,9 +23,7 @@ public:
/// @param [in] graph The graph into which this layer is being cloned.
AdditionLayer* Clone(Graph& graph) const override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
/// Constructor to create an AdditionLayer.
diff --git a/src/armnn/layers/ArgMinMaxLayer.cpp b/src/armnn/layers/ArgMinMaxLayer.cpp
index 30db7ba803..57a6ff12d8 100644
--- a/src/armnn/layers/ArgMinMaxLayer.cpp
+++ b/src/armnn/layers/ArgMinMaxLayer.cpp
@@ -86,11 +86,9 @@ void ArgMinMaxLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ArgMinMaxLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void ArgMinMaxLayer::Accept(ILayerVisitor& visitor) const
+void ArgMinMaxLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitArgMinMaxLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ArgMinMaxLayer.hpp b/src/armnn/layers/ArgMinMaxLayer.hpp
index f2125361ce..7a6b78396c 100644
--- a/src/armnn/layers/ArgMinMaxLayer.hpp
+++ b/src/armnn/layers/ArgMinMaxLayer.hpp
@@ -34,9 +34,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
/// Constructor to create a ArgMinMaxLayer.
diff --git a/src/armnn/layers/BatchNormalizationLayer.cpp b/src/armnn/layers/BatchNormalizationLayer.cpp
index 15a42dd46a..6f0e1a82a8 100644
--- a/src/armnn/layers/BatchNormalizationLayer.cpp
+++ b/src/armnn/layers/BatchNormalizationLayer.cpp
@@ -71,24 +71,6 @@ Layer::ConstantTensors BatchNormalizationLayer::GetConstantTensorsByRef()
return {m_Mean, m_Variance, m_Beta, m_Gamma};
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void BatchNormalizationLayer::Accept(ILayerVisitor& visitor) const
-{
- ManagedConstTensorHandle managedMean(m_Mean);
- ManagedConstTensorHandle managedVariance(m_Variance);
- ManagedConstTensorHandle managedBeta(m_Beta);
- ManagedConstTensorHandle managedGamma(m_Gamma);
-
- ConstTensor meanTensor(managedMean.GetTensorInfo(), managedMean.Map());
- ConstTensor varianceTensor(managedVariance.GetTensorInfo(), managedVariance.Map());
- ConstTensor betaTensor(managedBeta.GetTensorInfo(), managedBeta.Map());
- ConstTensor gammaTensor(managedGamma.GetTensorInfo(), managedGamma.Map());
-
- visitor.VisitBatchNormalizationLayer(
- this, GetParameters(), meanTensor, varianceTensor, betaTensor, gammaTensor, GetName());
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
void BatchNormalizationLayer::ExecuteStrategy(IStrategy& strategy) const
{
ManagedConstTensorHandle managedMean(m_Mean);
diff --git a/src/armnn/layers/BatchNormalizationLayer.hpp b/src/armnn/layers/BatchNormalizationLayer.hpp
index 10ca7eca25..9715c56094 100644
--- a/src/armnn/layers/BatchNormalizationLayer.hpp
+++ b/src/armnn/layers/BatchNormalizationLayer.hpp
@@ -39,10 +39,6 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
-
void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.cpp b/src/armnn/layers/BatchToSpaceNdLayer.cpp
index 485500d87d..f022c525a8 100644
--- a/src/armnn/layers/BatchToSpaceNdLayer.cpp
+++ b/src/armnn/layers/BatchToSpaceNdLayer.cpp
@@ -95,11 +95,9 @@ std::vector<TensorShape> BatchToSpaceNdLayer::InferOutputShapes(const std::vecto
return std::vector<TensorShape>({ outputShape });
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void BatchToSpaceNdLayer::Accept(ILayerVisitor& visitor) const
+void BatchToSpaceNdLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitBatchToSpaceNdLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.hpp b/src/armnn/layers/BatchToSpaceNdLayer.hpp
index bb6eb7129d..a2c480a821 100644
--- a/src/armnn/layers/BatchToSpaceNdLayer.hpp
+++ b/src/armnn/layers/BatchToSpaceNdLayer.hpp
@@ -34,9 +34,7 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
/// Constructor to create a BatchToSpaceNdLayer.
diff --git a/src/armnn/layers/CastLayer.cpp b/src/armnn/layers/CastLayer.cpp
index 03b68659d1..efa74c82c3 100644
--- a/src/armnn/layers/CastLayer.cpp
+++ b/src/armnn/layers/CastLayer.cpp
@@ -46,12 +46,4 @@ void CastLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "CastLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void CastLayer::Accept(ILayerVisitor& visitor) const
-{
- IgnoreUnused(visitor);
- throw armnn::Exception("CastLayer VisitCastLayer is not implemented");
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
} // namespace armnn
diff --git a/src/armnn/layers/CastLayer.hpp b/src/armnn/layers/CastLayer.hpp
index e0448131a2..e01e91c7c2 100644
--- a/src/armnn/layers/CastLayer.hpp
+++ b/src/armnn/layers/CastLayer.hpp
@@ -28,10 +28,6 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
-
protected:
/// Constructor to create a CastLayer.
CastLayer(const char *name);
diff --git a/src/armnn/layers/ChannelShuffleLayer.cpp b/src/armnn/layers/ChannelShuffleLayer.cpp
index 78a2393a52..33ea70e94a 100644
--- a/src/armnn/layers/ChannelShuffleLayer.cpp
+++ b/src/armnn/layers/ChannelShuffleLayer.cpp
@@ -47,11 +47,4 @@ void ChannelShuffleLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ChannelShuffleLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void ChannelShuffleLayer::Accept(ILayerVisitor& visitor) const
-{
- IgnoreUnused(visitor);
- throw armnn::Exception("ChannelShuffleLayer: VisitChannelShuffleLayer is not implemented");
-}
-ARMNN_NO_DEPRECATE_WARN_END
} \ No newline at end of file
diff --git a/src/armnn/layers/ChannelShuffleLayer.hpp b/src/armnn/layers/ChannelShuffleLayer.hpp
index 903d161107..79ab426a44 100644
--- a/src/armnn/layers/ChannelShuffleLayer.hpp
+++ b/src/armnn/layers/ChannelShuffleLayer.hpp
@@ -11,9 +11,6 @@ namespace armnn
class ChannelShuffleLayer : public LayerWithParameters<ChannelShuffleDescriptor>
{
public:
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
/// Creates a dynamically-allocated copy of this layer.
/// @param graph The graph into which this layer is being cloned
diff --git a/src/armnn/layers/ComparisonLayer.cpp b/src/armnn/layers/ComparisonLayer.cpp
index cf16386f6e..b6cd48b268 100644
--- a/src/armnn/layers/ComparisonLayer.cpp
+++ b/src/armnn/layers/ComparisonLayer.cpp
@@ -74,11 +74,9 @@ void ComparisonLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ComparisonLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void ComparisonLayer::Accept(ILayerVisitor& visitor) const
+void ComparisonLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitComparisonLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ComparisonLayer.hpp b/src/armnn/layers/ComparisonLayer.hpp
index 07534afab1..7361c6b3f7 100644
--- a/src/armnn/layers/ComparisonLayer.hpp
+++ b/src/armnn/layers/ComparisonLayer.hpp
@@ -35,9 +35,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
/// Constructor to create a ComparisonLayer
diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp
index b59e0b9a57..69660dd04f 100644
--- a/src/armnn/layers/ConcatLayer.cpp
+++ b/src/armnn/layers/ConcatLayer.cpp
@@ -318,11 +318,9 @@ void ConcatLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConcatLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void ConcatLayer::Accept(ILayerVisitor& visitor) const
+void ConcatLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitConcatLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn armnn
diff --git a/src/armnn/layers/ConcatLayer.hpp b/src/armnn/layers/ConcatLayer.hpp
index fefedea608..db971ed720 100644
--- a/src/armnn/layers/ConcatLayer.hpp
+++ b/src/armnn/layers/ConcatLayer.hpp
@@ -44,9 +44,7 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
/// Constructor to create a ConcatLayer.
diff --git a/src/armnn/layers/ConstantLayer.cpp b/src/armnn/layers/ConstantLayer.cpp
index 0c06dd5a54..aee95d063c 100644
--- a/src/armnn/layers/ConstantLayer.cpp
+++ b/src/armnn/layers/ConstantLayer.cpp
@@ -62,15 +62,6 @@ void ConstantLayer::ValidateTensorShapesFromInputs()
outShape);
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void ConstantLayer::Accept(ILayerVisitor& visitor) const
-{
- ManagedConstTensorHandle managedLayerOutput(m_LayerOutput);
- ConstTensor layerOutputTensor(managedLayerOutput.GetTensorInfo(), managedLayerOutput.Map());
- visitor.VisitConstantLayer(this, layerOutputTensor, GetName());
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
void ConstantLayer::ExecuteStrategy(IStrategy& strategy) const
{
ManagedConstTensorHandle managedLayerOutput(m_LayerOutput);
diff --git a/src/armnn/layers/ConstantLayer.hpp b/src/armnn/layers/ConstantLayer.hpp
index d3dd8cf47a..f5ab5464f2 100644
--- a/src/armnn/layers/ConstantLayer.hpp
+++ b/src/armnn/layers/ConstantLayer.hpp
@@ -39,10 +39,6 @@ public:
/// Free up the constant source data stored by the layer.
void ReleaseConstantData() override {}
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
-
void ExecuteStrategy(IStrategy& strategy) const override;
std::shared_ptr<ConstTensorHandle> m_LayerOutput;
diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
index 6d843f3129..d7ad692820 100644
--- a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
@@ -47,14 +47,12 @@ void ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConvertBf16ToFp32Layer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void ConvertBf16ToFp32Layer::Accept(ILayerVisitor& visitor) const
+void ConvertBf16ToFp32Layer::ExecuteStrategy(IStrategy& strategy) const
{
// these conversion layers are only inserted by the
// optimizer and so will never be in an input graph.
- IgnoreUnused(visitor);
+ IgnoreUnused(strategy);
throw armnn::Exception("ConvertBf16ToFp32Layer should never appear in an input graph");
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.hpp b/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
index d2c006655c..71312758e4 100644
--- a/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
+++ b/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
@@ -28,9 +28,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
/// Constructor to create a ConvertBf16ToFp32Layer.
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
index cc3c8b18e1..423721dc8c 100644
--- a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
@@ -47,14 +47,12 @@ void ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConvertFp16ToFp32Layer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void ConvertFp16ToFp32Layer::Accept(ILayerVisitor& visitor) const
+void ConvertFp16ToFp32Layer::ExecuteStrategy(IStrategy& strategy) const
{
// these conversion layers are only inserted by the
// optimizer and so will never be in an input graph.
- IgnoreUnused(visitor);
+ IgnoreUnused(strategy);
throw armnn::Exception("ConvertFp16ToFp32Layer should never appear in an input graph");
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.hpp b/src/armnn/layers/ConvertFp16ToFp32Layer.hpp
index 59faf6486d..ab01a20251 100644
--- a/src/armnn/layers/ConvertFp16ToFp32Layer.hpp
+++ b/src/armnn/layers/ConvertFp16ToFp32Layer.hpp
@@ -28,9 +28,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
/// Constructor to create a ConvertFp16ToFp32Layer.
diff --git a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
index 978fbd16da..1556deeba3 100644
--- a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
@@ -48,14 +48,12 @@ void ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LayerName");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void ConvertFp32ToBf16Layer::Accept(ILayerVisitor& visitor) const
+void ConvertFp32ToBf16Layer::ExecuteStrategy(IStrategy& strategy) const
{
// these conversion layers are only inserted by the
// optimizer and so will never be in an input graph.
- IgnoreUnused(visitor);
+ IgnoreUnused(strategy);
throw armnn::Exception("ConvertFp32ToBf16Layer should never appear in an input graph");
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ConvertFp32ToBf16Layer.hpp b/src/armnn/layers/ConvertFp32ToBf16Layer.hpp
index 8e33cb2d6a..71de4fbcda 100644
--- a/src/armnn/layers/ConvertFp32ToBf16Layer.hpp
+++ b/src/armnn/layers/ConvertFp32ToBf16Layer.hpp
@@ -28,9 +28,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
/// Constructor to create a ConvertFp32ToBf16Layer.
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
index 2e1074a1b4..748cde359b 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
@@ -47,14 +47,12 @@ void ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LayerName");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void ConvertFp32ToFp16Layer::Accept(ILayerVisitor& visitor) const
+void ConvertFp32ToFp16Layer::ExecuteStrategy(IStrategy& strategy) const
{
// These conversion layers are only inserted by the
// optimizer and so will never be in an input graph.
- IgnoreUnused(visitor);
+ IgnoreUnused(strategy);
throw armnn::Exception("ConvertFp32ToFp16Layer should never appear in an input graph");
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.hpp b/src/armnn/layers/ConvertFp32ToFp16Layer.hpp
index e331c7d59a..8ae0f7c99e 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.hpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.hpp
@@ -27,9 +27,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
/// Constructor to create a ConvertFp32ToFp16Layer.
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp
index dbbd009716..d0233976c4 100644
--- a/src/armnn/layers/Convolution2dLayer.cpp
+++ b/src/armnn/layers/Convolution2dLayer.cpp
@@ -150,16 +150,9 @@ Layer::ConstantTensors Convolution2dLayer::GetConstantTensorsByRef()
return {m_Weight, m_Bias};
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void Convolution2dLayer::Accept(ILayerVisitor& visitor) const
-{
- visitor.VisitConvolution2dLayer(this, GetParameters(), GetName());
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
void Convolution2dLayer::ExecuteStrategy(IStrategy& strategy) const
{
- strategy.ExecuteStrategy(this, GetParameters(), { }, GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
} // namespace armnn
diff --git a/src/armnn/layers/Convolution2dLayer.hpp b/src/armnn/layers/Convolution2dLayer.hpp
index 6bb86da18e..57999709cd 100644
--- a/src/armnn/layers/Convolution2dLayer.hpp
+++ b/src/armnn/layers/Convolution2dLayer.hpp
@@ -44,9 +44,6 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/Convolution3dLayer.cpp b/src/armnn/layers/Convolution3dLayer.cpp
index 42b275e055..b01870af46 100644
--- a/src/armnn/layers/Convolution3dLayer.cpp
+++ b/src/armnn/layers/Convolution3dLayer.cpp
@@ -124,14 +124,6 @@ void Convolution3dLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Convolution3dLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void Convolution3dLayer::Accept(ILayerVisitor& visitor) const
-{
- IgnoreUnused(visitor);
- throw armnn::Exception("Convolution3dLayer: VisitConvolution3dLayer is not implemented");
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
void Convolution3dLayer::ExecuteStrategy(IStrategy& strategy) const
{
strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
diff --git a/src/armnn/layers/Convolution3dLayer.hpp b/src/armnn/layers/Convolution3dLayer.hpp
index 7cbd6428dc..85628e5617 100644
--- a/src/armnn/layers/Convolution3dLayer.hpp
+++ b/src/armnn/layers/Convolution3dLayer.hpp
@@ -37,10 +37,6 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
-
void ExecuteStrategy(IStrategy& strategy) const override;
void SerializeLayerParameters(ParameterStringifyFunction& fn) const override;
diff --git a/src/armnn/layers/DebugLayer.cpp b/src/armnn/layers/DebugLayer.cpp
index 90a55cbc40..57cf3b7cfd 100644
--- a/src/armnn/layers/DebugLayer.cpp
+++ b/src/armnn/layers/DebugLayer.cpp
@@ -52,13 +52,11 @@ void DebugLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DebugLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void DebugLayer::Accept(ILayerVisitor& visitor) const
+void DebugLayer::ExecuteStrategy(IStrategy& strategy) const
{
// by design debug layers are never in input graphs
- IgnoreUnused(visitor);
+ IgnoreUnused(strategy);
throw armnn::Exception("DebugLayer should never appear in an input graph");
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/DebugLayer.hpp b/src/armnn/layers/DebugLayer.hpp
index 054f5e4d2b..fe7ad5c9e5 100644
--- a/src/armnn/layers/DebugLayer.hpp
+++ b/src/armnn/layers/DebugLayer.hpp
@@ -28,9 +28,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
/// Constructor to create a DebugLayer.
diff --git a/src/armnn/layers/DepthToSpaceLayer.cpp b/src/armnn/layers/DepthToSpaceLayer.cpp
index 033154e81d..2414b00e2f 100644
--- a/src/armnn/layers/DepthToSpaceLayer.cpp
+++ b/src/armnn/layers/DepthToSpaceLayer.cpp
@@ -75,11 +75,9 @@ void DepthToSpaceLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DepthToSpaceLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void DepthToSpaceLayer::Accept(ILayerVisitor& visitor) const
+void DepthToSpaceLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitDepthToSpaceLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/DepthToSpaceLayer.hpp b/src/armnn/layers/DepthToSpaceLayer.hpp
index d9f6752cbd..c7e08e97f7 100644
--- a/src/armnn/layers/DepthToSpaceLayer.hpp
+++ b/src/armnn/layers/DepthToSpaceLayer.hpp
@@ -35,9 +35,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
/// Constructor to create a DepthToSpaceLayer.
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
index 4fd280485a..dcd800e367 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
@@ -153,13 +153,6 @@ Layer::ConstantTensors DepthwiseConvolution2dLayer::GetConstantTensorsByRef()
return {m_Weight, m_Bias};
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void DepthwiseConvolution2dLayer::Accept(ILayerVisitor& visitor) const
-{
- visitor.VisitDepthwiseConvolution2dLayer(this, GetParameters(), GetName());
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
void DepthwiseConvolution2dLayer::ExecuteStrategy(IStrategy& strategy) const
{
strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
index e8ae9a6e79..9b0e6ad8c6 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
@@ -43,10 +43,6 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
-
void ExecuteStrategy(IStrategy& strategy) const override;
void SerializeLayerParameters(ParameterStringifyFunction& fn) const override;
diff --git a/src/armnn/layers/DequantizeLayer.cpp b/src/armnn/layers/DequantizeLayer.cpp
index afa0a7382a..7bc03f4cde 100644
--- a/src/armnn/layers/DequantizeLayer.cpp
+++ b/src/armnn/layers/DequantizeLayer.cpp
@@ -46,11 +46,9 @@ void DequantizeLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DequantizeLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void DequantizeLayer::Accept(ILayerVisitor& visitor) const
+void DequantizeLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitDequantizeLayer(this, GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/DequantizeLayer.hpp b/src/armnn/layers/DequantizeLayer.hpp
index 99bde85f72..b0d04c5abf 100644
--- a/src/armnn/layers/DequantizeLayer.hpp
+++ b/src/armnn/layers/DequantizeLayer.hpp
@@ -28,9 +28,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
/// Constructor to create a DequantizeLayer.
diff --git a/src/armnn/layers/DetectionPostProcessLayer.cpp b/src/armnn/layers/DetectionPostProcessLayer.cpp
index 58f261cc05..28c6d50659 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.cpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.cpp
@@ -79,16 +79,6 @@ Layer::ConstantTensors DetectionPostProcessLayer::GetConstantTensorsByRef()
return { m_Anchors };
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void DetectionPostProcessLayer::Accept(ILayerVisitor& visitor) const
-{
- ManagedConstTensorHandle managedAnchors(m_Anchors);
- ConstTensor anchorTensor(managedAnchors.GetTensorInfo(), managedAnchors.Map());
- visitor.VisitDetectionPostProcessLayer(this, GetParameters(), anchorTensor, GetName());
- m_Anchors->Unmap();
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
void DetectionPostProcessLayer::ExecuteStrategy(IStrategy& strategy) const
{
ManagedConstTensorHandle managedAnchors(m_Anchors);
diff --git a/src/armnn/layers/DetectionPostProcessLayer.hpp b/src/armnn/layers/DetectionPostProcessLayer.hpp
index 1826645fc6..07eb270f1f 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.hpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.hpp
@@ -34,10 +34,6 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
-
void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/DivisionLayer.cpp b/src/armnn/layers/DivisionLayer.cpp
index c6faf41a84..e4e2a7d8b7 100644
--- a/src/armnn/layers/DivisionLayer.cpp
+++ b/src/armnn/layers/DivisionLayer.cpp
@@ -32,11 +32,9 @@ DivisionLayer* DivisionLayer::Clone(Graph& graph) const
return CloneBase<DivisionLayer>(graph, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void DivisionLayer::Accept(ILayerVisitor& visitor) const
+void DivisionLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitDivisionLayer(this, GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/DivisionLayer.hpp b/src/armnn/layers/DivisionLayer.hpp
index 91bccfc184..398a9477fd 100644
--- a/src/armnn/layers/DivisionLayer.hpp
+++ b/src/armnn/layers/DivisionLayer.hpp
@@ -24,9 +24,7 @@ public:
/// @param [in] graph The graph into which this layer is being cloned.
DivisionLayer* Clone(Graph& graph) const override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
/// Constructor to create a DivisionLayer.
diff --git a/src/armnn/layers/ElementwiseUnaryLayer.cpp b/src/armnn/layers/ElementwiseUnaryLayer.cpp
index c50910bd32..a6c1f16ed3 100644
--- a/src/armnn/layers/ElementwiseUnaryLayer.cpp
+++ b/src/armnn/layers/ElementwiseUnaryLayer.cpp
@@ -55,11 +55,9 @@ void ElementwiseUnaryLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, GetLayerTypeAsCString(GetType()));
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void ElementwiseUnaryLayer::Accept(ILayerVisitor& visitor) const
+void ElementwiseUnaryLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitElementwiseUnaryLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ElementwiseUnaryLayer.hpp b/src/armnn/layers/ElementwiseUnaryLayer.hpp
index 1261882e0b..286030a2e2 100644
--- a/src/armnn/layers/ElementwiseUnaryLayer.hpp
+++ b/src/armnn/layers/ElementwiseUnaryLayer.hpp
@@ -34,9 +34,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
/// Constructor to create a ElementwiseUnaryLayer
diff --git a/src/armnn/layers/FakeQuantizationLayer.cpp b/src/armnn/layers/FakeQuantizationLayer.cpp
index f375f9af43..5c6ac18cf7 100644
--- a/src/armnn/layers/FakeQuantizationLayer.cpp
+++ b/src/armnn/layers/FakeQuantizationLayer.cpp
@@ -46,14 +46,6 @@ void FakeQuantizationLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "FakeQuantizationLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void FakeQuantizationLayer::Accept(ILayerVisitor& visitor) const
-{
- IgnoreUnused(visitor);
- throw armnn::Exception("FakeQuantizationLayer should not appear in an input graph");
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
void FakeQuantizationLayer::ExecuteStrategy(IStrategy& strategy) const
{
IgnoreUnused(strategy);
diff --git a/src/armnn/layers/FakeQuantizationLayer.hpp b/src/armnn/layers/FakeQuantizationLayer.hpp
index c115c63f33..464fe69b3f 100644
--- a/src/armnn/layers/FakeQuantizationLayer.hpp
+++ b/src/armnn/layers/FakeQuantizationLayer.hpp
@@ -28,9 +28,6 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/FillLayer.cpp b/src/armnn/layers/FillLayer.cpp
index 5004fabedf..0822c3c83e 100644
--- a/src/armnn/layers/FillLayer.cpp
+++ b/src/armnn/layers/FillLayer.cpp
@@ -51,11 +51,9 @@ void FillLayer::ValidateTensorShapesFromInputs()
inferredShapes[0][0]);
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void FillLayer::Accept(ILayerVisitor& visitor) const
+void FillLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitFillLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/FillLayer.hpp b/src/armnn/layers/FillLayer.hpp
index 096d9ba7dc..034da7545a 100644
--- a/src/armnn/layers/FillLayer.hpp
+++ b/src/armnn/layers/FillLayer.hpp
@@ -27,9 +27,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
/// Constructor to create a FillLayer.
diff --git a/src/armnn/layers/FloorLayer.cpp b/src/armnn/layers/FloorLayer.cpp
index 616c118552..471b575eb0 100644
--- a/src/armnn/layers/FloorLayer.cpp
+++ b/src/armnn/layers/FloorLayer.cpp
@@ -45,11 +45,9 @@ void FloorLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "FloorLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void FloorLayer::Accept(ILayerVisitor& visitor) const
+void FloorLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitFloorLayer(this, GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/FloorLayer.hpp b/src/armnn/layers/FloorLayer.hpp
index 2b16cfab26..4906c858b9 100644
--- a/src/armnn/layers/FloorLayer.hpp
+++ b/src/armnn/layers/FloorLayer.hpp
@@ -28,9 +28,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
/// Constructor to create a FloorLayer.
diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp
index 1f006c9d80..c20bc8d167 100644
--- a/src/armnn/layers/FullyConnectedLayer.cpp
+++ b/src/armnn/layers/FullyConnectedLayer.cpp
@@ -88,13 +88,6 @@ Layer::ConstantTensors FullyConnectedLayer::GetConstantTensorsByRef()
return {m_Weight, m_Bias};
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void FullyConnectedLayer::Accept(ILayerVisitor& visitor) const
-{
- visitor.VisitFullyConnectedLayer(this, GetParameters(), GetName());
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
void FullyConnectedLayer::ExecuteStrategy(IStrategy& strategy) const
{
strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
diff --git a/src/armnn/layers/FullyConnectedLayer.hpp b/src/armnn/layers/FullyConnectedLayer.hpp
index e97282d73f..09dbb5f33a 100644
--- a/src/armnn/layers/FullyConnectedLayer.hpp
+++ b/src/armnn/layers/FullyConnectedLayer.hpp
@@ -43,10 +43,6 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
-
void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/GatherLayer.cpp b/src/armnn/layers/GatherLayer.cpp
index 33d2088e69..252dfd2f45 100644
--- a/src/armnn/layers/GatherLayer.cpp
+++ b/src/armnn/layers/GatherLayer.cpp
@@ -89,11 +89,9 @@ void GatherLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "GatherLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void GatherLayer::Accept(ILayerVisitor& visitor) const
+void GatherLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitGatherLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/GatherLayer.hpp b/src/armnn/layers/GatherLayer.hpp
index 8c294079c3..90cbedc997 100644
--- a/src/armnn/layers/GatherLayer.hpp
+++ b/src/armnn/layers/GatherLayer.hpp
@@ -34,9 +34,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
/// Constructor to create a GatherLayer.
diff --git a/src/armnn/layers/GatherNdLayer.cpp b/src/armnn/layers/GatherNdLayer.cpp
index 1ca2cbbae3..036b3cfa86 100644
--- a/src/armnn/layers/GatherNdLayer.cpp
+++ b/src/armnn/layers/GatherNdLayer.cpp
@@ -93,12 +93,4 @@ void GatherNdLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "GatherNdLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void GatherNdLayer::Accept(ILayerVisitor& visitor) const
-{
- IgnoreUnused(visitor);
- throw armnn::Exception("GatherNdLayer VisitGatherNdLayer is not implemented");
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
} // namespace armnn
diff --git a/src/armnn/layers/GatherNdLayer.hpp b/src/armnn/layers/GatherNdLayer.hpp
index 9e07715f90..d873bd324d 100644
--- a/src/armnn/layers/GatherNdLayer.hpp
+++ b/src/armnn/layers/GatherNdLayer.hpp
@@ -32,10 +32,6 @@ public:
/// will lead to a valid configuration of @ref GatherNdLayer.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
-
protected:
/// Constructor to create a GatherNdLayer.
/// @param [in] name Optional name for the layer.
diff --git a/src/armnn/layers/InputLayer.cpp b/src/armnn/layers/InputLayer.cpp
index c05278faf5..01351f61f2 100644
--- a/src/armnn/layers/InputLayer.cpp
+++ b/src/armnn/layers/InputLayer.cpp
@@ -35,11 +35,9 @@ void InputLayer::ValidateTensorShapesFromInputs()
"InputLayer should already have the TensorInfo set.");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void InputLayer::Accept(ILayerVisitor& visitor) const
+void InputLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitInputLayer(this, this->GetBindingId(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName(), GetBindingId());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace
diff --git a/src/armnn/layers/InputLayer.hpp b/src/armnn/layers/InputLayer.hpp
index 2b73dcec35..fbc6a09a75 100644
--- a/src/armnn/layers/InputLayer.hpp
+++ b/src/armnn/layers/InputLayer.hpp
@@ -28,9 +28,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
/// Constructor to create an InputLayer.
diff --git a/src/armnn/layers/InstanceNormalizationLayer.cpp b/src/armnn/layers/InstanceNormalizationLayer.cpp
index 44e98700c9..10543c6aa1 100644
--- a/src/armnn/layers/InstanceNormalizationLayer.cpp
+++ b/src/armnn/layers/InstanceNormalizationLayer.cpp
@@ -46,11 +46,9 @@ void InstanceNormalizationLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "InstanceNormalizationLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void InstanceNormalizationLayer::Accept(ILayerVisitor& visitor) const
+void InstanceNormalizationLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitInstanceNormalizationLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/InstanceNormalizationLayer.hpp b/src/armnn/layers/InstanceNormalizationLayer.hpp
index addd61e4f8..16f99adc45 100644
--- a/src/armnn/layers/InstanceNormalizationLayer.hpp
+++ b/src/armnn/layers/InstanceNormalizationLayer.hpp
@@ -28,9 +28,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
/// Constructor to create a InstanceNormalizationLayer.
diff --git a/src/armnn/layers/L2NormalizationLayer.cpp b/src/armnn/layers/L2NormalizationLayer.cpp
index 0e0ae2e66f..8ea242be61 100644
--- a/src/armnn/layers/L2NormalizationLayer.cpp
+++ b/src/armnn/layers/L2NormalizationLayer.cpp
@@ -46,11 +46,9 @@ void L2NormalizationLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "L2NormalizationLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void L2NormalizationLayer::Accept(ILayerVisitor& visitor) const
+void L2NormalizationLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitL2NormalizationLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/L2NormalizationLayer.hpp b/src/armnn/layers/L2NormalizationLayer.hpp
index 21072b20a0..4371143cc8 100644
--- a/src/armnn/layers/L2NormalizationLayer.hpp
+++ b/src/armnn/layers/L2NormalizationLayer.hpp
@@ -28,9 +28,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
/// Constructor to create a L2NormalizationLayer.
diff --git a/src/armnn/layers/LogSoftmaxLayer.cpp b/src/armnn/layers/LogSoftmaxLayer.cpp
index 4f51a2baf8..0698b70638 100644
--- a/src/armnn/layers/LogSoftmaxLayer.cpp
+++ b/src/armnn/layers/LogSoftmaxLayer.cpp
@@ -45,11 +45,9 @@ void LogSoftmaxLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LogSoftmaxLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void LogSoftmaxLayer::Accept(ILayerVisitor& visitor) const
+void LogSoftmaxLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitLogSoftmaxLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/LogSoftmaxLayer.hpp b/src/armnn/layers/LogSoftmaxLayer.hpp
index 9963f85f30..81ee760f5b 100644
--- a/src/armnn/layers/LogSoftmaxLayer.hpp
+++ b/src/armnn/layers/LogSoftmaxLayer.hpp
@@ -29,9 +29,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
/// Constructor to create a LogSoftmaxLayer.
diff --git a/src/armnn/layers/LogicalBinaryLayer.cpp b/src/armnn/layers/LogicalBinaryLayer.cpp
index 1a20c988a4..736a0c27ce 100644
--- a/src/armnn/layers/LogicalBinaryLayer.cpp
+++ b/src/armnn/layers/LogicalBinaryLayer.cpp
@@ -72,11 +72,9 @@ void LogicalBinaryLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LogicalBinaryLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void LogicalBinaryLayer::Accept(ILayerVisitor& visitor) const
+void LogicalBinaryLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitLogicalBinaryLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/LogicalBinaryLayer.hpp b/src/armnn/layers/LogicalBinaryLayer.hpp
index caeaa0a1af..500f644e51 100644
--- a/src/armnn/layers/LogicalBinaryLayer.hpp
+++ b/src/armnn/layers/LogicalBinaryLayer.hpp
@@ -35,9 +35,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
/// Constructor to create a LogicalBinaryLayer
diff --git a/src/armnn/layers/LstmLayer.cpp b/src/armnn/layers/LstmLayer.cpp
index 06e5e8e5d0..8e6bfdba3e 100644
--- a/src/armnn/layers/LstmLayer.cpp
+++ b/src/armnn/layers/LstmLayer.cpp
@@ -301,218 +301,6 @@ Layer::ConstantTensors LstmLayer::GetConstantTensorsByRef()
m_LayerNormParameters.m_OutputLayerNormWeights};
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void LstmLayer::Accept(ILayerVisitor& visitor) const
-{
- LstmInputParams inputParams;
- ManagedConstTensorHandle managedInputToForgetWeights(m_BasicParameters.m_InputToForgetWeights);
- ManagedConstTensorHandle managedInputToCellWeights(m_BasicParameters.m_InputToCellWeights);
- ManagedConstTensorHandle managedInputToOutputWeights(m_BasicParameters.m_InputToOutputWeights);
- ManagedConstTensorHandle managedRecurrentToForgetWeights(m_BasicParameters.m_RecurrentToForgetWeights);
- ManagedConstTensorHandle managedRecurrentToCellWeights(m_BasicParameters.m_RecurrentToCellWeights);
- ManagedConstTensorHandle managedRecurrentToOutputWeights(m_BasicParameters.m_RecurrentToOutputWeights);
- ManagedConstTensorHandle managedForgetGateBias(m_BasicParameters.m_ForgetGateBias);
- ManagedConstTensorHandle managedCellBias(m_BasicParameters.m_CellBias);
- ManagedConstTensorHandle managedOutputGateBias(m_BasicParameters.m_OutputGateBias);
-
- // Cifg parameters
- ManagedConstTensorHandle managedInputToInputWeights(m_CifgParameters.m_InputToInputWeights);
- ManagedConstTensorHandle managedRecurrentToInputWeights(m_CifgParameters.m_RecurrentToInputWeights);
- ManagedConstTensorHandle managedInputGateBias(m_CifgParameters.m_InputGateBias);
-
- // Projection parameters
- ManagedConstTensorHandle managedProjectionWeights(m_ProjectionParameters.m_ProjectionWeights);
- ManagedConstTensorHandle managedProjectionBias(m_ProjectionParameters.m_ProjectionBias);
-
- // Peephole parameters
- ManagedConstTensorHandle managedCellToInputWeights(m_PeepholeParameters.m_CellToInputWeights);
- ManagedConstTensorHandle managedCellToForgetWeights(m_PeepholeParameters.m_CellToForgetWeights);
- ManagedConstTensorHandle managedCellToOutputWeights(m_PeepholeParameters.m_CellToOutputWeights);
-
- // Layer normalisation parameters
- ManagedConstTensorHandle managedInputLayerNormWeights(m_LayerNormParameters.m_InputLayerNormWeights);
- ManagedConstTensorHandle managedForgetLayerNormWeights(m_LayerNormParameters.m_ForgetLayerNormWeights);
- ManagedConstTensorHandle managedCellLayerNormWeights(m_LayerNormParameters.m_CellLayerNormWeights);
- ManagedConstTensorHandle managedOutputLayerNormWeights(m_LayerNormParameters.m_OutputLayerNormWeights);
-
- ConstTensor inputToInputWeightsTensor;
- if (m_CifgParameters.m_InputToInputWeights != nullptr)
- {
- ConstTensor inputToInputWeightsTensorCopy(managedInputToInputWeights.GetTensorInfo(),
- managedInputToInputWeights.Map());
- inputToInputWeightsTensor = inputToInputWeightsTensorCopy;
- inputParams.m_InputToInputWeights = &inputToInputWeightsTensor;
- }
- ConstTensor inputToForgetWeightsTensor;
- if (m_BasicParameters.m_InputToForgetWeights != nullptr)
- {
- ConstTensor inputToForgetWeightsTensorCopy(managedInputToForgetWeights.GetTensorInfo(),
- managedInputToForgetWeights.Map());
- inputToForgetWeightsTensor = inputToForgetWeightsTensorCopy;
- inputParams.m_InputToForgetWeights = &inputToForgetWeightsTensor;
- }
- ConstTensor inputToCellWeightsTensor;
- if (m_BasicParameters.m_InputToCellWeights != nullptr)
- {
- ConstTensor inputToCellWeightsTensorCopy(managedInputToCellWeights.GetTensorInfo(),
- managedInputToCellWeights.Map());
- inputToCellWeightsTensor = inputToCellWeightsTensorCopy;
- inputParams.m_InputToCellWeights = &inputToCellWeightsTensor;
- }
- ConstTensor inputToOutputWeightsTensor;
- if (m_BasicParameters.m_InputToOutputWeights != nullptr)
- {
- ConstTensor inputToOutputWeightsTensorCopy(managedInputToOutputWeights.GetTensorInfo(),
- managedInputToOutputWeights.Map());
- inputToOutputWeightsTensor = inputToOutputWeightsTensorCopy;
- inputParams.m_InputToOutputWeights = &inputToOutputWeightsTensor;
- }
- ConstTensor recurrentToInputWeightsTensor;
- if (m_CifgParameters.m_RecurrentToInputWeights != nullptr)
- {
- ConstTensor recurrentToInputWeightsTensorCopy(
- managedRecurrentToInputWeights.GetTensorInfo(),
- managedRecurrentToInputWeights.Map());
- recurrentToInputWeightsTensor = recurrentToInputWeightsTensorCopy;
- inputParams.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
- }
- ConstTensor recurrentToForgetWeightsTensor;
- if (m_BasicParameters.m_RecurrentToForgetWeights != nullptr)
- {
- ConstTensor recurrentToForgetWeightsTensorCopy(
- managedRecurrentToForgetWeights.GetTensorInfo(),
- managedRecurrentToForgetWeights.Map());
- recurrentToForgetWeightsTensor = recurrentToForgetWeightsTensorCopy;
- inputParams.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
- }
- ConstTensor recurrentToCellWeightsTensor;
- if (m_BasicParameters.m_RecurrentToCellWeights != nullptr)
- {
- ConstTensor recurrentToCellWeightsTensorCopy(
- managedRecurrentToCellWeights.GetTensorInfo(),
- managedRecurrentToCellWeights.Map());
- recurrentToCellWeightsTensor = recurrentToCellWeightsTensorCopy;
- inputParams.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
- }
- ConstTensor recurrentToOutputWeightsTensor;
- if (m_BasicParameters.m_RecurrentToOutputWeights != nullptr)
- {
- ConstTensor recurrentToOutputWeightsTensorCopy(
- managedRecurrentToOutputWeights.GetTensorInfo(),
- managedRecurrentToOutputWeights.Map());
- recurrentToOutputWeightsTensor = recurrentToOutputWeightsTensorCopy;
- inputParams.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
- }
- ConstTensor cellToInputWeightsTensor;
- if (m_PeepholeParameters.m_CellToInputWeights != nullptr)
- {
- ConstTensor cellToInputWeightsTensorCopy(managedCellToInputWeights.GetTensorInfo(),
- managedCellToInputWeights.Map());
- cellToInputWeightsTensor = cellToInputWeightsTensorCopy;
- inputParams.m_CellToInputWeights = &cellToInputWeightsTensor;
- }
- ConstTensor cellToForgetWeightsTensor;
- if (m_PeepholeParameters.m_CellToForgetWeights != nullptr)
- {
- ConstTensor cellToForgetWeightsTensorCopy(managedCellToForgetWeights.GetTensorInfo(),
- managedCellToForgetWeights.Map());
- cellToForgetWeightsTensor = cellToForgetWeightsTensorCopy;
- inputParams.m_CellToForgetWeights = &cellToForgetWeightsTensor;
- }
- ConstTensor cellToOutputWeightsTensor;
- if (m_PeepholeParameters.m_CellToOutputWeights != nullptr)
- {
- ConstTensor cellToOutputWeightsTensorCopy(managedCellToOutputWeights.GetTensorInfo(),
- managedCellToOutputWeights.Map());
- cellToOutputWeightsTensor = cellToOutputWeightsTensorCopy;
- inputParams.m_CellToOutputWeights = &cellToOutputWeightsTensor;
- }
- ConstTensor inputGateBiasTensor;
- if (m_CifgParameters.m_InputGateBias != nullptr)
- {
- ConstTensor inputGateBiasTensorCopy(managedInputGateBias.GetTensorInfo(),
- managedInputGateBias.Map());
- inputGateBiasTensor = inputGateBiasTensorCopy;
- inputParams.m_InputGateBias = &inputGateBiasTensor;
- }
- ConstTensor forgetGateBiasTensor;
- if (m_BasicParameters.m_ForgetGateBias != nullptr)
- {
- ConstTensor forgetGateBiasTensorCopy(managedForgetGateBias.GetTensorInfo(),
- managedForgetGateBias.Map());
- forgetGateBiasTensor = forgetGateBiasTensorCopy;
- inputParams.m_ForgetGateBias = &forgetGateBiasTensor;
- }
- ConstTensor cellBiasTensor;
- if (m_BasicParameters.m_CellBias != nullptr)
- {
- ConstTensor cellBiasTensorCopy(managedCellBias.GetTensorInfo(),
- managedCellBias.Map());
- cellBiasTensor = cellBiasTensorCopy;
- inputParams.m_CellBias = &cellBiasTensor;
- }
- ConstTensor outputGateBias;
- if (m_BasicParameters.m_OutputGateBias != nullptr)
- {
- ConstTensor outputGateBiasCopy(managedOutputGateBias.GetTensorInfo(),
- managedOutputGateBias.Map());
- outputGateBias = outputGateBiasCopy;
- inputParams.m_OutputGateBias = &outputGateBias;
- }
- ConstTensor projectionWeightsTensor;
- if (m_ProjectionParameters.m_ProjectionWeights != nullptr)
- {
- ConstTensor projectionWeightsTensorCopy(managedProjectionWeights.GetTensorInfo(),
- managedProjectionWeights.Map());
- projectionWeightsTensor = projectionWeightsTensorCopy;
- inputParams.m_ProjectionWeights = &projectionWeightsTensor;
- }
- ConstTensor projectionBiasTensor;
- if (m_ProjectionParameters.m_ProjectionBias != nullptr)
- {
- ConstTensor projectionBiasTensorCopy(managedProjectionBias.GetTensorInfo(),
- managedProjectionBias.Map());
- projectionBiasTensor = projectionBiasTensorCopy;
- inputParams.m_ProjectionBias = &projectionBiasTensor;
- }
- ConstTensor inputLayerNormTensor;
- if (m_LayerNormParameters.m_InputLayerNormWeights != nullptr)
- {
- ConstTensor inputLayerNormTensorCopy(managedInputLayerNormWeights.GetTensorInfo(),
- managedInputLayerNormWeights.Map());
- inputLayerNormTensor = inputLayerNormTensorCopy;
- inputParams.m_InputLayerNormWeights = &inputLayerNormTensor;
- }
- ConstTensor forgetLayerNormTensor;
- if (m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr)
- {
- ConstTensor forgetLayerNormTensorCopy(managedForgetLayerNormWeights.GetTensorInfo(),
- managedForgetLayerNormWeights.Map());
- forgetLayerNormTensor = forgetLayerNormTensorCopy;
- inputParams.m_ForgetLayerNormWeights = &forgetLayerNormTensor;
- }
- ConstTensor cellLayerNormTensor;
- if (m_LayerNormParameters.m_CellLayerNormWeights != nullptr)
- {
- ConstTensor cellLayerNormTensorCopy(managedCellLayerNormWeights.GetTensorInfo(),
- managedCellLayerNormWeights.Map());
- cellLayerNormTensor = cellLayerNormTensorCopy;
- inputParams.m_CellLayerNormWeights = &cellLayerNormTensor;
- }
- ConstTensor outputLayerNormTensor;
- if (m_LayerNormParameters.m_OutputLayerNormWeights != nullptr)
- {
- ConstTensor outputLayerNormTensorCopy(managedOutputLayerNormWeights.GetTensorInfo(),
- managedOutputLayerNormWeights.Map());
- outputLayerNormTensor = outputLayerNormTensorCopy;
- inputParams.m_OutputLayerNormWeights = &outputLayerNormTensor;
- }
-
-
- visitor.VisitLstmLayer(this, GetParameters(), inputParams, GetName());
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
void LstmLayer::ExecuteStrategy(IStrategy& strategy) const
{
std::vector<ConstTensor> constTensors;
diff --git a/src/armnn/layers/LstmLayer.hpp b/src/armnn/layers/LstmLayer.hpp
index fbcc03dd6f..7310d41238 100644
--- a/src/armnn/layers/LstmLayer.hpp
+++ b/src/armnn/layers/LstmLayer.hpp
@@ -44,10 +44,6 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
-
void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/MapLayer.cpp b/src/armnn/layers/MapLayer.cpp
index 7a33890820..6141974122 100644
--- a/src/armnn/layers/MapLayer.cpp
+++ b/src/armnn/layers/MapLayer.cpp
@@ -41,12 +41,10 @@ void MapLayer::ValidateTensorShapesFromInputs()
ARMNN_ASSERT(GetNumOutputSlots() == 0);
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void MapLayer::Accept(ILayerVisitor& visitor) const
+void MapLayer::ExecuteStrategy(IStrategy& strategy) const
{
- IgnoreUnused(visitor);
+ IgnoreUnused(strategy);
throw armnn::Exception("MapLayer should not appear in an input graph");
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/MapLayer.hpp b/src/armnn/layers/MapLayer.hpp
index d82c44a36f..f450c88d6f 100644
--- a/src/armnn/layers/MapLayer.hpp
+++ b/src/armnn/layers/MapLayer.hpp
@@ -28,9 +28,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
/// Constructor to create a MapLayer.
diff --git a/src/armnn/layers/MaximumLayer.cpp b/src/armnn/layers/MaximumLayer.cpp
index 438c9be116..f074cf92bd 100644
--- a/src/armnn/layers/MaximumLayer.cpp
+++ b/src/armnn/layers/MaximumLayer.cpp
@@ -31,11 +31,9 @@ MaximumLayer* MaximumLayer::Clone(Graph& graph) const
return CloneBase<MaximumLayer>(graph, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void MaximumLayer::Accept(ILayerVisitor& visitor) const
+void MaximumLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitMaximumLayer(this, GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/MaximumLayer.hpp b/src/armnn/layers/MaximumLayer.hpp
index f032b8867d..2b113a428d 100644
--- a/src/armnn/layers/MaximumLayer.hpp
+++ b/src/armnn/layers/MaximumLayer.hpp
@@ -24,9 +24,7 @@ public:
/// @param [in] graph The graph into which this layer is being cloned.
MaximumLayer* Clone(Graph& graph) const override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
/// Constructor to create a MaximumLayer.
diff --git a/src/armnn/layers/MeanLayer.cpp b/src/armnn/layers/MeanLayer.cpp
index f695cc3735..49eac04a8e 100644
--- a/src/armnn/layers/MeanLayer.cpp
+++ b/src/armnn/layers/MeanLayer.cpp
@@ -103,11 +103,9 @@ void MeanLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShape, m_ShapeInferenceMethod, "MeanLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void MeanLayer::Accept(ILayerVisitor& visitor) const
+void MeanLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitMeanLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/MeanLayer.hpp b/src/armnn/layers/MeanLayer.hpp
index 94b0cbe1a3..87998bfc08 100644
--- a/src/armnn/layers/MeanLayer.hpp
+++ b/src/armnn/layers/MeanLayer.hpp
@@ -29,9 +29,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
/// Constructor to create a MeanLayer.
diff --git a/src/armnn/layers/MemCopyLayer.cpp b/src/armnn/layers/MemCopyLayer.cpp
index 33b922cadc..3695117e92 100644
--- a/src/armnn/layers/MemCopyLayer.cpp
+++ b/src/armnn/layers/MemCopyLayer.cpp
@@ -49,14 +49,6 @@ void MemCopyLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MemCopyLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void MemCopyLayer::Accept(ILayerVisitor& visitor) const
-{
- IgnoreUnused(visitor);
- throw armnn::Exception("MemCopyLayer should not appear in an input graph");
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
void MemCopyLayer::ExecuteStrategy(IStrategy& strategy) const
{
IgnoreUnused(strategy);
diff --git a/src/armnn/layers/MemCopyLayer.hpp b/src/armnn/layers/MemCopyLayer.hpp
index 3c6fd0d8d7..4d858b18b2 100644
--- a/src/armnn/layers/MemCopyLayer.hpp
+++ b/src/armnn/layers/MemCopyLayer.hpp
@@ -28,10 +28,6 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
-
void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/MemImportLayer.cpp b/src/armnn/layers/MemImportLayer.cpp
index 0a1082fd1e..182082b5e0 100644
--- a/src/armnn/layers/MemImportLayer.cpp
+++ b/src/armnn/layers/MemImportLayer.cpp
@@ -49,14 +49,6 @@ void MemImportLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MemImportLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void MemImportLayer::Accept(ILayerVisitor& visitor) const
-{
- IgnoreUnused(visitor);
- throw armnn::Exception("MemImportLayer should not appear in an input graph");
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
void MemImportLayer::ExecuteStrategy(IStrategy& strategy) const
{
IgnoreUnused(strategy);
diff --git a/src/armnn/layers/MemImportLayer.hpp b/src/armnn/layers/MemImportLayer.hpp
index 778770132c..be6c463e28 100644
--- a/src/armnn/layers/MemImportLayer.hpp
+++ b/src/armnn/layers/MemImportLayer.hpp
@@ -28,10 +28,6 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
-
void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/MergeLayer.cpp b/src/armnn/layers/MergeLayer.cpp
index c979df875b..94a0c1033d 100644
--- a/src/armnn/layers/MergeLayer.cpp
+++ b/src/armnn/layers/MergeLayer.cpp
@@ -58,11 +58,9 @@ std::vector<TensorShape> MergeLayer::InferOutputShapes(const std::vector<TensorS
return {inputShapes[0]};
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void MergeLayer::Accept(ILayerVisitor& visitor) const
+void MergeLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitMergeLayer(this, GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/MergeLayer.hpp b/src/armnn/layers/MergeLayer.hpp
index d7cfcf3d1f..79bc6f5938 100644
--- a/src/armnn/layers/MergeLayer.hpp
+++ b/src/armnn/layers/MergeLayer.hpp
@@ -33,9 +33,7 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/MinimumLayer.cpp b/src/armnn/layers/MinimumLayer.cpp
index 894704132a..f3661f9b5b 100644
--- a/src/armnn/layers/MinimumLayer.cpp
+++ b/src/armnn/layers/MinimumLayer.cpp
@@ -32,11 +32,9 @@ MinimumLayer* MinimumLayer::Clone(Graph& graph) const
return CloneBase<MinimumLayer>(graph, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void MinimumLayer::Accept(ILayerVisitor& visitor) const
+void MinimumLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitMinimumLayer(this, GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/MinimumLayer.hpp b/src/armnn/layers/MinimumLayer.hpp
index 634591e935..17ef55ef9a 100644
--- a/src/armnn/layers/MinimumLayer.hpp
+++ b/src/armnn/layers/MinimumLayer.hpp
@@ -24,9 +24,7 @@ public:
/// @param [in] graph The graph into which this layer is being cloned.
MinimumLayer* Clone(Graph& graph) const override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/MultiplicationLayer.cpp b/src/armnn/layers/MultiplicationLayer.cpp
index 36f2689506..bcc77dcc51 100644
--- a/src/armnn/layers/MultiplicationLayer.cpp
+++ b/src/armnn/layers/MultiplicationLayer.cpp
@@ -32,11 +32,9 @@ MultiplicationLayer* MultiplicationLayer::Clone(Graph& graph) const
return CloneBase<MultiplicationLayer>(graph, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void MultiplicationLayer::Accept(ILayerVisitor& visitor) const
+void MultiplicationLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitMultiplicationLayer(this, GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/MultiplicationLayer.hpp b/src/armnn/layers/MultiplicationLayer.hpp
index 8acf4f6d0d..2dea82279b 100644
--- a/src/armnn/layers/MultiplicationLayer.hpp
+++ b/src/armnn/layers/MultiplicationLayer.hpp
@@ -24,9 +24,7 @@ public:
/// @param [in] graph The graph into which this layer is being cloned.
MultiplicationLayer* Clone(Graph& graph) const override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/NormalizationLayer.cpp b/src/armnn/layers/NormalizationLayer.cpp
index e42a7cf28e..372cd7637a 100644
--- a/src/armnn/layers/NormalizationLayer.cpp
+++ b/src/armnn/layers/NormalizationLayer.cpp
@@ -46,11 +46,9 @@ void NormalizationLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "NormalizationLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void NormalizationLayer::Accept(ILayerVisitor& visitor) const
+void NormalizationLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitNormalizationLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/NormalizationLayer.hpp b/src/armnn/layers/NormalizationLayer.hpp
index e36e8863a8..a66acd99d8 100644
--- a/src/armnn/layers/NormalizationLayer.hpp
+++ b/src/armnn/layers/NormalizationLayer.hpp
@@ -28,9 +28,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/OutputLayer.cpp b/src/armnn/layers/OutputLayer.cpp
index 241aaeb468..43dd280ae6 100644
--- a/src/armnn/layers/OutputLayer.cpp
+++ b/src/armnn/layers/OutputLayer.cpp
@@ -37,11 +37,9 @@ void OutputLayer::ValidateTensorShapesFromInputs()
"OutputLayer: Input slot must be connected.");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void OutputLayer::Accept(ILayerVisitor& visitor) const
+void OutputLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitOutputLayer(this, GetBindingId(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName(), GetBindingId());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/OutputLayer.hpp b/src/armnn/layers/OutputLayer.hpp
index d2bdf19ddd..b77714ed87 100644
--- a/src/armnn/layers/OutputLayer.hpp
+++ b/src/armnn/layers/OutputLayer.hpp
@@ -40,9 +40,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/PadLayer.cpp b/src/armnn/layers/PadLayer.cpp
index 7900fa5a97..ce63d7be51 100644
--- a/src/armnn/layers/PadLayer.cpp
+++ b/src/armnn/layers/PadLayer.cpp
@@ -73,11 +73,9 @@ void PadLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "PadLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void PadLayer::Accept(ILayerVisitor& visitor) const
+void PadLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitPadLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/PadLayer.hpp b/src/armnn/layers/PadLayer.hpp
index 9a31ae5d60..a688f89b30 100644
--- a/src/armnn/layers/PadLayer.hpp
+++ b/src/armnn/layers/PadLayer.hpp
@@ -35,10 +35,7 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape> &inputShapes) const override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
-
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
/// Constructor to create a PadLayer.
diff --git a/src/armnn/layers/PermuteLayer.cpp b/src/armnn/layers/PermuteLayer.cpp
index e20eea6815..16dc4d6713 100644
--- a/src/armnn/layers/PermuteLayer.cpp
+++ b/src/armnn/layers/PermuteLayer.cpp
@@ -57,11 +57,9 @@ void PermuteLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "PermuteLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void PermuteLayer::Accept(ILayerVisitor& visitor) const
+void PermuteLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitPermuteLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/PermuteLayer.hpp b/src/armnn/layers/PermuteLayer.hpp
index db256b361b..37ae444199 100644
--- a/src/armnn/layers/PermuteLayer.hpp
+++ b/src/armnn/layers/PermuteLayer.hpp
@@ -60,9 +60,7 @@ public:
GetPermutation().IsEqual(PolymorphicDowncast<const PermuteLayer*>(&other)->GetPermutation());
}
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/Pooling2dLayer.cpp b/src/armnn/layers/Pooling2dLayer.cpp
index 9fb055b27d..34deed2a30 100644
--- a/src/armnn/layers/Pooling2dLayer.cpp
+++ b/src/armnn/layers/Pooling2dLayer.cpp
@@ -117,11 +117,9 @@ void Pooling2dLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Pooling2dLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void Pooling2dLayer::Accept(ILayerVisitor& visitor) const
+void Pooling2dLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitPooling2dLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/Pooling2dLayer.hpp b/src/armnn/layers/Pooling2dLayer.hpp
index 677c10b661..67f796eaca 100644
--- a/src/armnn/layers/Pooling2dLayer.hpp
+++ b/src/armnn/layers/Pooling2dLayer.hpp
@@ -34,9 +34,7 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/Pooling3dLayer.cpp b/src/armnn/layers/Pooling3dLayer.cpp
index 046e146423..fe92f62822 100644
--- a/src/armnn/layers/Pooling3dLayer.cpp
+++ b/src/armnn/layers/Pooling3dLayer.cpp
@@ -121,11 +121,9 @@ void Pooling3dLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Pooling3dLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void Pooling3dLayer::Accept(ILayerVisitor& visitor) const
+void Pooling3dLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitPooling3dLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/Pooling3dLayer.hpp b/src/armnn/layers/Pooling3dLayer.hpp
index 0aa48535c0..946d473fc4 100644
--- a/src/armnn/layers/Pooling3dLayer.hpp
+++ b/src/armnn/layers/Pooling3dLayer.hpp
@@ -34,9 +34,7 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/PreCompiledLayer.cpp b/src/armnn/layers/PreCompiledLayer.cpp
index ff2fa322e7..94c9afad2f 100644
--- a/src/armnn/layers/PreCompiledLayer.cpp
+++ b/src/armnn/layers/PreCompiledLayer.cpp
@@ -49,14 +49,6 @@ void PreCompiledLayer::SetPreCompiledObject(PreCompiledObjectPtr preCompiledObje
m_PreCompiledObject = std::move(preCompiledObject);
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void PreCompiledLayer::Accept(ILayerVisitor& visitor) const
-{
- IgnoreUnused(visitor);
- throw armnn::Exception("PreCompiledLayer should not appear in an input graph");
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
void PreCompiledLayer::ExecuteStrategy(IStrategy& strategy) const
{
IgnoreUnused(strategy);
diff --git a/src/armnn/layers/PreCompiledLayer.hpp b/src/armnn/layers/PreCompiledLayer.hpp
index 65cf79b6d4..7b478cf808 100644
--- a/src/armnn/layers/PreCompiledLayer.hpp
+++ b/src/armnn/layers/PreCompiledLayer.hpp
@@ -33,11 +33,6 @@ public:
void SetPreCompiledObject(PreCompiledObjectPtr preCompiledObject);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
-
-
void ExecuteStrategy(IStrategy& strategy) const override;
private:
diff --git a/src/armnn/layers/PreluLayer.cpp b/src/armnn/layers/PreluLayer.cpp
index 431e2f4e38..e6ab0d8b75 100644
--- a/src/armnn/layers/PreluLayer.cpp
+++ b/src/armnn/layers/PreluLayer.cpp
@@ -116,11 +116,9 @@ void PreluLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "PreluLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void PreluLayer::Accept(ILayerVisitor& visitor) const
+void PreluLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitPreluLayer(this, GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/PreluLayer.hpp b/src/armnn/layers/PreluLayer.hpp
index eecffbcd22..e718043be9 100644
--- a/src/armnn/layers/PreluLayer.hpp
+++ b/src/armnn/layers/PreluLayer.hpp
@@ -35,9 +35,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/QLstmLayer.cpp b/src/armnn/layers/QLstmLayer.cpp
index eb33227b48..5d44c8f12d 100644
--- a/src/armnn/layers/QLstmLayer.cpp
+++ b/src/armnn/layers/QLstmLayer.cpp
@@ -303,238 +303,6 @@ Layer::ConstantTensors QLstmLayer::GetConstantTensorsByRef()
m_LayerNormParameters.m_OutputLayerNormWeights};
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void QLstmLayer::Accept(ILayerVisitor& visitor) const
-{
- LstmInputParams inputParams;
- ManagedConstTensorHandle managedInputToForgetWeights(m_BasicParameters.m_InputToForgetWeights);
- ManagedConstTensorHandle managedInputToCellWeights(m_BasicParameters.m_InputToCellWeights);
- ManagedConstTensorHandle managedInputToOutputWeights(m_BasicParameters.m_InputToOutputWeights);
- ManagedConstTensorHandle managedRecurrentToForgetWeights(m_BasicParameters.m_RecurrentToForgetWeights);
- ManagedConstTensorHandle managedRecurrentToCellWeights(m_BasicParameters.m_RecurrentToCellWeights);
- ManagedConstTensorHandle managedRecurrentToOutputWeights(m_BasicParameters.m_RecurrentToOutputWeights);
- ManagedConstTensorHandle managedForgetGateBias(m_BasicParameters.m_ForgetGateBias);
- ManagedConstTensorHandle managedCellBias(m_BasicParameters.m_CellBias);
- ManagedConstTensorHandle managedOutputGateBias(m_BasicParameters.m_OutputGateBias);
-
- // Cifg parameters
- ManagedConstTensorHandle managedInputToInputWeights(m_CifgParameters.m_InputToInputWeights);
- ManagedConstTensorHandle managedRecurrentToInputWeights(m_CifgParameters.m_RecurrentToInputWeights);
- ManagedConstTensorHandle managedInputGateBias(m_CifgParameters.m_InputGateBias);
-
- // Projection parameters
- ManagedConstTensorHandle managedProjectionWeights(m_ProjectionParameters.m_ProjectionWeights);
- ManagedConstTensorHandle managedProjectionBias(m_ProjectionParameters.m_ProjectionBias);
-
- // Peephole parameters
- ManagedConstTensorHandle managedCellToInputWeights(m_PeepholeParameters.m_CellToInputWeights);
- ManagedConstTensorHandle managedCellToForgetWeights(m_PeepholeParameters.m_CellToForgetWeights);
- ManagedConstTensorHandle managedCellToOutputWeights(m_PeepholeParameters.m_CellToOutputWeights);
-
- // Layer normalisation parameters
- ManagedConstTensorHandle managedInputLayerNormWeights(m_LayerNormParameters.m_InputLayerNormWeights);
- ManagedConstTensorHandle managedForgetLayerNormWeights(m_LayerNormParameters.m_ForgetLayerNormWeights);
- ManagedConstTensorHandle managedCellLayerNormWeights(m_LayerNormParameters.m_CellLayerNormWeights);
- ManagedConstTensorHandle managedOutputLayerNormWeights(m_LayerNormParameters.m_OutputLayerNormWeights);
-
- ConstTensor inputToInputWeightsTensor;
- if (m_CifgParameters.m_InputToInputWeights != nullptr)
- {
- ConstTensor inputToInputWeightsTensorCopy(managedInputToInputWeights.GetTensorInfo(),
- managedInputToInputWeights.Map());
- inputToInputWeightsTensor = inputToInputWeightsTensorCopy;
- inputParams.m_InputToInputWeights = &inputToInputWeightsTensor;
- }
-
- ConstTensor inputToForgetWeightsTensor;
- if (m_BasicParameters.m_InputToForgetWeights != nullptr)
- {
- ConstTensor inputToForgetWeightsTensorCopy(managedInputToForgetWeights.GetTensorInfo(),
- managedInputToForgetWeights.Map());
- inputToForgetWeightsTensor = inputToForgetWeightsTensorCopy;
- inputParams.m_InputToForgetWeights = &inputToForgetWeightsTensor;
- }
-
- ConstTensor inputToCellWeightsTensor;
- if (m_BasicParameters.m_InputToCellWeights != nullptr)
- {
- ConstTensor inputToCellWeightsTensorCopy(managedInputToCellWeights.GetTensorInfo(),
- managedInputToCellWeights.Map());
- inputToCellWeightsTensor = inputToCellWeightsTensorCopy;
- inputParams.m_InputToCellWeights = &inputToCellWeightsTensor;
- }
-
- ConstTensor inputToOutputWeightsTensor;
- if (m_BasicParameters.m_InputToOutputWeights != nullptr)
- {
- ConstTensor inputToOutputWeightsTensorCopy(managedInputToOutputWeights.GetTensorInfo(),
- managedInputToOutputWeights.Map());
- inputToOutputWeightsTensor = inputToOutputWeightsTensorCopy;
- inputParams.m_InputToOutputWeights = &inputToOutputWeightsTensor;
- }
-
- ConstTensor recurrentToInputWeightsTensor;
- if (m_CifgParameters.m_RecurrentToInputWeights != nullptr)
- {
- ConstTensor recurrentToInputWeightsTensorCopy(
- managedRecurrentToInputWeights.GetTensorInfo(),
- managedRecurrentToInputWeights.Map());
- recurrentToInputWeightsTensor = recurrentToInputWeightsTensorCopy;
- inputParams.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
- }
-
- ConstTensor recurrentToForgetWeightsTensor;
- if (m_BasicParameters.m_RecurrentToForgetWeights != nullptr)
- {
- ConstTensor recurrentToForgetWeightsTensorCopy(
- managedRecurrentToForgetWeights.GetTensorInfo(),
- managedRecurrentToForgetWeights.Map());
- recurrentToForgetWeightsTensor = recurrentToForgetWeightsTensorCopy;
- inputParams.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
- }
-
- ConstTensor recurrentToCellWeightsTensor;
- if (m_BasicParameters.m_RecurrentToCellWeights != nullptr)
- {
- ConstTensor recurrentToCellWeightsTensorCopy(
- managedRecurrentToCellWeights.GetTensorInfo(),
- managedRecurrentToCellWeights.Map());
- recurrentToCellWeightsTensor = recurrentToCellWeightsTensorCopy;
- inputParams.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
- }
-
- ConstTensor recurrentToOutputWeightsTensor;
- if (m_BasicParameters.m_RecurrentToOutputWeights != nullptr)
- {
- ConstTensor recurrentToOutputWeightsTensorCopy(
- managedRecurrentToOutputWeights.GetTensorInfo(),
- managedRecurrentToOutputWeights.Map());
- recurrentToOutputWeightsTensor = recurrentToOutputWeightsTensorCopy;
- inputParams.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
- }
-
- ConstTensor cellToInputWeightsTensor;
- if (m_PeepholeParameters.m_CellToInputWeights != nullptr)
- {
- ConstTensor cellToInputWeightsTensorCopy(managedCellToInputWeights.GetTensorInfo(),
- managedCellToInputWeights.Map());
- cellToInputWeightsTensor = cellToInputWeightsTensorCopy;
- inputParams.m_CellToInputWeights = &cellToInputWeightsTensor;
- }
-
- ConstTensor cellToForgetWeightsTensor;
- if (m_PeepholeParameters.m_CellToForgetWeights != nullptr)
- {
- ConstTensor cellToForgetWeightsTensorCopy(managedCellToForgetWeights.GetTensorInfo(),
- managedCellToForgetWeights.Map());
- cellToForgetWeightsTensor = cellToForgetWeightsTensorCopy;
- inputParams.m_CellToForgetWeights = &cellToForgetWeightsTensor;
- }
-
- ConstTensor cellToOutputWeightsTensor;
- if (m_PeepholeParameters.m_CellToOutputWeights != nullptr)
- {
- ConstTensor cellToOutputWeightsTensorCopy(managedCellToOutputWeights.GetTensorInfo(),
- managedCellToOutputWeights.Map());
- cellToOutputWeightsTensor = cellToOutputWeightsTensorCopy;
- inputParams.m_CellToOutputWeights = &cellToOutputWeightsTensor;
- }
-
- ConstTensor inputGateBiasTensor;
- if (m_CifgParameters.m_InputGateBias != nullptr)
- {
- ConstTensor inputGateBiasTensorCopy(managedInputGateBias.GetTensorInfo(),
- managedInputGateBias.Map());
- inputGateBiasTensor = inputGateBiasTensorCopy;
- inputParams.m_InputGateBias = &inputGateBiasTensor;
- }
-
- ConstTensor forgetGateBiasTensor;
- if (m_BasicParameters.m_ForgetGateBias != nullptr)
- {
- ConstTensor forgetGateBiasTensorCopy(managedForgetGateBias.GetTensorInfo(),
- managedForgetGateBias.Map());
- forgetGateBiasTensor = forgetGateBiasTensorCopy;
- inputParams.m_ForgetGateBias = &forgetGateBiasTensor;
- }
-
- ConstTensor cellBiasTensor;
- if (m_BasicParameters.m_CellBias != nullptr)
- {
- ConstTensor cellBiasTensorCopy(managedCellBias.GetTensorInfo(),
- managedCellBias.Map());
- cellBiasTensor = cellBiasTensorCopy;
- inputParams.m_CellBias = &cellBiasTensor;
- }
-
- ConstTensor outputGateBias;
- if (m_BasicParameters.m_OutputGateBias != nullptr)
- {
- ConstTensor outputGateBiasCopy(managedOutputGateBias.GetTensorInfo(),
- managedOutputGateBias.Map());
- outputGateBias = outputGateBiasCopy;
- inputParams.m_OutputGateBias = &outputGateBias;
- }
-
- ConstTensor projectionWeightsTensor;
- if (m_ProjectionParameters.m_ProjectionWeights != nullptr)
- {
- ConstTensor projectionWeightsTensorCopy(managedProjectionWeights.GetTensorInfo(),
- managedProjectionWeights.Map());
- projectionWeightsTensor = projectionWeightsTensorCopy;
- inputParams.m_ProjectionWeights = &projectionWeightsTensor;
- }
-
- ConstTensor projectionBiasTensor;
- if (m_ProjectionParameters.m_ProjectionBias != nullptr)
- {
- ConstTensor projectionBiasTensorCopy(managedProjectionBias.GetTensorInfo(),
- managedProjectionBias.Map());
- projectionBiasTensor = projectionBiasTensorCopy;
- inputParams.m_ProjectionBias = &projectionBiasTensor;
- }
-
- ConstTensor inputLayerNormTensor;
- if (m_LayerNormParameters.m_InputLayerNormWeights != nullptr)
- {
- ConstTensor inputLayerNormTensorCopy(managedInputLayerNormWeights.GetTensorInfo(),
- managedInputLayerNormWeights.Map());
- inputLayerNormTensor = inputLayerNormTensorCopy;
- inputParams.m_InputLayerNormWeights = &inputLayerNormTensor;
- }
-
- ConstTensor forgetLayerNormTensor;
- if (m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr)
- {
- ConstTensor forgetLayerNormTensorCopy(managedForgetLayerNormWeights.GetTensorInfo(),
- managedForgetLayerNormWeights.Map());
- forgetLayerNormTensor = forgetLayerNormTensorCopy;
- inputParams.m_ForgetLayerNormWeights = &forgetLayerNormTensor;
- }
-
- ConstTensor cellLayerNormTensor;
- if (m_LayerNormParameters.m_CellLayerNormWeights != nullptr)
- {
- ConstTensor cellLayerNormTensorCopy(managedCellLayerNormWeights.GetTensorInfo(),
- managedCellLayerNormWeights.Map());
- cellLayerNormTensor = cellLayerNormTensorCopy;
- inputParams.m_CellLayerNormWeights = &cellLayerNormTensor;
- }
-
- ConstTensor outputLayerNormTensor;
- if (m_LayerNormParameters.m_OutputLayerNormWeights != nullptr)
- {
- ConstTensor outputLayerNormTensorCopy(managedOutputLayerNormWeights.GetTensorInfo(),
- managedOutputLayerNormWeights.Map());
- outputLayerNormTensor = outputLayerNormTensorCopy;
- inputParams.m_OutputLayerNormWeights = &outputLayerNormTensor;
- }
-
-
- visitor.VisitQLstmLayer(this, GetParameters(), inputParams, GetName());
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
void QLstmLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/QLstmLayer.hpp b/src/armnn/layers/QLstmLayer.hpp
index 12774a935e..115c47bddb 100644
--- a/src/armnn/layers/QLstmLayer.hpp
+++ b/src/armnn/layers/QLstmLayer.hpp
@@ -107,11 +107,6 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
-
-
void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/QuantizeLayer.cpp b/src/armnn/layers/QuantizeLayer.cpp
index 55f23bf251..3ad286e64c 100644
--- a/src/armnn/layers/QuantizeLayer.cpp
+++ b/src/armnn/layers/QuantizeLayer.cpp
@@ -7,8 +7,6 @@
#include "LayerCloneBase.hpp"
-#include <armnn/ILayerVisitor.hpp>
-
namespace armnn
{
@@ -45,11 +43,9 @@ void QuantizeLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "QuantizeLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void QuantizeLayer::Accept(ILayerVisitor& visitor) const
+void QuantizeLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitQuantizeLayer(this, GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} //namespace armnn \ No newline at end of file
diff --git a/src/armnn/layers/QuantizeLayer.hpp b/src/armnn/layers/QuantizeLayer.hpp
index d8898ba1e9..338d5d5fcb 100644
--- a/src/armnn/layers/QuantizeLayer.hpp
+++ b/src/armnn/layers/QuantizeLayer.hpp
@@ -12,7 +12,6 @@ namespace armnn {
//Forward
class IWorkload;
class IWorkloadFactory;
-class ILayerVisitor;
class QuantizeLayer : public Layer
{
@@ -23,9 +22,7 @@ public:
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/QuantizedLstmLayer.cpp b/src/armnn/layers/QuantizedLstmLayer.cpp
index e9b9d1c6b9..9d58d25f60 100644
--- a/src/armnn/layers/QuantizedLstmLayer.cpp
+++ b/src/armnn/layers/QuantizedLstmLayer.cpp
@@ -170,145 +170,6 @@ Layer::ConstantTensors QuantizedLstmLayer::GetConstantTensorsByRef()
};
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void QuantizedLstmLayer::Accept(ILayerVisitor& visitor) const
-{
- QuantizedLstmInputParams inputParams;
-
- ManagedConstTensorHandle managedInputToInputWeights(m_QuantizedLstmParameters.m_InputToInputWeights);
- ManagedConstTensorHandle managedInputToForgetWeights(m_QuantizedLstmParameters.m_InputToForgetWeights);
- ManagedConstTensorHandle managedInputToCellWeights(m_QuantizedLstmParameters.m_InputToCellWeights);
- ManagedConstTensorHandle managedInputToOutputWeights(m_QuantizedLstmParameters.m_InputToOutputWeights);
-
- ManagedConstTensorHandle managedRecurrentToInputWeights(m_QuantizedLstmParameters.m_RecurrentToInputWeights);
- ManagedConstTensorHandle managedRecurrentToForgetWeights(m_QuantizedLstmParameters.m_RecurrentToForgetWeights);
- ManagedConstTensorHandle managedRecurrentToCellWeights(m_QuantizedLstmParameters.m_RecurrentToCellWeights);
- ManagedConstTensorHandle managedRecurrentToOutputWeights(m_QuantizedLstmParameters.m_RecurrentToOutputWeights);
-
- ManagedConstTensorHandle managedInputGateBias(m_QuantizedLstmParameters.m_InputGateBias);
- ManagedConstTensorHandle managedForgetGateBias(m_QuantizedLstmParameters.m_ForgetGateBias);
- ManagedConstTensorHandle managedCellBias(m_QuantizedLstmParameters.m_CellBias);
- ManagedConstTensorHandle managedOutputGateBias(m_QuantizedLstmParameters.m_OutputGateBias);
-
- // InputToX weight tensors
- ConstTensor inputToInputWeightsTensor;
- if (m_QuantizedLstmParameters.m_InputToInputWeights != nullptr)
- {
- ConstTensor inputToInputWeightsTensorCopy(managedInputToInputWeights.GetTensorInfo(),
- managedInputToInputWeights.Map());
- inputToInputWeightsTensor = inputToInputWeightsTensorCopy;
- inputParams.m_InputToInputWeights = &inputToInputWeightsTensor;
- }
-
- ConstTensor inputToForgetWeightsTensor;
- if (m_QuantizedLstmParameters.m_InputToForgetWeights != nullptr)
- {
- ConstTensor inputToForgetWeightsTensorCopy(managedInputToForgetWeights.GetTensorInfo(),
- managedInputToForgetWeights.Map());
- inputToForgetWeightsTensor = inputToForgetWeightsTensorCopy;
- inputParams.m_InputToForgetWeights = &inputToForgetWeightsTensor;
- }
-
- ConstTensor inputToCellWeightsTensor;
- if (m_QuantizedLstmParameters.m_InputToCellWeights != nullptr)
- {
- ConstTensor inputToCellWeightsTensorCopy(managedInputToCellWeights.GetTensorInfo(),
- managedInputToCellWeights.Map());
- inputToCellWeightsTensor = inputToCellWeightsTensorCopy;
- inputParams.m_InputToCellWeights = &inputToCellWeightsTensor;
- }
-
- ConstTensor inputToOutputWeightsTensor;
- if (m_QuantizedLstmParameters.m_InputToOutputWeights != nullptr)
- {
- ConstTensor inputToOutputWeightsTensorCopy(managedInputToOutputWeights.GetTensorInfo(),
- managedInputToOutputWeights.Map());
- inputToOutputWeightsTensor = inputToOutputWeightsTensorCopy;
- inputParams.m_InputToOutputWeights = &inputToOutputWeightsTensor;
- }
-
- // RecurrentToX weight tensors
- ConstTensor recurrentToInputWeightsTensor;
- if (m_QuantizedLstmParameters.m_RecurrentToInputWeights != nullptr)
- {
- ConstTensor recurrentToInputWeightsTensorCopy(
- managedRecurrentToInputWeights.GetTensorInfo(),
- managedRecurrentToInputWeights.Map());
- recurrentToInputWeightsTensor = recurrentToInputWeightsTensorCopy;
- inputParams.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
- }
-
- ConstTensor recurrentToForgetWeightsTensor;
- if (m_QuantizedLstmParameters.m_RecurrentToForgetWeights != nullptr)
- {
- ConstTensor recurrentToForgetWeightsTensorCopy(
- managedRecurrentToForgetWeights.GetTensorInfo(),
- managedRecurrentToForgetWeights.Map());
- recurrentToForgetWeightsTensor = recurrentToForgetWeightsTensorCopy;
- inputParams.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
- }
-
- ConstTensor recurrentToCellWeightsTensor;
- if (m_QuantizedLstmParameters.m_RecurrentToCellWeights != nullptr)
- {
- ConstTensor recurrentToCellWeightsTensorCopy(
- managedRecurrentToCellWeights.GetTensorInfo(),
- managedRecurrentToCellWeights.Map());
- recurrentToCellWeightsTensor = recurrentToCellWeightsTensorCopy;
- inputParams.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
- }
-
- ConstTensor recurrentToOutputWeightsTensor;
- if (m_QuantizedLstmParameters.m_RecurrentToOutputWeights != nullptr)
- {
- ConstTensor recurrentToOutputWeightsTensorCopy(
- managedRecurrentToOutputWeights.GetTensorInfo(),
- managedRecurrentToOutputWeights.Map());
- recurrentToOutputWeightsTensor = recurrentToOutputWeightsTensorCopy;
- inputParams.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
- }
-
- // Bias tensors
- ConstTensor inputGateBiasTensor;
- if (m_QuantizedLstmParameters.m_InputGateBias != nullptr)
- {
- ConstTensor inputGateBiasTensorCopy(managedInputGateBias.GetTensorInfo(),
- managedInputGateBias.Map());
- inputGateBiasTensor = inputGateBiasTensorCopy;
- inputParams.m_InputGateBias = &inputGateBiasTensor;
- }
-
- ConstTensor forgetGateBiasTensor;
- if (m_QuantizedLstmParameters.m_ForgetGateBias != nullptr)
- {
- ConstTensor forgetGateBiasTensorCopy(managedForgetGateBias.GetTensorInfo(),
- managedForgetGateBias.Map());
- forgetGateBiasTensor = forgetGateBiasTensorCopy;
- inputParams.m_ForgetGateBias = &forgetGateBiasTensor;
- }
-
- ConstTensor cellBiasTensor;
- if (m_QuantizedLstmParameters.m_CellBias != nullptr)
- {
- ConstTensor cellBiasTensorCopy(managedCellBias.GetTensorInfo(),
- managedCellBias.Map());
- cellBiasTensor = cellBiasTensorCopy;
- inputParams.m_CellBias = &cellBiasTensor;
- }
-
- ConstTensor outputGateBiasTensor;
- if (m_QuantizedLstmParameters.m_OutputGateBias != nullptr)
- {
- ConstTensor outputGateBiasCopy(managedOutputGateBias.GetTensorInfo(),
- managedOutputGateBias.Map());
- outputGateBiasTensor = outputGateBiasCopy;
- inputParams.m_OutputGateBias = &outputGateBiasTensor;
- }
-
- visitor.VisitQuantizedLstmLayer(this, inputParams, GetName());
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
void QuantizedLstmLayer::ExecuteStrategy(IStrategy& strategy) const
{
std::vector<ConstTensor> constTensors;
diff --git a/src/armnn/layers/QuantizedLstmLayer.hpp b/src/armnn/layers/QuantizedLstmLayer.hpp
index fe7d423145..8def0f3f10 100644
--- a/src/armnn/layers/QuantizedLstmLayer.hpp
+++ b/src/armnn/layers/QuantizedLstmLayer.hpp
@@ -69,11 +69,6 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
-
-
void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/RankLayer.cpp b/src/armnn/layers/RankLayer.cpp
index 84d25bf756..0f9327b948 100644
--- a/src/armnn/layers/RankLayer.cpp
+++ b/src/armnn/layers/RankLayer.cpp
@@ -42,13 +42,6 @@ void RankLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShape, m_ShapeInferenceMethod, "RankLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void RankLayer::Accept(ILayerVisitor& visitor) const
-{
- visitor.VisitRankLayer(this, GetName());
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
void RankLayer::ExecuteStrategy(IStrategy& strategy) const
{
strategy.ExecuteStrategy(this, BaseDescriptor(), {}, GetName());
diff --git a/src/armnn/layers/RankLayer.hpp b/src/armnn/layers/RankLayer.hpp
index 416e1b0f6e..52d14c446e 100644
--- a/src/armnn/layers/RankLayer.hpp
+++ b/src/armnn/layers/RankLayer.hpp
@@ -22,11 +22,6 @@ class RankLayer : public Layer
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
-
-
void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/ReduceLayer.cpp b/src/armnn/layers/ReduceLayer.cpp
index 1f4387b58c..aa54bc8f0c 100644
--- a/src/armnn/layers/ReduceLayer.cpp
+++ b/src/armnn/layers/ReduceLayer.cpp
@@ -102,11 +102,9 @@ void ReduceLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShape, m_ShapeInferenceMethod, "ReduceLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void ReduceLayer::Accept(ILayerVisitor& visitor) const
+void ReduceLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitReduceLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ReduceLayer.hpp b/src/armnn/layers/ReduceLayer.hpp
index a6ac44e69c..e9ea5d8e3f 100644
--- a/src/armnn/layers/ReduceLayer.hpp
+++ b/src/armnn/layers/ReduceLayer.hpp
@@ -27,9 +27,7 @@ public:
/// will lead to a valid configuration of @ref ReduceLayer.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/ReshapeLayer.cpp b/src/armnn/layers/ReshapeLayer.cpp
index b194f7a48d..c5ec45f211 100644
--- a/src/armnn/layers/ReshapeLayer.cpp
+++ b/src/armnn/layers/ReshapeLayer.cpp
@@ -53,11 +53,9 @@ void ReshapeLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ReshapeLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void ReshapeLayer::Accept(ILayerVisitor& visitor) const
+void ReshapeLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitReshapeLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ReshapeLayer.hpp b/src/armnn/layers/ReshapeLayer.hpp
index d107b5cfc8..ed114f9f79 100644
--- a/src/armnn/layers/ReshapeLayer.hpp
+++ b/src/armnn/layers/ReshapeLayer.hpp
@@ -45,9 +45,7 @@ public:
m_Param.m_TargetShape == PolymorphicDowncast<const ReshapeLayer*>(&other)->m_Param.m_TargetShape;
}
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/ResizeLayer.cpp b/src/armnn/layers/ResizeLayer.cpp
index 89a94f78d3..188d134880 100644
--- a/src/armnn/layers/ResizeLayer.cpp
+++ b/src/armnn/layers/ResizeLayer.cpp
@@ -75,11 +75,9 @@ void ResizeLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ResizeLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void ResizeLayer::Accept(ILayerVisitor& visitor) const
+void ResizeLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitResizeLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ResizeLayer.hpp b/src/armnn/layers/ResizeLayer.hpp
index fab18c7716..a33573c2a3 100644
--- a/src/armnn/layers/ResizeLayer.hpp
+++ b/src/armnn/layers/ResizeLayer.hpp
@@ -34,9 +34,7 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/RsqrtLayer.cpp b/src/armnn/layers/RsqrtLayer.cpp
index 3a63b7c502..0c2cc6373b 100644
--- a/src/armnn/layers/RsqrtLayer.cpp
+++ b/src/armnn/layers/RsqrtLayer.cpp
@@ -48,11 +48,9 @@ void RsqrtLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "RsqrtLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void RsqrtLayer::Accept(ILayerVisitor& visitor) const
+void RsqrtLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitRsqrtLayer(this, GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn \ No newline at end of file
diff --git a/src/armnn/layers/RsqrtLayer.hpp b/src/armnn/layers/RsqrtLayer.hpp
index a31aea6498..c09be2e851 100644
--- a/src/armnn/layers/RsqrtLayer.hpp
+++ b/src/armnn/layers/RsqrtLayer.hpp
@@ -28,9 +28,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/ShapeLayer.cpp b/src/armnn/layers/ShapeLayer.cpp
index ecc112c02e..dbc0d7a888 100644
--- a/src/armnn/layers/ShapeLayer.cpp
+++ b/src/armnn/layers/ShapeLayer.cpp
@@ -59,13 +59,6 @@ std::vector<TensorShape> ShapeLayer::InferOutputShapes(const std::vector<TensorS
return std::vector<TensorShape>({ outputShape });
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void ShapeLayer::Accept(ILayerVisitor& visitor) const
-{
- IgnoreUnused(visitor);
- throw armnn::Exception("ShapeLayer VisitShapeLayer is not implemented");
-}
-ARMNN_NO_DEPRECATE_WARN_END
void ShapeLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/ShapeLayer.hpp b/src/armnn/layers/ShapeLayer.hpp
index 35ef873792..071b0df26a 100644
--- a/src/armnn/layers/ShapeLayer.hpp
+++ b/src/armnn/layers/ShapeLayer.hpp
@@ -34,11 +34,6 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
-
-
void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/SliceLayer.cpp b/src/armnn/layers/SliceLayer.cpp
index 0d61181c5d..6362be3de2 100644
--- a/src/armnn/layers/SliceLayer.cpp
+++ b/src/armnn/layers/SliceLayer.cpp
@@ -59,11 +59,9 @@ std::vector<TensorShape> SliceLayer::InferOutputShapes(const std::vector<TensorS
return std::vector<TensorShape>({ outputShape });
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void SliceLayer::Accept(ILayerVisitor& visitor) const
+void SliceLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitSliceLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/SliceLayer.hpp b/src/armnn/layers/SliceLayer.hpp
index dda66a1be6..1162e6a4ad 100644
--- a/src/armnn/layers/SliceLayer.hpp
+++ b/src/armnn/layers/SliceLayer.hpp
@@ -34,9 +34,7 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/SoftmaxLayer.cpp b/src/armnn/layers/SoftmaxLayer.cpp
index a2930e6035..b1cb191511 100644
--- a/src/armnn/layers/SoftmaxLayer.cpp
+++ b/src/armnn/layers/SoftmaxLayer.cpp
@@ -46,11 +46,9 @@ void SoftmaxLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SoftmaxLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void SoftmaxLayer::Accept(ILayerVisitor& visitor) const
+void SoftmaxLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitSoftmaxLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/SoftmaxLayer.hpp b/src/armnn/layers/SoftmaxLayer.hpp
index 035e7bcf2d..c37ecda10d 100644
--- a/src/armnn/layers/SoftmaxLayer.hpp
+++ b/src/armnn/layers/SoftmaxLayer.hpp
@@ -28,9 +28,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.cpp b/src/armnn/layers/SpaceToBatchNdLayer.cpp
index a4c6d1b237..151b6a5301 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.cpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.cpp
@@ -83,11 +83,9 @@ void SpaceToBatchNdLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SpaceToBatchNdLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void SpaceToBatchNdLayer::Accept(ILayerVisitor& visitor) const
+void SpaceToBatchNdLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitSpaceToBatchNdLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.hpp b/src/armnn/layers/SpaceToBatchNdLayer.hpp
index 70972bd8b3..e61ec6cd05 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.hpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.hpp
@@ -35,9 +35,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/SpaceToDepthLayer.cpp b/src/armnn/layers/SpaceToDepthLayer.cpp
index 51d79f4d03..f2f0b768a7 100644
--- a/src/armnn/layers/SpaceToDepthLayer.cpp
+++ b/src/armnn/layers/SpaceToDepthLayer.cpp
@@ -77,11 +77,9 @@ void SpaceToDepthLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SpaceToDepthLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void SpaceToDepthLayer::Accept(ILayerVisitor& visitor) const
+void SpaceToDepthLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitSpaceToDepthLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/SpaceToDepthLayer.hpp b/src/armnn/layers/SpaceToDepthLayer.hpp
index 267ac3b089..07764d8e3c 100644
--- a/src/armnn/layers/SpaceToDepthLayer.hpp
+++ b/src/armnn/layers/SpaceToDepthLayer.hpp
@@ -35,9 +35,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/SplitterLayer.cpp b/src/armnn/layers/SplitterLayer.cpp
index 42cb6e1950..0226a046be 100644
--- a/src/armnn/layers/SplitterLayer.cpp
+++ b/src/armnn/layers/SplitterLayer.cpp
@@ -241,11 +241,9 @@ void SplitterLayer::ValidateTensorShapesFromInputs()
}
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void SplitterLayer::Accept(ILayerVisitor& visitor) const
+void SplitterLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitSplitterLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/SplitterLayer.hpp b/src/armnn/layers/SplitterLayer.hpp
index 1fc37ef295..2f868e83b6 100644
--- a/src/armnn/layers/SplitterLayer.hpp
+++ b/src/armnn/layers/SplitterLayer.hpp
@@ -43,9 +43,7 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/StackLayer.cpp b/src/armnn/layers/StackLayer.cpp
index b842f1b4d5..3ebacaf3b5 100644
--- a/src/armnn/layers/StackLayer.cpp
+++ b/src/armnn/layers/StackLayer.cpp
@@ -95,11 +95,9 @@ void StackLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "StackLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void StackLayer::Accept(ILayerVisitor& visitor) const
+void StackLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitStackLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn armnn
diff --git a/src/armnn/layers/StackLayer.hpp b/src/armnn/layers/StackLayer.hpp
index 8d38907de7..973645d1fc 100644
--- a/src/armnn/layers/StackLayer.hpp
+++ b/src/armnn/layers/StackLayer.hpp
@@ -34,9 +34,7 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/StandInLayer.cpp b/src/armnn/layers/StandInLayer.cpp
index ccf152921a..e0d057e37c 100644
--- a/src/armnn/layers/StandInLayer.cpp
+++ b/src/armnn/layers/StandInLayer.cpp
@@ -41,10 +41,9 @@ void StandInLayer::ValidateTensorShapesFromInputs()
// so do nothing here.
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void StandInLayer::Accept(ILayerVisitor& visitor) const
+void StandInLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitStandInLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
+
} // namespace armnn
diff --git a/src/armnn/layers/StandInLayer.hpp b/src/armnn/layers/StandInLayer.hpp
index bb500065eb..9487ff86d9 100644
--- a/src/armnn/layers/StandInLayer.hpp
+++ b/src/armnn/layers/StandInLayer.hpp
@@ -35,11 +35,7 @@ public:
/// @return Does not return anything. Throws Exception if called.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
- /// Accepts a visitor object and calls VisitStandInLayer() method.
- /// @param visitor The visitor on which to call VisitStandInLayer() method.
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/StridedSliceLayer.cpp b/src/armnn/layers/StridedSliceLayer.cpp
index 56051c28ee..a179531306 100644
--- a/src/armnn/layers/StridedSliceLayer.cpp
+++ b/src/armnn/layers/StridedSliceLayer.cpp
@@ -111,11 +111,9 @@ void StridedSliceLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "StridedSliceLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void StridedSliceLayer::Accept(ILayerVisitor& visitor) const
+void StridedSliceLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitStridedSliceLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/StridedSliceLayer.hpp b/src/armnn/layers/StridedSliceLayer.hpp
index 7e17cb2e84..888ae7e9d9 100644
--- a/src/armnn/layers/StridedSliceLayer.hpp
+++ b/src/armnn/layers/StridedSliceLayer.hpp
@@ -34,9 +34,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/SubtractionLayer.cpp b/src/armnn/layers/SubtractionLayer.cpp
index 8e9b1733b7..0e92013351 100644
--- a/src/armnn/layers/SubtractionLayer.cpp
+++ b/src/armnn/layers/SubtractionLayer.cpp
@@ -32,11 +32,9 @@ SubtractionLayer* SubtractionLayer::Clone(Graph& graph) const
return CloneBase<SubtractionLayer>(graph, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void SubtractionLayer::Accept(ILayerVisitor& visitor) const
+void SubtractionLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitSubtractionLayer(this, GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/SubtractionLayer.hpp b/src/armnn/layers/SubtractionLayer.hpp
index 8c31479c8e..86d5f9ea03 100644
--- a/src/armnn/layers/SubtractionLayer.hpp
+++ b/src/armnn/layers/SubtractionLayer.hpp
@@ -24,9 +24,7 @@ public:
/// @param [in] graph The graph into which this layer is being cloned.
SubtractionLayer* Clone(Graph& graph) const override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/SwitchLayer.cpp b/src/armnn/layers/SwitchLayer.cpp
index afa4d52f9d..c2022fdcbb 100644
--- a/src/armnn/layers/SwitchLayer.cpp
+++ b/src/armnn/layers/SwitchLayer.cpp
@@ -52,11 +52,9 @@ void SwitchLayer::ValidateTensorShapesFromInputs()
GetOutputSlot(1).GetTensorInfo().GetShape(), inferredShapes[1], m_ShapeInferenceMethod, "SwitchLayer", 1);
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void SwitchLayer::Accept(ILayerVisitor& visitor) const
+void SwitchLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitSwitchLayer(this, GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/SwitchLayer.hpp b/src/armnn/layers/SwitchLayer.hpp
index a36261b51a..4af82f2c1b 100644
--- a/src/armnn/layers/SwitchLayer.hpp
+++ b/src/armnn/layers/SwitchLayer.hpp
@@ -28,10 +28,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
-
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
/// Constructor to create a SwitchLayer.
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp
index 1cbaf342cd..eec42fbb78 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.cpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp
@@ -122,24 +122,6 @@ Layer::ConstantTensors TransposeConvolution2dLayer::GetConstantTensorsByRef()
return {m_Weight, m_Bias};
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void TransposeConvolution2dLayer::Accept(ILayerVisitor& visitor) const
-{
- ManagedConstTensorHandle managedWeight(m_Weight);
- ConstTensor weightsTensor(managedWeight.GetTensorInfo(), managedWeight.Map());
-
- Optional<ConstTensor> optionalBiasTensor = EmptyOptional();
- ManagedConstTensorHandle managedBias(m_Bias);
- if (GetParameters().m_BiasEnabled)
- {
- ConstTensor biasTensor(managedBias.GetTensorInfo(), managedBias.Map());
- optionalBiasTensor = Optional<ConstTensor>(biasTensor);
- }
-
- visitor.VisitTransposeConvolution2dLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName());
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
void TransposeConvolution2dLayer::ExecuteStrategy(IStrategy& strategy) const
{
ManagedConstTensorHandle managedWeight(m_Weight);
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.hpp b/src/armnn/layers/TransposeConvolution2dLayer.hpp
index b6db41c2b7..1fa2902dfe 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.hpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.hpp
@@ -40,10 +40,6 @@ public:
/// @return A vector of the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
-
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/TransposeLayer.cpp b/src/armnn/layers/TransposeLayer.cpp
index 3340b9ddf9..bc9e0acf7c 100644
--- a/src/armnn/layers/TransposeLayer.cpp
+++ b/src/armnn/layers/TransposeLayer.cpp
@@ -57,11 +57,9 @@ void TransposeLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "TransposeLayer");
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void TransposeLayer::Accept(ILayerVisitor& visitor) const
+void TransposeLayer::ExecuteStrategy(IStrategy& strategy) const
{
- visitor.VisitTransposeLayer(this, GetParameters(), GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/TransposeLayer.hpp b/src/armnn/layers/TransposeLayer.hpp
index 8449db4d9d..08268f2a54 100644
--- a/src/armnn/layers/TransposeLayer.hpp
+++ b/src/armnn/layers/TransposeLayer.hpp
@@ -58,9 +58,7 @@ public:
GetPermutation().IsEqual(PolymorphicDowncast<const TransposeLayer*>(&other)->GetPermutation());
}
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp b/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp
index e5f89bd017..857f369cb6 100644
--- a/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp
+++ b/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp
@@ -308,14 +308,6 @@ Layer::ConstantTensors UnidirectionalSequenceLstmLayer::GetConstantTensorsByRef(
m_LayerNormParameters.m_OutputLayerNormWeights};
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void UnidirectionalSequenceLstmLayer::Accept(ILayerVisitor& visitor) const
-{
- IgnoreUnused(visitor);
- throw armnn::Exception("UnidirectionalSequenceLstmLayer: VisitUnidirectionalSequenceLstmLayer is not implemented");
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
void UnidirectionalSequenceLstmLayer::ExecuteStrategy(IStrategy& strategy) const
{
std::vector<ConstTensor> constTensors;
diff --git a/src/armnn/layers/UnidirectionalSequenceLstmLayer.hpp b/src/armnn/layers/UnidirectionalSequenceLstmLayer.hpp
index 857d2776a9..60b6893627 100644
--- a/src/armnn/layers/UnidirectionalSequenceLstmLayer.hpp
+++ b/src/armnn/layers/UnidirectionalSequenceLstmLayer.hpp
@@ -44,10 +44,6 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
-
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/UnmapLayer.cpp b/src/armnn/layers/UnmapLayer.cpp
index 9705e3ffee..cfbde211ba 100644
--- a/src/armnn/layers/UnmapLayer.cpp
+++ b/src/armnn/layers/UnmapLayer.cpp
@@ -41,12 +41,10 @@ void UnmapLayer::ValidateTensorShapesFromInputs()
ARMNN_ASSERT(GetNumOutputSlots() == 0);
}
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void UnmapLayer::Accept(ILayerVisitor& visitor) const
+void UnmapLayer::ExecuteStrategy(IStrategy& strategy) const
{
- IgnoreUnused(visitor);
+ IgnoreUnused(strategy);
throw armnn::Exception("UnmapLayer should not appear in an input graph");
}
-ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/UnmapLayer.hpp b/src/armnn/layers/UnmapLayer.hpp
index 3d1d11534e..8c8aecd50a 100644
--- a/src/armnn/layers/UnmapLayer.hpp
+++ b/src/armnn/layers/UnmapLayer.hpp
@@ -28,9 +28,7 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- void Accept(ILayerVisitor& visitor) const override;
- ARMNN_NO_DEPRECATE_WARN_END
+ void ExecuteStrategy(IStrategy& strategy) const override;
protected:
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index 9d9810408e..058f079e46 100644
--- a/src/armnn/test/NetworkTests.cpp
+++ b/src/armnn/test/NetworkTests.cpp
@@ -5,7 +5,6 @@
#include <GraphUtils.hpp>
-#include <armnn/LayerVisitorBase.hpp>
#include <Network.hpp>
diff --git a/src/armnn/test/TensorHandleStrategyTest.cpp b/src/armnn/test/TensorHandleStrategyTest.cpp
index 2ea3c2abf1..d5d506d541 100644
--- a/src/armnn/test/TensorHandleStrategyTest.cpp
+++ b/src/armnn/test/TensorHandleStrategyTest.cpp
@@ -5,7 +5,6 @@
#include <doctest/doctest.h>
-#include <armnn/LayerVisitorBase.hpp>
#include <armnn/backends/IBackendContext.hpp>
#include <armnn/backends/IBackendInternal.hpp>
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index 1a0978f0de..216f4dc016 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -4,9 +4,7 @@
//
#pragma once
-#include <armnn/ILayerVisitor.hpp>
#include <armnn/IStrategy.hpp>
-#include <armnn/LayerVisitorBase.hpp>
#include <armnnSerializer/ISerializer.hpp>