author      Jan Eilers <jan.eilers@arm.com>    2021-09-24 15:45:46 +0100
committer   Jan Eilers <jan.eilers@arm.com>    2021-10-02 16:27:39 +0100
commit      1b2654fb799c3d25ffcef4d31b5d026d359e2f8f (patch)
tree        0397fdf24f286715e26a0e63bddaa0502f64caf7 /src/armnn
parent      b63a31170aee1d28267d83a4bc67b57708fb6b05 (diff)
download    armnn-1b2654fb799c3d25ffcef4d31b5d026d359e2f8f.tar.gz
IVGCVSW-5985 Remove deprecated code
* Removes the deprecated AddLayer and IsLayerSupported functions
* Marks the whole LayerVisitor class as deprecated, not just its constructor. This required wrapping all Accept functions in a no-deprecate macro because the LayerVisitor is used as a parameter there
* Removes usage of the deprecated LayerVisitor and replaces it with ExecuteStrategy. This required a few structural changes in the unit tests
* Adds a default implementation for IStrategy called StrategyBase
* Changes pyarmnn to use the non-deprecated constructor for INetworkProperties and adds a related unit test
* Marks usage of deprecated code in pyarmnn as deprecated. This required extending INetworkProperties to allow backwards compatibility
* Removes deprecated functions from the CpuAcc, GpuAcc and Ref backends

Note: This patch breaks compatibility with backends that are not updated in this patch

!android-nn-driver:6325

Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Change-Id: Id13b6f37a74d26eadeda2da1dc92915e725ed5a5
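As context for the migration this patch asks of callers, below is a minimal, hand-written sketch (not part of the patch) of how a traversal previously written against ILayerVisitor/Accept can be expressed with IStrategy/ExecuteStrategy. The class name LayerNamePrinter is illustrative only, and the ExecuteStrategy parameter list is assumed from include/armnn/IStrategy.hpp at the time of this change; check that header for the exact signature.

// Minimal sketch (not from this patch): an IStrategy implementation that
// prints layer names, used where an ILayerVisitor would previously have
// been passed to INetwork::Accept.
#include <armnn/Descriptors.hpp>
#include <armnn/INetwork.hpp>
#include <armnn/IStrategy.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <iostream>
#include <vector>

struct LayerNamePrinter : public armnn::IStrategy
{
    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
                         const armnn::BaseDescriptor& descriptor,
                         const std::vector<armnn::ConstTensor>& constants,
                         const char* name,
                         const armnn::LayerBindingId id) override
    {
        armnn::IgnoreUnused(descriptor, constants, id);
        // Print the layer's GUID and (optional) name as the graph is walked.
        std::cout << "Layer " << layer->GetGuid() << ": "
                  << (name ? name : "<unnamed>") << std::endl;
    }
};

// Usage: instead of the removed network->Accept(visitor), call
//   LayerNamePrinter printer;
//   network->ExecuteStrategy(printer);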
Diffstat (limited to 'src/armnn')
-rw-r--r--  src/armnn/BackendHelper.cpp | 62
-rw-r--r--  src/armnn/CompatibleTypes.hpp | 3
-rw-r--r--  src/armnn/ISubgraphViewConverter.hpp | 5
-rw-r--r--  src/armnn/LayerSupport.cpp | 69
-rw-r--r--  src/armnn/Network.cpp | 162
-rw-r--r--  src/armnn/Network.hpp | 48
-rw-r--r--  src/armnn/SerializeLayerParameters.cpp | 10
-rw-r--r--  src/armnn/SerializeLayerParameters.hpp | 5
-rw-r--r--  src/armnn/SubgraphView.hpp | 6
-rw-r--r--  src/armnn/layers/AbsLayer.cpp | 2
-rw-r--r--  src/armnn/layers/AbsLayer.hpp | 3
-rw-r--r--  src/armnn/layers/ActivationLayer.cpp | 2
-rw-r--r--  src/armnn/layers/ActivationLayer.hpp | 2
-rw-r--r--  src/armnn/layers/AdditionLayer.cpp | 2
-rw-r--r--  src/armnn/layers/AdditionLayer.hpp | 2
-rw-r--r--  src/armnn/layers/ArgMinMaxLayer.cpp | 2
-rw-r--r--  src/armnn/layers/ArgMinMaxLayer.hpp | 2
-rw-r--r--  src/armnn/layers/BatchNormalizationLayer.cpp | 2
-rw-r--r--  src/armnn/layers/BatchNormalizationLayer.hpp | 2
-rw-r--r--  src/armnn/layers/BatchToSpaceNdLayer.cpp | 2
-rw-r--r--  src/armnn/layers/BatchToSpaceNdLayer.hpp | 2
-rw-r--r--  src/armnn/layers/CastLayer.cpp | 2
-rw-r--r--  src/armnn/layers/CastLayer.hpp | 2
-rw-r--r--  src/armnn/layers/ChannelShuffleLayer.cpp | 3
-rw-r--r--  src/armnn/layers/ChannelShuffleLayer.hpp | 2
-rw-r--r--  src/armnn/layers/ComparisonLayer.cpp | 2
-rw-r--r--  src/armnn/layers/ComparisonLayer.hpp | 2
-rw-r--r--  src/armnn/layers/ConcatLayer.cpp | 2
-rw-r--r--  src/armnn/layers/ConcatLayer.hpp | 2
-rw-r--r--  src/armnn/layers/ConstantLayer.cpp | 2
-rw-r--r--  src/armnn/layers/ConstantLayer.hpp | 2
-rw-r--r--  src/armnn/layers/ConvertBf16ToFp32Layer.cpp | 2
-rw-r--r--  src/armnn/layers/ConvertBf16ToFp32Layer.hpp | 2
-rw-r--r--  src/armnn/layers/ConvertFp16ToFp32Layer.cpp | 2
-rw-r--r--  src/armnn/layers/ConvertFp16ToFp32Layer.hpp | 2
-rw-r--r--  src/armnn/layers/ConvertFp32ToBf16Layer.cpp | 2
-rw-r--r--  src/armnn/layers/ConvertFp32ToBf16Layer.hpp | 2
-rw-r--r--  src/armnn/layers/ConvertFp32ToFp16Layer.cpp | 2
-rw-r--r--  src/armnn/layers/ConvertFp32ToFp16Layer.hpp | 2
-rw-r--r--  src/armnn/layers/Convolution2dLayer.cpp | 2
-rw-r--r--  src/armnn/layers/Convolution2dLayer.hpp | 2
-rw-r--r--  src/armnn/layers/DebugLayer.cpp | 2
-rw-r--r--  src/armnn/layers/DebugLayer.hpp | 2
-rw-r--r--  src/armnn/layers/DepthToSpaceLayer.cpp | 2
-rw-r--r--  src/armnn/layers/DepthToSpaceLayer.hpp | 2
-rw-r--r--  src/armnn/layers/DepthwiseConvolution2dLayer.cpp | 2
-rw-r--r--  src/armnn/layers/DepthwiseConvolution2dLayer.hpp | 2
-rw-r--r--  src/armnn/layers/DequantizeLayer.cpp | 2
-rw-r--r--  src/armnn/layers/DequantizeLayer.hpp | 2
-rw-r--r--  src/armnn/layers/DetectionPostProcessLayer.cpp | 2
-rw-r--r--  src/armnn/layers/DetectionPostProcessLayer.hpp | 2
-rw-r--r--  src/armnn/layers/DivisionLayer.cpp | 2
-rw-r--r--  src/armnn/layers/DivisionLayer.hpp | 2
-rw-r--r--  src/armnn/layers/ElementwiseUnaryLayer.cpp | 2
-rw-r--r--  src/armnn/layers/ElementwiseUnaryLayer.hpp | 2
-rw-r--r--  src/armnn/layers/FakeQuantizationLayer.cpp | 2
-rw-r--r--  src/armnn/layers/FakeQuantizationLayer.hpp | 2
-rw-r--r--  src/armnn/layers/FillLayer.cpp | 2
-rw-r--r--  src/armnn/layers/FillLayer.hpp | 2
-rw-r--r--  src/armnn/layers/FloorLayer.cpp | 2
-rw-r--r--  src/armnn/layers/FloorLayer.hpp | 2
-rw-r--r--  src/armnn/layers/FullyConnectedLayer.cpp | 2
-rw-r--r--  src/armnn/layers/FullyConnectedLayer.hpp | 2
-rw-r--r--  src/armnn/layers/GatherLayer.cpp | 2
-rw-r--r--  src/armnn/layers/GatherLayer.hpp | 2
-rw-r--r--  src/armnn/layers/InputLayer.cpp | 2
-rw-r--r--  src/armnn/layers/InputLayer.hpp | 2
-rw-r--r--  src/armnn/layers/InstanceNormalizationLayer.cpp | 2
-rw-r--r--  src/armnn/layers/InstanceNormalizationLayer.hpp | 2
-rw-r--r--  src/armnn/layers/L2NormalizationLayer.cpp | 2
-rw-r--r--  src/armnn/layers/L2NormalizationLayer.hpp | 2
-rw-r--r--  src/armnn/layers/LogSoftmaxLayer.cpp | 2
-rw-r--r--  src/armnn/layers/LogSoftmaxLayer.hpp | 2
-rw-r--r--  src/armnn/layers/LogicalBinaryLayer.cpp | 2
-rw-r--r--  src/armnn/layers/LogicalBinaryLayer.hpp | 2
-rw-r--r--  src/armnn/layers/LstmLayer.cpp | 2
-rw-r--r--  src/armnn/layers/LstmLayer.hpp | 2
-rw-r--r--  src/armnn/layers/MapLayer.cpp | 2
-rw-r--r--  src/armnn/layers/MapLayer.hpp | 2
-rw-r--r--  src/armnn/layers/MaximumLayer.cpp | 2
-rw-r--r--  src/armnn/layers/MaximumLayer.hpp | 2
-rw-r--r--  src/armnn/layers/MeanLayer.cpp | 2
-rw-r--r--  src/armnn/layers/MeanLayer.hpp | 2
-rw-r--r--  src/armnn/layers/MemCopyLayer.cpp | 2
-rw-r--r--  src/armnn/layers/MemCopyLayer.hpp | 2
-rw-r--r--  src/armnn/layers/MemImportLayer.cpp | 2
-rw-r--r--  src/armnn/layers/MemImportLayer.hpp | 2
-rw-r--r--  src/armnn/layers/MergeLayer.cpp | 2
-rw-r--r--  src/armnn/layers/MergeLayer.hpp | 3
-rw-r--r--  src/armnn/layers/MinimumLayer.cpp | 2
-rw-r--r--  src/armnn/layers/MinimumLayer.hpp | 3
-rw-r--r--  src/armnn/layers/MultiplicationLayer.cpp | 2
-rw-r--r--  src/armnn/layers/MultiplicationLayer.hpp | 3
-rw-r--r--  src/armnn/layers/NormalizationLayer.cpp | 2
-rw-r--r--  src/armnn/layers/NormalizationLayer.hpp | 3
-rw-r--r--  src/armnn/layers/OutputLayer.cpp | 2
-rw-r--r--  src/armnn/layers/OutputLayer.hpp | 3
-rw-r--r--  src/armnn/layers/PadLayer.cpp | 2
-rw-r--r--  src/armnn/layers/PadLayer.hpp | 3
-rw-r--r--  src/armnn/layers/PermuteLayer.cpp | 2
-rw-r--r--  src/armnn/layers/PermuteLayer.hpp | 3
-rw-r--r--  src/armnn/layers/Pooling2dLayer.cpp | 2
-rw-r--r--  src/armnn/layers/Pooling2dLayer.hpp | 3
-rw-r--r--  src/armnn/layers/PreCompiledLayer.cpp | 2
-rw-r--r--  src/armnn/layers/PreCompiledLayer.hpp | 3
-rw-r--r--  src/armnn/layers/PreluLayer.cpp | 2
-rw-r--r--  src/armnn/layers/PreluLayer.hpp | 3
-rw-r--r--  src/armnn/layers/QLstmLayer.cpp | 2
-rw-r--r--  src/armnn/layers/QLstmLayer.hpp | 3
-rw-r--r--  src/armnn/layers/QuantizeLayer.cpp | 2
-rw-r--r--  src/armnn/layers/QuantizeLayer.hpp | 3
-rw-r--r--  src/armnn/layers/QuantizedLstmLayer.cpp | 2
-rw-r--r--  src/armnn/layers/QuantizedLstmLayer.hpp | 3
-rw-r--r--  src/armnn/layers/RankLayer.cpp | 3
-rw-r--r--  src/armnn/layers/RankLayer.hpp | 3
-rw-r--r--  src/armnn/layers/ReduceLayer.cpp | 2
-rw-r--r--  src/armnn/layers/ReduceLayer.hpp | 3
-rw-r--r--  src/armnn/layers/ReshapeLayer.cpp | 2
-rw-r--r--  src/armnn/layers/ReshapeLayer.hpp | 3
-rw-r--r--  src/armnn/layers/ResizeLayer.cpp | 2
-rw-r--r--  src/armnn/layers/ResizeLayer.hpp | 3
-rw-r--r--  src/armnn/layers/RsqrtLayer.cpp | 2
-rw-r--r--  src/armnn/layers/RsqrtLayer.hpp | 3
-rw-r--r--  src/armnn/layers/ShapeLayer.cpp | 2
-rw-r--r--  src/armnn/layers/ShapeLayer.hpp | 3
-rw-r--r--  src/armnn/layers/SliceLayer.cpp | 2
-rw-r--r--  src/armnn/layers/SliceLayer.hpp | 3
-rw-r--r--  src/armnn/layers/SoftmaxLayer.cpp | 2
-rw-r--r--  src/armnn/layers/SoftmaxLayer.hpp | 3
-rw-r--r--  src/armnn/layers/SpaceToBatchNdLayer.cpp | 2
-rw-r--r--  src/armnn/layers/SpaceToBatchNdLayer.hpp | 3
-rw-r--r--  src/armnn/layers/SpaceToDepthLayer.cpp | 2
-rw-r--r--  src/armnn/layers/SpaceToDepthLayer.hpp | 3
-rw-r--r--  src/armnn/layers/SplitterLayer.cpp | 2
-rw-r--r--  src/armnn/layers/SplitterLayer.hpp | 3
-rw-r--r--  src/armnn/layers/StackLayer.cpp | 2
-rw-r--r--  src/armnn/layers/StackLayer.hpp | 3
-rw-r--r--  src/armnn/layers/StandInLayer.cpp | 2
-rw-r--r--  src/armnn/layers/StandInLayer.hpp | 3
-rw-r--r--  src/armnn/layers/StridedSliceLayer.cpp | 2
-rw-r--r--  src/armnn/layers/StridedSliceLayer.hpp | 3
-rw-r--r--  src/armnn/layers/SubtractionLayer.cpp | 2
-rw-r--r--  src/armnn/layers/SubtractionLayer.hpp | 3
-rw-r--r--  src/armnn/layers/SwitchLayer.cpp | 2
-rw-r--r--  src/armnn/layers/SwitchLayer.hpp | 3
-rw-r--r--  src/armnn/layers/TransposeConvolution2dLayer.cpp | 2
-rw-r--r--  src/armnn/layers/TransposeConvolution2dLayer.hpp | 3
-rw-r--r--  src/armnn/layers/TransposeLayer.cpp | 2
-rw-r--r--  src/armnn/layers/TransposeLayer.hpp | 3
-rw-r--r--  src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp | 2
-rw-r--r--  src/armnn/layers/UnidirectionalSequenceLstmLayer.hpp | 3
-rw-r--r--  src/armnn/layers/UnmapLayer.cpp | 2
-rw-r--r--  src/armnn/layers/UnmapLayer.hpp | 3
-rw-r--r--  src/armnn/test/ConstTensorLayerVisitor.cpp | 236
-rw-r--r--  src/armnn/test/ConstTensorLayerVisitor.hpp | 358
-rw-r--r--  src/armnn/test/NetworkTests.cpp | 118
-rw-r--r--  src/armnn/test/OptimizerTests.cpp | 67
-rw-r--r--  src/armnn/test/TestInputOutputLayerVisitor.cpp | 8
-rw-r--r--  src/armnn/test/TestInputOutputLayerVisitor.hpp | 56
-rw-r--r--  src/armnn/test/TestLayerVisitor.cpp | 56
-rw-r--r--  src/armnn/test/TestLayerVisitor.hpp | 19
-rw-r--r--  src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp | 4
-rw-r--r--  src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp | 30
-rw-r--r--  src/armnn/test/TestNameOnlyLayerVisitor.cpp | 4
-rw-r--r--  src/armnn/test/TestNameOnlyLayerVisitor.hpp | 24
165 files changed, 924 insertions, 750 deletions
diff --git a/src/armnn/BackendHelper.cpp b/src/armnn/BackendHelper.cpp
index 1616fd1aad..cc792a06ef 100644
--- a/src/armnn/BackendHelper.cpp
+++ b/src/armnn/BackendHelper.cpp
@@ -136,15 +136,6 @@ bool LayerSupportHandle::IsBackendRegistered() const
return false;
}
-
-bool LayerSupportHandle::IsAbsSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported)
-{
- // Call the IsXXXLayerSupport function of the specific backend.
- return m_LayerSupport->IsAbsSupported(input, output, reasonIfUnsupported.value());
-}
-
bool LayerSupportHandle::IsActivationSupported(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
@@ -388,14 +379,6 @@ bool LayerSupportHandle::IsElementwiseUnarySupported(const TensorInfo& input,
return m_LayerSupport->IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported.value());
}
-bool LayerSupportHandle::IsEqualSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported)
-{
- return m_LayerSupport->IsEqualSupported(input0, input1, output, reasonIfUnsupported.value());
-}
-
bool LayerSupportHandle::IsFakeQuantizationSupported(const TensorInfo& input,
const FakeQuantizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
@@ -478,28 +461,12 @@ bool LayerSupportHandle::IsFullyConnectedSupported(const TensorInfo& input,
bool LayerSupportHandle::IsGatherSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported)
-{
- return m_LayerSupport->IsGatherSupported(input0, input1, output, reasonIfUnsupported.value());
-}
-
-bool LayerSupportHandle::IsGatherSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
const GatherDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
return m_LayerSupport->IsGatherSupported(input0, input1, output, descriptor, reasonIfUnsupported.value());
}
-bool LayerSupportHandle::IsGreaterSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& ouput,
- Optional<std::string&> reasonIfUnsupported)
-{
- return m_LayerSupport->IsGreaterSupported(input0, input1, ouput, reasonIfUnsupported.value());
-}
-
bool LayerSupportHandle::IsInputSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported)
{
@@ -613,14 +580,6 @@ bool LayerSupportHandle::IsMergeSupported(const TensorInfo& input0,
return m_LayerSupport->IsMergeSupported(input0, input1, output, reasonIfUnsupported.value());
}
-bool LayerSupportHandle::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
- const TensorInfo& output,
- const OriginsDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported)
-{
- return m_LayerSupport->IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported.value());
-}
-
bool LayerSupportHandle::IsMinimumSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
@@ -758,13 +717,6 @@ bool LayerSupportHandle::IsReshapeSupported(const TensorInfo& input,
return m_LayerSupport->IsReshapeSupported(input, output, descriptor, reasonIfUnsupported.value());
}
-bool LayerSupportHandle::IsResizeBilinearSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported)
-{
- return m_LayerSupport->IsResizeBilinearSupported(input, output, reasonIfUnsupported.value());
-}
-
bool LayerSupportHandle::IsResizeSupported(const TensorInfo& input,
const TensorInfo& output,
const ResizeDescriptor& descriptor,
@@ -773,13 +725,6 @@ bool LayerSupportHandle::IsResizeSupported(const TensorInfo& input,
return m_LayerSupport->IsResizeSupported(input, output, descriptor, reasonIfUnsupported.value());
}
-bool LayerSupportHandle::IsRsqrtSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported)
-{
- return m_LayerSupport->IsRsqrtSupported(input, output, reasonIfUnsupported.value());
-}
-
bool LayerSupportHandle::IsShapeSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported)
@@ -820,13 +765,6 @@ bool LayerSupportHandle::IsSpaceToDepthSupported(const TensorInfo& input,
}
bool LayerSupportHandle::IsSplitterSupported(const TensorInfo& input,
- const ViewsDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported)
-{
- return m_LayerSupport->IsSplitterSupported(input, descriptor, reasonIfUnsupported.value());
-}
-
-bool LayerSupportHandle::IsSplitterSupported(const TensorInfo& input,
const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
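All of the LayerSupportHandle overloads deleted above have descriptor-taking replacements that remain in BackendHelper.cpp. As a rough, hand-written illustration (not from this patch), a support query against the remaining IsGatherSupported overload might look like the sketch below; GetILayerSupportByBackendId and the CpuRef backend name are assumptions based on include/armnn/BackendHelper.hpp and should be checked against your Arm NN version.

// Sketch: querying layer support through the descriptor-based overload.
#include <armnn/BackendHelper.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <iostream>

int main()
{
    // Illustrative shapes: gather 3 rows out of an 8x4 matrix.
    armnn::TensorInfo params({ 8, 4 }, armnn::DataType::Float32);
    armnn::TensorInfo indices({ 3 }, armnn::DataType::Signed32);
    armnn::TensorInfo output({ 3, 4 }, armnn::DataType::Float32);

    armnn::GatherDescriptor descriptor; // the descriptor argument is now mandatory
    descriptor.m_Axis = 0;

    std::string reason;
    armnn::LayerSupportHandle handle =
        armnn::GetILayerSupportByBackendId(armnn::BackendId("CpuRef"));
    bool supported = handle.IsGatherSupported(params, indices, output, descriptor,
                                              armnn::Optional<std::string&>(reason));
    std::cout << (supported ? "Gather supported" : reason) << std::endl;
    return 0;
}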
diff --git a/src/armnn/CompatibleTypes.hpp b/src/armnn/CompatibleTypes.hpp
index 1a663d3e27..e24d5dfc4c 100644
--- a/src/armnn/CompatibleTypes.hpp
+++ b/src/armnn/CompatibleTypes.hpp
@@ -46,11 +46,8 @@ inline bool CompatibleTypes<uint8_t>(DataType dataType)
template<>
inline bool CompatibleTypes<int8_t>(DataType dataType)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
return dataType == DataType::QSymmS8
- || dataType == DataType::QuantizedSymm8PerAxis
|| dataType == DataType::QAsymmS8;
- ARMNN_NO_DEPRECATE_WARN_END
}
template<>
diff --git a/src/armnn/ISubgraphViewConverter.hpp b/src/armnn/ISubgraphViewConverter.hpp
index 34789a2b28..2e108e1f3b 100644
--- a/src/armnn/ISubgraphViewConverter.hpp
+++ b/src/armnn/ISubgraphViewConverter.hpp
@@ -25,9 +25,4 @@ public:
virtual std::vector<CompiledBlobPtr> CompileNetwork() = 0;
};
-///
-/// Old ISubGraphConverter definition kept for backward compatibility only.
-///
-using ISubGraphConverter ARMNN_DEPRECATED_MSG("This type is no longer supported") = ISubgraphViewConverter;
-
} // namespace armnn
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 9eaa97cebc..4cb7492e3a 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -305,18 +305,6 @@ bool IsFullyConnectedSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsFullyConnectedSupported, input, output, weights, biases, descriptor);
}
-ARMNN_DEPRECATED_MSG("Use IsGatherSupported with descriptor instead")
-bool IsGatherSupported(const BackendId& backend,
- const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- const GatherDescriptor descriptor{};
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsGatherSupported, input0, input1, output, descriptor);
-}
-
bool IsGatherSupported(const BackendId& backend,
const TensorInfo& input0,
const TensorInfo& input1,
@@ -423,21 +411,6 @@ bool IsMergeSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergeSupported, input0, input1, output);
}
-ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
-bool IsMergerSupported(const BackendId& backend,
- std::vector<const TensorInfo*> inputs,
- const TensorInfo& output,
- const OriginsDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- ARMNN_ASSERT(inputs.size() > 0);
-
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergerSupported, inputs, output, descriptor);
- ARMNN_NO_DEPRECATE_WARN_END
-}
-
bool IsMinimumSupported(const BackendId& backend,
const TensorInfo& input0,
const TensorInfo& input1,
@@ -589,36 +562,6 @@ bool IsResizeSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeSupported, input, output, descriptor);
}
-ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
-bool IsResizeBilinearSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- ResizeDescriptor descriptor;
- descriptor.m_Method = ResizeMethod::Bilinear;
-
- const TensorShape& outputShape = output.GetShape();
- descriptor.m_TargetWidth = outputShape[3];
- descriptor.m_TargetHeight = outputShape[2];
-
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeSupported, input, output, descriptor);
-}
-
-bool IsRsqrtSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsElementwiseUnarySupported,
- input,
- output,
- ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt));
-}
-
bool IsSoftmaxSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
@@ -649,18 +592,6 @@ bool IsSpaceToDepthSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsSpaceToDepthSupported, input, output, descriptor);
}
-ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
-bool IsSplitterSupported(const BackendId& backend,
- const TensorInfo& input,
- const ViewsDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsSplitterSupported, input, descriptor);
- ARMNN_NO_DEPRECATE_WARN_END
-}
-
bool IsSplitterSupported(const BackendId& backend,
const TensorInfo& input,
const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 4070802be8..a39b6b1a42 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -139,27 +139,6 @@ IConnectableLayer* INetwork::AddDepthwiseConvolution2dLayer(
}
-IConnectableLayer* INetwork::AddDepthwiseConvolution2dLayer(
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const char* name)
-{
- Optional<ConstTensor> biases;
- return pNetworkImpl->AddDepthwiseConvolution2dLayer(convolution2dDescriptor, weights, biases, name);
-}
-
-
-IConnectableLayer* INetwork::AddDepthwiseConvolution2dLayer(
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name)
-{
- return pNetworkImpl->AddDepthwiseConvolution2dLayer(convolution2dDescriptor, weights,
- armnn::Optional<ConstTensor>(biases), name);
-}
-
-
IConnectableLayer* INetwork::AddDequantizeLayer(const char* name)
{
return pNetworkImpl->AddDequantizeLayer(name);
@@ -264,17 +243,6 @@ IConnectableLayer* INetwork::AddMergeLayer(const char* name)
return pNetworkImpl->AddMergeLayer(name);
}
-IConnectableLayer* INetwork::AddMergerLayer(const MergerDescriptor& mergerDescriptor,
- const char* name)
-{
- return pNetworkImpl->AddConcatLayer(mergerDescriptor, name);
-}
-
-IConnectableLayer* INetwork::AddAbsLayer(const char* name)
-{
- return pNetworkImpl->AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Abs), name);
-}
-
IConnectableLayer* INetwork::AddAdditionLayer(const char* name)
{
return pNetworkImpl->AddAdditionLayer(name);
@@ -300,20 +268,6 @@ IConnectableLayer* INetwork::AddRankLayer(const char* name)
return pNetworkImpl->AddRankLayer(name);
}
-IConnectableLayer* INetwork::AddResizeBilinearLayer(const ResizeBilinearDescriptor& descriptor,
- const char* name)
-{
- ResizeDescriptor resizeDescriptor;
- resizeDescriptor.m_Method = ResizeMethod::Bilinear;
- resizeDescriptor.m_DataLayout = descriptor.m_DataLayout;
- resizeDescriptor.m_TargetWidth = descriptor.m_TargetWidth;
- resizeDescriptor.m_TargetHeight = descriptor.m_TargetHeight;
- resizeDescriptor.m_AlignCorners = descriptor.m_AlignCorners;
- resizeDescriptor.m_HalfPixelCenters = descriptor.m_HalfPixelCenters;
-
- return pNetworkImpl->AddResizeLayer(resizeDescriptor, name);
-}
-
IConnectableLayer* INetwork::AddResizeLayer(const ResizeDescriptor& resizeDescriptor,
const char* name)
{
@@ -426,27 +380,6 @@ IConnectableLayer* INetwork::AddMinimumLayer(const char* name)
return pNetworkImpl->AddMinimumLayer(name);
}
-IConnectableLayer* INetwork::AddGreaterLayer(const char* name)
-{
- return pNetworkImpl->AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Greater), name);
-}
-
-IConnectableLayer* INetwork::AddEqualLayer(const char* name)
-{
- return pNetworkImpl->AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Equal), name);
-}
-
-IConnectableLayer* INetwork::AddRsqrtLayer(const char* name)
-{
- return pNetworkImpl->AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), name);
-}
-
-IConnectableLayer* INetwork::AddGatherLayer(const char* name)
-{
- GatherDescriptor gatherDescriptor{};
- return pNetworkImpl->AddGatherLayer(gatherDescriptor, name);
-}
-
IConnectableLayer* INetwork::AddGatherLayer(const GatherDescriptor& descriptor,
const char* name)
{
@@ -527,10 +460,12 @@ IConnectableLayer* INetwork::AddChannelShuffleLayer(const ChannelShuffleDescript
return pNetworkImpl->AddChannelShuffleLayer(descriptor, name);
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void INetwork::Accept(ILayerVisitor& visitor) const
{
return pNetworkImpl->Accept(visitor);
}
+ARMNN_NO_DEPRECATE_WARN_END
void INetwork::ExecuteStrategy(IStrategy& strategy) const
{
@@ -1774,23 +1709,6 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsFloatToHalf()));
Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsHalfToFloat()));
- // Run backend specific optimizations (deprecated)
- for (auto&& chosenBackend : backendSettings.m_SelectedBackends)
- {
- auto factoryFun = BackendRegistryInstance().GetFactory(chosenBackend);
- auto backendPtr = factoryFun();
- ARMNN_ASSERT(backendPtr.get() != nullptr);
-
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- auto backendSpecificOptimizations = backendPtr->GetOptimizations();
- ARMNN_NO_DEPRECATE_WARN_END
-
- if (!backendSpecificOptimizations.empty())
- {
- Optimizer::Pass(optNetObjPtr->pOptimizedNetworkImpl->GetGraph(), backendSpecificOptimizations);
- }
- }
-
return optNet;
}
bool NetworkImpl::GetShapeInferenceMethod()
@@ -1938,15 +1856,6 @@ IConnectableLayer* NetworkImpl::AddFullyConnectedLayer(const FullyConnectedDescr
return layer;
}
-IConnectableLayer* NetworkImpl::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char* name)
-{
- Optional<ConstTensor> optionalWeights(weights);
- return AddFullyConnectedLayer(fullyConnectedDescriptor, optionalWeights, biases, name);
-}
-
IConnectableLayer* NetworkImpl::AddConcatLayer(const ConcatDescriptor& concatDescriptor,
const char* name)
{
@@ -2060,25 +1969,6 @@ IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayer(
return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
}
-IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayer(
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const char* name)
-{
- Optional<ConstTensor> biases;
- return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
-}
-
-IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayer(
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name)
-{
- Optional<ConstTensor> optionalBiases(biases);
- return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, optionalBiases, name);
-}
-
IConnectableLayer* NetworkImpl::AddDetectionPostProcessLayer(const armnn::DetectionPostProcessDescriptor& descriptor,
const ConstTensor& anchors, const char* name)
{
@@ -2147,17 +2037,6 @@ IConnectableLayer* NetworkImpl::AddMinimumLayer(const char* name)
return m_Graph->AddLayer<MinimumLayer>(name);
}
-IConnectableLayer* NetworkImpl::AddMergerLayer(const MergerDescriptor& mergerDescriptor,
- const char* name)
-{
- return AddConcatLayer(mergerDescriptor, name);
-}
-
-IConnectableLayer* NetworkImpl::AddAbsLayer(const char * name)
-{
- return AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Abs), name);
-}
-
IConnectableLayer* NetworkImpl::AddAdditionLayer(const char* name)
{
return m_Graph->AddLayer<AdditionLayer>(name);
@@ -2201,20 +2080,6 @@ IConnectableLayer* NetworkImpl::AddReduceLayer(const ReduceDescriptor& reduceDes
return m_Graph->AddLayer<ReduceLayer>(reduceDescriptor, name);
}
-IConnectableLayer* NetworkImpl::AddResizeBilinearLayer(const ResizeBilinearDescriptor& descriptor,
- const char* name)
-{
- ResizeDescriptor resizeDescriptor;
- resizeDescriptor.m_Method = ResizeMethod::Bilinear;
- resizeDescriptor.m_DataLayout = descriptor.m_DataLayout;
- resizeDescriptor.m_TargetWidth = descriptor.m_TargetWidth;
- resizeDescriptor.m_TargetHeight = descriptor.m_TargetHeight;
- resizeDescriptor.m_AlignCorners = descriptor.m_AlignCorners;
- resizeDescriptor.m_HalfPixelCenters = descriptor.m_HalfPixelCenters;
-
- return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
-}
-
IConnectableLayer* NetworkImpl::AddResizeLayer(const ResizeDescriptor& resizeDescriptor, const char* name)
{
return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
@@ -2452,27 +2317,6 @@ IConnectableLayer* NetworkImpl::AddStridedSliceLayer(const StridedSliceDescripto
return m_Graph->AddLayer<StridedSliceLayer>(stridedSliceDescriptor, name);
}
-IConnectableLayer* NetworkImpl::AddGreaterLayer(const char* name)
-{
- return AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Greater), name);
-}
-
-IConnectableLayer* NetworkImpl::AddEqualLayer(const char* name)
-{
- return AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Equal), name);
-}
-
-IConnectableLayer* NetworkImpl::AddRsqrtLayer(const char * name)
-{
- return AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), name);
-}
-
-IConnectableLayer* NetworkImpl::AddGatherLayer(const char* name)
-{
- GatherDescriptor gatherDescriptor{};
- return AddGatherLayer(gatherDescriptor, name);
-}
-
IConnectableLayer* NetworkImpl::AddGatherLayer(const GatherDescriptor& gatherDescriptor,
const char* name)
{
@@ -2863,6 +2707,7 @@ IConnectableLayer* NetworkImpl::AddUnidirectionalSequenceLstmLayer(
return layer;
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void NetworkImpl::Accept(ILayerVisitor& visitor) const
{
for (auto layer : GetGraph())
@@ -2870,6 +2715,7 @@ void NetworkImpl::Accept(ILayerVisitor& visitor) const
layer->Accept(visitor);
};
}
+ARMNN_NO_DEPRECATE_WARN_END
void NetworkImpl::ExecuteStrategy(IStrategy& strategy) const
{
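With the NetworkImpl forwarding functions above removed, every remaining Add*Layer entry point takes an explicit descriptor (Abs/Rsqrt via ElementwiseUnary, Greater/Equal via Comparison, Gather and Resize via their descriptors). The following is a small, hand-written construction sketch (not from this patch) using the surviving INetwork API; layer names and target sizes are illustrative, and tensor-info setup is omitted for brevity.

// Sketch: building a graph with the descriptor-based replacements for the
// removed Add*Layer overloads.
#include <armnn/Descriptors.hpp>
#include <armnn/INetwork.hpp>

int main()
{
    armnn::INetworkPtr network = armnn::INetwork::Create();

    armnn::IConnectableLayer* input = network->AddInputLayer(0, "input");

    // Formerly AddAbsLayer(name):
    armnn::IConnectableLayer* abs = network->AddElementwiseUnaryLayer(
        armnn::ElementwiseUnaryDescriptor(armnn::UnaryOperation::Abs), "abs");

    // Formerly AddResizeBilinearLayer(resizeBilinearDesc, name):
    armnn::ResizeDescriptor resizeDesc;
    resizeDesc.m_Method       = armnn::ResizeMethod::Bilinear;
    resizeDesc.m_TargetWidth  = 16;
    resizeDesc.m_TargetHeight = 16;
    armnn::IConnectableLayer* resize = network->AddResizeLayer(resizeDesc, "resize");

    armnn::IConnectableLayer* output = network->AddOutputLayer(0, "output");

    // Wire the layers together: input -> abs -> resize -> output.
    input->GetOutputSlot(0).Connect(abs->GetInputSlot(0));
    abs->GetOutputSlot(0).Connect(resize->GetInputSlot(0));
    resize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    return 0;
}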
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 11759c71de..eb1d39d2f6 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -41,9 +41,6 @@ public:
IConnectableLayer* AddInputLayer(LayerBindingId id, const char* name = nullptr);
- ARMNN_DEPRECATED_MSG("Use AddElementwiseUnaryLayer instead")
- IConnectableLayer* AddAbsLayer(const char* name = nullptr);
-
IConnectableLayer* AddActivationLayer(const ActivationDescriptor& activationDescriptor,
const char* name = nullptr);
@@ -78,12 +75,12 @@ public:
const Optional<ConstTensor>& biases,
const char* name = nullptr);
- ARMNN_DEPRECATED_MSG("This AddConvolution2dLayer overload is deprecated")
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This AddConvolution2dLayer overload is deprecated", "22.08")
IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
const char* name = nullptr);
- ARMNN_DEPRECATED_MSG("This AddConvolution2dLayer overload is deprecated")
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This AddConvolution2dLayer overload is deprecated", "22.08")
IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
const ConstTensor& biases,
@@ -105,19 +102,6 @@ public:
const Optional<ConstTensor>& biases,
const char* name = nullptr);
- ARMNN_DEPRECATED_MSG("This AddDepthwiseConvolution2dLayer overload is deprecated")
- IConnectableLayer* AddDepthwiseConvolution2dLayer(
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const char* name = nullptr);
-
- ARMNN_DEPRECATED_MSG("This AddDepthwiseConvolution2dLayer overload is deprecated")
- IConnectableLayer* AddDepthwiseConvolution2dLayer(
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name = nullptr);
-
IConnectableLayer* AddDequantizeLayer(const char* name = nullptr);
IConnectableLayer* AddDetectionPostProcessLayer(
@@ -130,9 +114,6 @@ public:
IConnectableLayer* AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
const char* name = nullptr);
- ARMNN_DEPRECATED_MSG("Use AddComparisonLayer instead")
- IConnectableLayer* AddEqualLayer(const char* name = nullptr);
-
IConnectableLayer* AddMergeLayer(const char* name = nullptr);
IConnectableLayer* AddFillLayer(const FillDescriptor& fillDescriptor,
@@ -148,21 +129,9 @@ public:
const Optional<ConstTensor>& biases,
const char* name = nullptr);
- ARMNN_DEPRECATED_MSG("This AddFullyConnectedLayer overload is deprecated")
- IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char* name = nullptr);
-
- ARMNN_DEPRECATED_MSG("This AddGatherLayer overload is deprecated")
- IConnectableLayer* AddGatherLayer(const char* name = nullptr);
-
IConnectableLayer* AddGatherLayer(const GatherDescriptor& gatherDescriptor,
const char* name = nullptr);
- ARMNN_DEPRECATED_MSG("Use AddComparisonLayer instead")
- IConnectableLayer* AddGreaterLayer(const char* name = nullptr);
-
IConnectableLayer* AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor& desc,
const char* name = nullptr);
@@ -185,10 +154,6 @@ public:
IConnectableLayer* AddMinimumLayer(const char* name = nullptr);
- ARMNN_DEPRECATED_MSG("Use AddConcatLayer instead")
- IConnectableLayer* AddMergerLayer(const MergerDescriptor& mergerDescriptor,
- const char* name = nullptr);
-
IConnectableLayer* AddMultiplicationLayer(const char* name = nullptr);
IConnectableLayer* AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
@@ -220,19 +185,12 @@ public:
IConnectableLayer* AddReduceLayer(const ReduceDescriptor& reduceDescriptor,
const char* name = nullptr);
- ARMNN_DEPRECATED_MSG("Use AddResizeLayer instead")
- IConnectableLayer* AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDesc,
- const char* name = nullptr);
-
IConnectableLayer* AddResizeLayer(const ResizeDescriptor& resizeDescriptor,
const char* name = nullptr);
IConnectableLayer* AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
const char* name = nullptr);
- ARMNN_DEPRECATED_MSG("Use AddElementwiseUnaryLayer instead")
- IConnectableLayer* AddRsqrtLayer(const char* name = nullptr);
-
IConnectableLayer* AddShapeLayer(const char* name = nullptr);
IConnectableLayer* AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name = nullptr);
@@ -274,7 +232,9 @@ public:
const LstmInputParams& params,
const char* name = nullptr);
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const;
+ ARMNN_NO_DEPRECATE_WARN_END
void ExecuteStrategy(IStrategy& strategy) const;
diff --git a/src/armnn/SerializeLayerParameters.cpp b/src/armnn/SerializeLayerParameters.cpp
index da2c39d4b6..3fc93df727 100644
--- a/src/armnn/SerializeLayerParameters.cpp
+++ b/src/armnn/SerializeLayerParameters.cpp
@@ -378,16 +378,6 @@ void StringifyLayerParameters<ReshapeDescriptor>::Serialize(ParameterStringifyFu
fn("TargetShape",ss.str());
}
-void StringifyLayerParameters<ResizeBilinearDescriptor>::Serialize(ParameterStringifyFunction& fn,
- const ResizeBilinearDescriptor& desc)
-{
- fn("TargetWidth", std::to_string(desc.m_TargetWidth));
- fn("TargetHeight", std::to_string(desc.m_TargetHeight));
- fn("DataLayout", GetDataLayoutName(desc.m_DataLayout));
- fn("AlignCorners", std::to_string(desc.m_AlignCorners));
- fn("HalfPixelCenters", std::to_string(desc.m_HalfPixelCenters));
-}
-
void StringifyLayerParameters<ResizeDescriptor>::Serialize(ParameterStringifyFunction& fn,
const ResizeDescriptor& desc)
{
diff --git a/src/armnn/SerializeLayerParameters.hpp b/src/armnn/SerializeLayerParameters.hpp
index 8a3630ce9d..5c1e6f3759 100644
--- a/src/armnn/SerializeLayerParameters.hpp
+++ b/src/armnn/SerializeLayerParameters.hpp
@@ -139,11 +139,6 @@ template <> struct StringifyLayerParameters<ReshapeDescriptor>
static void Serialize(ParameterStringifyFunction& fn, const ReshapeDescriptor& desc);
};
-template <> struct StringifyLayerParameters<ResizeBilinearDescriptor>
-{
- static void Serialize(ParameterStringifyFunction& fn, const ResizeBilinearDescriptor& desc);
-};
-
template <> struct StringifyLayerParameters<ResizeDescriptor>
{
static void Serialize(ParameterStringifyFunction& fn, const ResizeDescriptor& desc);
diff --git a/src/armnn/SubgraphView.hpp b/src/armnn/SubgraphView.hpp
index cb9e415dd2..af6054283e 100644
--- a/src/armnn/SubgraphView.hpp
+++ b/src/armnn/SubgraphView.hpp
@@ -98,10 +98,4 @@ private:
/// The list of pointers to the layers of the parent graph.
Layers m_Layers;
};
-
-///
-/// Old SubGraph definition kept for backward compatibility only.
-///
-using SubGraph ARMNN_DEPRECATED_MSG("SubGraph is deprecated, use SubgraphView instead") = SubgraphView;
-
} // namespace armnn
diff --git a/src/armnn/layers/AbsLayer.cpp b/src/armnn/layers/AbsLayer.cpp
index 7aa4099641..e103b7fad3 100644
--- a/src/armnn/layers/AbsLayer.cpp
+++ b/src/armnn/layers/AbsLayer.cpp
@@ -46,9 +46,11 @@ void AbsLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "AbsLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void AbsLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitAbsLayer(this, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
\ No newline at end of file
diff --git a/src/armnn/layers/AbsLayer.hpp b/src/armnn/layers/AbsLayer.hpp
index 0e5ccb042a..9ab66624f6 100644
--- a/src/armnn/layers/AbsLayer.hpp
+++ b/src/armnn/layers/AbsLayer.hpp
@@ -28,7 +28,10 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create an AbsLayer.
diff --git a/src/armnn/layers/ActivationLayer.cpp b/src/armnn/layers/ActivationLayer.cpp
index 7bfa28ef73..3abb4c46da 100644
--- a/src/armnn/layers/ActivationLayer.cpp
+++ b/src/armnn/layers/ActivationLayer.cpp
@@ -45,9 +45,11 @@ void ActivationLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ActivationLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void ActivationLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitActivationLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ActivationLayer.hpp b/src/armnn/layers/ActivationLayer.hpp
index 5ffcc3e1f5..47b7f66280 100644
--- a/src/armnn/layers/ActivationLayer.hpp
+++ b/src/armnn/layers/ActivationLayer.hpp
@@ -26,7 +26,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
diff --git a/src/armnn/layers/AdditionLayer.cpp b/src/armnn/layers/AdditionLayer.cpp
index 8b1f2a8dff..b6db7062be 100644
--- a/src/armnn/layers/AdditionLayer.cpp
+++ b/src/armnn/layers/AdditionLayer.cpp
@@ -32,9 +32,11 @@ AdditionLayer* AdditionLayer::Clone(Graph& graph) const
return CloneBase<AdditionLayer>(graph, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void AdditionLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitAdditionLayer(this, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/AdditionLayer.hpp b/src/armnn/layers/AdditionLayer.hpp
index 4af576a130..71a8553078 100644
--- a/src/armnn/layers/AdditionLayer.hpp
+++ b/src/armnn/layers/AdditionLayer.hpp
@@ -23,7 +23,9 @@ public:
/// @param [in] graph The graph into which this layer is being cloned.
AdditionLayer* Clone(Graph& graph) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create an AdditionLayer.
diff --git a/src/armnn/layers/ArgMinMaxLayer.cpp b/src/armnn/layers/ArgMinMaxLayer.cpp
index 219f34682c..5e469a4d07 100644
--- a/src/armnn/layers/ArgMinMaxLayer.cpp
+++ b/src/armnn/layers/ArgMinMaxLayer.cpp
@@ -86,9 +86,11 @@ void ArgMinMaxLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ArgMinMaxLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void ArgMinMaxLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitArgMinMaxLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ArgMinMaxLayer.hpp b/src/armnn/layers/ArgMinMaxLayer.hpp
index 761d4a0a36..f2125361ce 100644
--- a/src/armnn/layers/ArgMinMaxLayer.hpp
+++ b/src/armnn/layers/ArgMinMaxLayer.hpp
@@ -34,7 +34,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a ArgMinMaxLayer.
diff --git a/src/armnn/layers/BatchNormalizationLayer.cpp b/src/armnn/layers/BatchNormalizationLayer.cpp
index e3ee643ac5..e52b986add 100644
--- a/src/armnn/layers/BatchNormalizationLayer.cpp
+++ b/src/armnn/layers/BatchNormalizationLayer.cpp
@@ -70,6 +70,7 @@ Layer::ConstantTensors BatchNormalizationLayer::GetConstantTensorsByRef()
return {m_Mean, m_Variance, m_Beta, m_Gamma};
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void BatchNormalizationLayer::Accept(ILayerVisitor& visitor) const
{
ManagedConstTensorHandle managedMean(m_Mean);
@@ -85,6 +86,7 @@ void BatchNormalizationLayer::Accept(ILayerVisitor& visitor) const
visitor.VisitBatchNormalizationLayer(
this, GetParameters(), meanTensor, varianceTensor, betaTensor, gammaTensor, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
void BatchNormalizationLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/BatchNormalizationLayer.hpp b/src/armnn/layers/BatchNormalizationLayer.hpp
index 2777633a34..10ca7eca25 100644
--- a/src/armnn/layers/BatchNormalizationLayer.hpp
+++ b/src/armnn/layers/BatchNormalizationLayer.hpp
@@ -39,7 +39,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.cpp b/src/armnn/layers/BatchToSpaceNdLayer.cpp
index 4b33b96229..0b6eab54f0 100644
--- a/src/armnn/layers/BatchToSpaceNdLayer.cpp
+++ b/src/armnn/layers/BatchToSpaceNdLayer.cpp
@@ -95,9 +95,11 @@ std::vector<TensorShape> BatchToSpaceNdLayer::InferOutputShapes(const std::vecto
return std::vector<TensorShape>({ outputShape });
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void BatchToSpaceNdLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitBatchToSpaceNdLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.hpp b/src/armnn/layers/BatchToSpaceNdLayer.hpp
index da7585b51e..bb6eb7129d 100644
--- a/src/armnn/layers/BatchToSpaceNdLayer.hpp
+++ b/src/armnn/layers/BatchToSpaceNdLayer.hpp
@@ -34,7 +34,9 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a BatchToSpaceNdLayer.
diff --git a/src/armnn/layers/CastLayer.cpp b/src/armnn/layers/CastLayer.cpp
index 16dd9a3744..485bbf0158 100644
--- a/src/armnn/layers/CastLayer.cpp
+++ b/src/armnn/layers/CastLayer.cpp
@@ -46,10 +46,12 @@ void CastLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "CastLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void CastLayer::Accept(ILayerVisitor& visitor) const
{
IgnoreUnused(visitor);
throw armnn::Exception("CastLayer VisitCastLayer is not implemented");
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/CastLayer.hpp b/src/armnn/layers/CastLayer.hpp
index 8a9ea43934..e0448131a2 100644
--- a/src/armnn/layers/CastLayer.hpp
+++ b/src/armnn/layers/CastLayer.hpp
@@ -28,7 +28,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a CastLayer.
diff --git a/src/armnn/layers/ChannelShuffleLayer.cpp b/src/armnn/layers/ChannelShuffleLayer.cpp
index a3b85f1ba7..884f3ab03d 100644
--- a/src/armnn/layers/ChannelShuffleLayer.cpp
+++ b/src/armnn/layers/ChannelShuffleLayer.cpp
@@ -46,9 +46,12 @@ void ChannelShuffleLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ChannelShuffleLayer");
}
+
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void ChannelShuffleLayer::Accept(ILayerVisitor& visitor) const
{
IgnoreUnused(visitor);
throw armnn::Exception("ChannelShuffleLayer: VisitChannelShuffleLayer is not implemented");
}
+ARMNN_NO_DEPRECATE_WARN_END
}
\ No newline at end of file
diff --git a/src/armnn/layers/ChannelShuffleLayer.hpp b/src/armnn/layers/ChannelShuffleLayer.hpp
index 399b651f5e..903d161107 100644
--- a/src/armnn/layers/ChannelShuffleLayer.hpp
+++ b/src/armnn/layers/ChannelShuffleLayer.hpp
@@ -11,7 +11,9 @@ namespace armnn
class ChannelShuffleLayer : public LayerWithParameters<ChannelShuffleDescriptor>
{
public:
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
/// Creates a dynamically-allocated copy of this layer.
/// @param graph The graph into which this layer is being cloned
diff --git a/src/armnn/layers/ComparisonLayer.cpp b/src/armnn/layers/ComparisonLayer.cpp
index 399834d72d..c644cb17c7 100644
--- a/src/armnn/layers/ComparisonLayer.cpp
+++ b/src/armnn/layers/ComparisonLayer.cpp
@@ -74,9 +74,11 @@ void ComparisonLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ComparisonLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void ComparisonLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitComparisonLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ComparisonLayer.hpp b/src/armnn/layers/ComparisonLayer.hpp
index bcb0dc2fdd..07534afab1 100644
--- a/src/armnn/layers/ComparisonLayer.hpp
+++ b/src/armnn/layers/ComparisonLayer.hpp
@@ -35,7 +35,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a ComparisonLayer
diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp
index 238fdb66d9..892c18e62c 100644
--- a/src/armnn/layers/ConcatLayer.cpp
+++ b/src/armnn/layers/ConcatLayer.cpp
@@ -318,9 +318,11 @@ void ConcatLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConcatLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void ConcatLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitConcatLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn armnn
diff --git a/src/armnn/layers/ConcatLayer.hpp b/src/armnn/layers/ConcatLayer.hpp
index 4315d66436..fefedea608 100644
--- a/src/armnn/layers/ConcatLayer.hpp
+++ b/src/armnn/layers/ConcatLayer.hpp
@@ -44,7 +44,9 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a ConcatLayer.
diff --git a/src/armnn/layers/ConstantLayer.cpp b/src/armnn/layers/ConstantLayer.cpp
index feeb762263..e738e59bdb 100644
--- a/src/armnn/layers/ConstantLayer.cpp
+++ b/src/armnn/layers/ConstantLayer.cpp
@@ -62,12 +62,14 @@ void ConstantLayer::ValidateTensorShapesFromInputs()
outShape);
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void ConstantLayer::Accept(ILayerVisitor& visitor) const
{
ManagedConstTensorHandle managedLayerOutput(m_LayerOutput);
ConstTensor layerOutputTensor(managedLayerOutput.GetTensorInfo(), managedLayerOutput.Map());
visitor.VisitConstantLayer(this, layerOutputTensor, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
void ConstantLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/ConstantLayer.hpp b/src/armnn/layers/ConstantLayer.hpp
index ead8816684..a9a9d37f54 100644
--- a/src/armnn/layers/ConstantLayer.hpp
+++ b/src/armnn/layers/ConstantLayer.hpp
@@ -39,7 +39,9 @@ public:
/// Free up the constant source data stored by the layer.
void ReleaseConstantData() override {}
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
index 3577723a38..b7fa3a6b3f 100644
--- a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
@@ -47,6 +47,7 @@ void ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConvertBf16ToFp32Layer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void ConvertBf16ToFp32Layer::Accept(ILayerVisitor& visitor) const
{
// these conversion layers are only inserted by the
@@ -54,5 +55,6 @@ void ConvertBf16ToFp32Layer::Accept(ILayerVisitor& visitor) const
IgnoreUnused(visitor);
throw armnn::Exception("ConvertBf16ToFp32Layer should never appear in an input graph");
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.hpp b/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
index d9df0bdf38..d2c006655c 100644
--- a/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
+++ b/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
@@ -28,7 +28,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a ConvertBf16ToFp32Layer.
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
index 3b6f72c440..77e6f668ac 100644
--- a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
@@ -47,6 +47,7 @@ void ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConvertFp16ToFp32Layer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void ConvertFp16ToFp32Layer::Accept(ILayerVisitor& visitor) const
{
// these conversion layers are only inserted by the
@@ -54,5 +55,6 @@ void ConvertFp16ToFp32Layer::Accept(ILayerVisitor& visitor) const
IgnoreUnused(visitor);
throw armnn::Exception("ConvertFp16ToFp32Layer should never appear in an input graph");
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.hpp b/src/armnn/layers/ConvertFp16ToFp32Layer.hpp
index 4eadb9f11a..59faf6486d 100644
--- a/src/armnn/layers/ConvertFp16ToFp32Layer.hpp
+++ b/src/armnn/layers/ConvertFp16ToFp32Layer.hpp
@@ -28,7 +28,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a ConvertFp16ToFp32Layer.
diff --git a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
index f909769b9d..6a003dc922 100644
--- a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
@@ -48,6 +48,7 @@ void ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LayerName");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void ConvertFp32ToBf16Layer::Accept(ILayerVisitor& visitor) const
{
// these conversion layers are only inserted by the
@@ -55,5 +56,6 @@ void ConvertFp32ToBf16Layer::Accept(ILayerVisitor& visitor) const
IgnoreUnused(visitor);
throw armnn::Exception("ConvertFp32ToBf16Layer should never appear in an input graph");
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ConvertFp32ToBf16Layer.hpp b/src/armnn/layers/ConvertFp32ToBf16Layer.hpp
index 57fbe13e12..8e33cb2d6a 100644
--- a/src/armnn/layers/ConvertFp32ToBf16Layer.hpp
+++ b/src/armnn/layers/ConvertFp32ToBf16Layer.hpp
@@ -28,7 +28,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a ConvertFp32ToBf16Layer.
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
index 3e6f055a4a..8c96909215 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
@@ -47,6 +47,7 @@ void ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LayerName");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void ConvertFp32ToFp16Layer::Accept(ILayerVisitor& visitor) const
{
// These conversion layers are only inserted by the
@@ -54,5 +55,6 @@ void ConvertFp32ToFp16Layer::Accept(ILayerVisitor& visitor) const
IgnoreUnused(visitor);
throw armnn::Exception("ConvertFp32ToFp16Layer should never appear in an input graph");
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.hpp b/src/armnn/layers/ConvertFp32ToFp16Layer.hpp
index 5652a472a2..e331c7d59a 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.hpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.hpp
@@ -27,7 +27,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a ConvertFp32ToFp16Layer.
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp
index b7bf0462d8..ae29d833e8 100644
--- a/src/armnn/layers/Convolution2dLayer.cpp
+++ b/src/armnn/layers/Convolution2dLayer.cpp
@@ -143,6 +143,7 @@ Layer::ConstantTensors Convolution2dLayer::GetConstantTensorsByRef()
return {m_Weight, m_Bias};
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void Convolution2dLayer::Accept(ILayerVisitor& visitor) const
{
ManagedConstTensorHandle managedWeight(m_Weight);
@@ -158,6 +159,7 @@ void Convolution2dLayer::Accept(ILayerVisitor& visitor) const
visitor.VisitConvolution2dLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
void Convolution2dLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/Convolution2dLayer.hpp b/src/armnn/layers/Convolution2dLayer.hpp
index a33cda27cb..844747831c 100644
--- a/src/armnn/layers/Convolution2dLayer.hpp
+++ b/src/armnn/layers/Convolution2dLayer.hpp
@@ -42,7 +42,9 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/DebugLayer.cpp b/src/armnn/layers/DebugLayer.cpp
index ade09ed3d4..07d59be7a3 100644
--- a/src/armnn/layers/DebugLayer.cpp
+++ b/src/armnn/layers/DebugLayer.cpp
@@ -52,11 +52,13 @@ void DebugLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DebugLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void DebugLayer::Accept(ILayerVisitor& visitor) const
{
// by design debug layers are never in input graphs
IgnoreUnused(visitor);
throw armnn::Exception("DebugLayer should never appear in an input graph");
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/DebugLayer.hpp b/src/armnn/layers/DebugLayer.hpp
index e71e05a8d5..054f5e4d2b 100644
--- a/src/armnn/layers/DebugLayer.hpp
+++ b/src/armnn/layers/DebugLayer.hpp
@@ -28,7 +28,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a DebugLayer.
diff --git a/src/armnn/layers/DepthToSpaceLayer.cpp b/src/armnn/layers/DepthToSpaceLayer.cpp
index dfa575b7a3..ba06ad6c31 100644
--- a/src/armnn/layers/DepthToSpaceLayer.cpp
+++ b/src/armnn/layers/DepthToSpaceLayer.cpp
@@ -75,9 +75,11 @@ void DepthToSpaceLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DepthToSpaceLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void DepthToSpaceLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitDepthToSpaceLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/DepthToSpaceLayer.hpp b/src/armnn/layers/DepthToSpaceLayer.hpp
index 0730d4d3ea..d9f6752cbd 100644
--- a/src/armnn/layers/DepthToSpaceLayer.hpp
+++ b/src/armnn/layers/DepthToSpaceLayer.hpp
@@ -35,7 +35,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a DepthToSpaceLayer.
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
index ed52b39050..86c994745c 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
@@ -148,6 +148,7 @@ Layer::ConstantTensors DepthwiseConvolution2dLayer::GetConstantTensorsByRef()
return {m_Weight, m_Bias};
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void DepthwiseConvolution2dLayer::Accept(ILayerVisitor& visitor) const
{
ManagedConstTensorHandle managedWeight(m_Weight);
@@ -163,6 +164,7 @@ void DepthwiseConvolution2dLayer::Accept(ILayerVisitor& visitor) const
visitor.VisitDepthwiseConvolution2dLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
void DepthwiseConvolution2dLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
index 51f6ea9453..8f8f020a0f 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
@@ -41,7 +41,9 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/DequantizeLayer.cpp b/src/armnn/layers/DequantizeLayer.cpp
index cbe9ae17b5..f8a2e057ac 100644
--- a/src/armnn/layers/DequantizeLayer.cpp
+++ b/src/armnn/layers/DequantizeLayer.cpp
@@ -46,9 +46,11 @@ void DequantizeLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DequantizeLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void DequantizeLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitDequantizeLayer(this, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/DequantizeLayer.hpp b/src/armnn/layers/DequantizeLayer.hpp
index a5750ddaab..99bde85f72 100644
--- a/src/armnn/layers/DequantizeLayer.hpp
+++ b/src/armnn/layers/DequantizeLayer.hpp
@@ -28,7 +28,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a DequantizeLayer.
diff --git a/src/armnn/layers/DetectionPostProcessLayer.cpp b/src/armnn/layers/DetectionPostProcessLayer.cpp
index bd94d1d281..41c44d08a9 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.cpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.cpp
@@ -78,6 +78,7 @@ Layer::ConstantTensors DetectionPostProcessLayer::GetConstantTensorsByRef()
return { m_Anchors };
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void DetectionPostProcessLayer::Accept(ILayerVisitor& visitor) const
{
ManagedConstTensorHandle managedAnchors(m_Anchors);
@@ -85,6 +86,7 @@ void DetectionPostProcessLayer::Accept(ILayerVisitor& visitor) const
visitor.VisitDetectionPostProcessLayer(this, GetParameters(), anchorTensor, GetName());
m_Anchors->Unmap();
}
+ARMNN_NO_DEPRECATE_WARN_END
void DetectionPostProcessLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/DetectionPostProcessLayer.hpp b/src/armnn/layers/DetectionPostProcessLayer.hpp
index b409134c1c..1826645fc6 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.hpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.hpp
@@ -34,7 +34,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/DivisionLayer.cpp b/src/armnn/layers/DivisionLayer.cpp
index 5b032ce998..17b671a5f4 100644
--- a/src/armnn/layers/DivisionLayer.cpp
+++ b/src/armnn/layers/DivisionLayer.cpp
@@ -32,9 +32,11 @@ DivisionLayer* DivisionLayer::Clone(Graph& graph) const
return CloneBase<DivisionLayer>(graph, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void DivisionLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitDivisionLayer(this, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/DivisionLayer.hpp b/src/armnn/layers/DivisionLayer.hpp
index 4427a4c4cb..91bccfc184 100644
--- a/src/armnn/layers/DivisionLayer.hpp
+++ b/src/armnn/layers/DivisionLayer.hpp
@@ -24,7 +24,9 @@ public:
/// @param [in] graph The graph into which this layer is being cloned.
DivisionLayer* Clone(Graph& graph) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a DivisionLayer.
diff --git a/src/armnn/layers/ElementwiseUnaryLayer.cpp b/src/armnn/layers/ElementwiseUnaryLayer.cpp
index 8c94106818..6f07cf93f9 100644
--- a/src/armnn/layers/ElementwiseUnaryLayer.cpp
+++ b/src/armnn/layers/ElementwiseUnaryLayer.cpp
@@ -61,9 +61,11 @@ void ElementwiseUnaryLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, GetLayerTypeAsCString(GetType()));
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void ElementwiseUnaryLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitElementwiseUnaryLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ElementwiseUnaryLayer.hpp b/src/armnn/layers/ElementwiseUnaryLayer.hpp
index f6f8862da4..1261882e0b 100644
--- a/src/armnn/layers/ElementwiseUnaryLayer.hpp
+++ b/src/armnn/layers/ElementwiseUnaryLayer.hpp
@@ -34,7 +34,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a ElementwiseUnaryLayer
diff --git a/src/armnn/layers/FakeQuantizationLayer.cpp b/src/armnn/layers/FakeQuantizationLayer.cpp
index 102a6725a7..69f0166d0e 100644
--- a/src/armnn/layers/FakeQuantizationLayer.cpp
+++ b/src/armnn/layers/FakeQuantizationLayer.cpp
@@ -46,11 +46,13 @@ void FakeQuantizationLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "FakeQuantizationLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void FakeQuantizationLayer::Accept(ILayerVisitor& visitor) const
{
IgnoreUnused(visitor);
throw armnn::Exception("FakeQuantizationLayer should not appear in an input graph");
}
+ARMNN_NO_DEPRECATE_WARN_END
void FakeQuantizationLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/FakeQuantizationLayer.hpp b/src/armnn/layers/FakeQuantizationLayer.hpp
index 78e49e6474..c115c63f33 100644
--- a/src/armnn/layers/FakeQuantizationLayer.hpp
+++ b/src/armnn/layers/FakeQuantizationLayer.hpp
@@ -28,7 +28,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/FillLayer.cpp b/src/armnn/layers/FillLayer.cpp
index 41471c3412..45fe07244b 100644
--- a/src/armnn/layers/FillLayer.cpp
+++ b/src/armnn/layers/FillLayer.cpp
@@ -51,9 +51,11 @@ void FillLayer::ValidateTensorShapesFromInputs()
inferredShapes[0][0]);
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void FillLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitFillLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/FillLayer.hpp b/src/armnn/layers/FillLayer.hpp
index eeed141128..096d9ba7dc 100644
--- a/src/armnn/layers/FillLayer.hpp
+++ b/src/armnn/layers/FillLayer.hpp
@@ -27,7 +27,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a FillLayer.
diff --git a/src/armnn/layers/FloorLayer.cpp b/src/armnn/layers/FloorLayer.cpp
index e03bdb16ff..a975ee8d97 100644
--- a/src/armnn/layers/FloorLayer.cpp
+++ b/src/armnn/layers/FloorLayer.cpp
@@ -45,9 +45,11 @@ void FloorLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "FloorLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void FloorLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitFloorLayer(this, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/FloorLayer.hpp b/src/armnn/layers/FloorLayer.hpp
index 07cf151a8a..2b16cfab26 100644
--- a/src/armnn/layers/FloorLayer.hpp
+++ b/src/armnn/layers/FloorLayer.hpp
@@ -28,7 +28,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a FloorLayer.
diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp
index 259d4149c8..2c41d74923 100644
--- a/src/armnn/layers/FullyConnectedLayer.cpp
+++ b/src/armnn/layers/FullyConnectedLayer.cpp
@@ -80,10 +80,12 @@ Layer::ConstantTensors FullyConnectedLayer::GetConstantTensorsByRef()
return {m_Weight, m_Bias};
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void FullyConnectedLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitFullyConnectedLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
void FullyConnectedLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/FullyConnectedLayer.hpp b/src/armnn/layers/FullyConnectedLayer.hpp
index 5639bf27b4..e97282d73f 100644
--- a/src/armnn/layers/FullyConnectedLayer.hpp
+++ b/src/armnn/layers/FullyConnectedLayer.hpp
@@ -43,7 +43,9 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/GatherLayer.cpp b/src/armnn/layers/GatherLayer.cpp
index cdbdaabcdc..e8b67b8348 100644
--- a/src/armnn/layers/GatherLayer.cpp
+++ b/src/armnn/layers/GatherLayer.cpp
@@ -83,9 +83,11 @@ void GatherLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "GatherLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void GatherLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitGatherLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/GatherLayer.hpp b/src/armnn/layers/GatherLayer.hpp
index 3bc8c69bc4..8c294079c3 100644
--- a/src/armnn/layers/GatherLayer.hpp
+++ b/src/armnn/layers/GatherLayer.hpp
@@ -34,7 +34,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a GatherLayer.
diff --git a/src/armnn/layers/InputLayer.cpp b/src/armnn/layers/InputLayer.cpp
index 0f96611792..21246f146b 100644
--- a/src/armnn/layers/InputLayer.cpp
+++ b/src/armnn/layers/InputLayer.cpp
@@ -35,9 +35,11 @@ void InputLayer::ValidateTensorShapesFromInputs()
"InputLayer should already have the TensorInfo set.");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void InputLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitInputLayer(this, this->GetBindingId(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace
diff --git a/src/armnn/layers/InputLayer.hpp b/src/armnn/layers/InputLayer.hpp
index ff6b521bf0..2b73dcec35 100644
--- a/src/armnn/layers/InputLayer.hpp
+++ b/src/armnn/layers/InputLayer.hpp
@@ -28,7 +28,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create an InputLayer.
diff --git a/src/armnn/layers/InstanceNormalizationLayer.cpp b/src/armnn/layers/InstanceNormalizationLayer.cpp
index 87c6877df8..657b44220d 100644
--- a/src/armnn/layers/InstanceNormalizationLayer.cpp
+++ b/src/armnn/layers/InstanceNormalizationLayer.cpp
@@ -46,9 +46,11 @@ void InstanceNormalizationLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "InstanceNormalizationLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void InstanceNormalizationLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitInstanceNormalizationLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/InstanceNormalizationLayer.hpp b/src/armnn/layers/InstanceNormalizationLayer.hpp
index 799cf28f8c..addd61e4f8 100644
--- a/src/armnn/layers/InstanceNormalizationLayer.hpp
+++ b/src/armnn/layers/InstanceNormalizationLayer.hpp
@@ -28,7 +28,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a InstanceNormalizationLayer.
diff --git a/src/armnn/layers/L2NormalizationLayer.cpp b/src/armnn/layers/L2NormalizationLayer.cpp
index c96e708075..7bddbf1f18 100644
--- a/src/armnn/layers/L2NormalizationLayer.cpp
+++ b/src/armnn/layers/L2NormalizationLayer.cpp
@@ -46,9 +46,11 @@ void L2NormalizationLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "L2NormalizationLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void L2NormalizationLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitL2NormalizationLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/L2NormalizationLayer.hpp b/src/armnn/layers/L2NormalizationLayer.hpp
index 5d58077ba8..21072b20a0 100644
--- a/src/armnn/layers/L2NormalizationLayer.hpp
+++ b/src/armnn/layers/L2NormalizationLayer.hpp
@@ -28,7 +28,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a L2NormalizationLayer.
diff --git a/src/armnn/layers/LogSoftmaxLayer.cpp b/src/armnn/layers/LogSoftmaxLayer.cpp
index 24e79ce8ae..ea2518289f 100644
--- a/src/armnn/layers/LogSoftmaxLayer.cpp
+++ b/src/armnn/layers/LogSoftmaxLayer.cpp
@@ -45,9 +45,11 @@ void LogSoftmaxLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LogSoftmaxLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void LogSoftmaxLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitLogSoftmaxLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/LogSoftmaxLayer.hpp b/src/armnn/layers/LogSoftmaxLayer.hpp
index b21bece98d..9963f85f30 100644
--- a/src/armnn/layers/LogSoftmaxLayer.hpp
+++ b/src/armnn/layers/LogSoftmaxLayer.hpp
@@ -29,7 +29,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a LogSoftmaxLayer.
diff --git a/src/armnn/layers/LogicalBinaryLayer.cpp b/src/armnn/layers/LogicalBinaryLayer.cpp
index 0ae5ea5641..3940b85e7b 100644
--- a/src/armnn/layers/LogicalBinaryLayer.cpp
+++ b/src/armnn/layers/LogicalBinaryLayer.cpp
@@ -72,9 +72,11 @@ void LogicalBinaryLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LogicalBinaryLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void LogicalBinaryLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitLogicalBinaryLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/LogicalBinaryLayer.hpp b/src/armnn/layers/LogicalBinaryLayer.hpp
index c6b024b36b..caeaa0a1af 100644
--- a/src/armnn/layers/LogicalBinaryLayer.hpp
+++ b/src/armnn/layers/LogicalBinaryLayer.hpp
@@ -35,7 +35,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a LogicalBinaryLayer
diff --git a/src/armnn/layers/LstmLayer.cpp b/src/armnn/layers/LstmLayer.cpp
index 0fea668b97..a18fdb062a 100644
--- a/src/armnn/layers/LstmLayer.cpp
+++ b/src/armnn/layers/LstmLayer.cpp
@@ -300,6 +300,7 @@ Layer::ConstantTensors LstmLayer::GetConstantTensorsByRef()
m_LayerNormParameters.m_OutputLayerNormWeights};
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void LstmLayer::Accept(ILayerVisitor& visitor) const
{
LstmInputParams inputParams;
@@ -509,6 +510,7 @@ void LstmLayer::Accept(ILayerVisitor& visitor) const
visitor.VisitLstmLayer(this, GetParameters(), inputParams, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
void LstmLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/LstmLayer.hpp b/src/armnn/layers/LstmLayer.hpp
index dc6d12a1d8..fbcc03dd6f 100644
--- a/src/armnn/layers/LstmLayer.hpp
+++ b/src/armnn/layers/LstmLayer.hpp
@@ -44,7 +44,9 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/MapLayer.cpp b/src/armnn/layers/MapLayer.cpp
index 608a71eba6..6defdab076 100644
--- a/src/armnn/layers/MapLayer.cpp
+++ b/src/armnn/layers/MapLayer.cpp
@@ -41,10 +41,12 @@ void MapLayer::ValidateTensorShapesFromInputs()
ARMNN_ASSERT(GetNumOutputSlots() == 0);
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void MapLayer::Accept(ILayerVisitor& visitor) const
{
IgnoreUnused(visitor);
throw armnn::Exception("MapLayer should not appear in an input graph");
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/MapLayer.hpp b/src/armnn/layers/MapLayer.hpp
index 620caf73e9..d82c44a36f 100644
--- a/src/armnn/layers/MapLayer.hpp
+++ b/src/armnn/layers/MapLayer.hpp
@@ -28,7 +28,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a MapLayer.
diff --git a/src/armnn/layers/MaximumLayer.cpp b/src/armnn/layers/MaximumLayer.cpp
index d57e9e63ab..95faeea2f2 100644
--- a/src/armnn/layers/MaximumLayer.cpp
+++ b/src/armnn/layers/MaximumLayer.cpp
@@ -31,9 +31,11 @@ MaximumLayer* MaximumLayer::Clone(Graph& graph) const
return CloneBase<MaximumLayer>(graph, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void MaximumLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitMaximumLayer(this, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/MaximumLayer.hpp b/src/armnn/layers/MaximumLayer.hpp
index 743f79b373..f032b8867d 100644
--- a/src/armnn/layers/MaximumLayer.hpp
+++ b/src/armnn/layers/MaximumLayer.hpp
@@ -24,7 +24,9 @@ public:
/// @param [in] graph The graph into which this layer is being cloned.
MaximumLayer* Clone(Graph& graph) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a MaximumLayer.
diff --git a/src/armnn/layers/MeanLayer.cpp b/src/armnn/layers/MeanLayer.cpp
index 9d4265cdcf..b704e2a336 100644
--- a/src/armnn/layers/MeanLayer.cpp
+++ b/src/armnn/layers/MeanLayer.cpp
@@ -103,9 +103,11 @@ void MeanLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShape, m_ShapeInferenceMethod, "MeanLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void MeanLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitMeanLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/MeanLayer.hpp b/src/armnn/layers/MeanLayer.hpp
index 3a094bf6fe..94b0cbe1a3 100644
--- a/src/armnn/layers/MeanLayer.hpp
+++ b/src/armnn/layers/MeanLayer.hpp
@@ -29,7 +29,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a MeanLayer.
diff --git a/src/armnn/layers/MemCopyLayer.cpp b/src/armnn/layers/MemCopyLayer.cpp
index 40c1b98012..61fa462e94 100644
--- a/src/armnn/layers/MemCopyLayer.cpp
+++ b/src/armnn/layers/MemCopyLayer.cpp
@@ -49,11 +49,13 @@ void MemCopyLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MemCopyLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void MemCopyLayer::Accept(ILayerVisitor& visitor) const
{
IgnoreUnused(visitor);
throw armnn::Exception("MemCopyLayer should not appear in an input graph");
}
+ARMNN_NO_DEPRECATE_WARN_END
void MemCopyLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/MemCopyLayer.hpp b/src/armnn/layers/MemCopyLayer.hpp
index b913c529e5..3c6fd0d8d7 100644
--- a/src/armnn/layers/MemCopyLayer.hpp
+++ b/src/armnn/layers/MemCopyLayer.hpp
@@ -28,7 +28,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/MemImportLayer.cpp b/src/armnn/layers/MemImportLayer.cpp
index c96f92bc5e..689678e693 100644
--- a/src/armnn/layers/MemImportLayer.cpp
+++ b/src/armnn/layers/MemImportLayer.cpp
@@ -49,11 +49,13 @@ void MemImportLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MemImportLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void MemImportLayer::Accept(ILayerVisitor& visitor) const
{
IgnoreUnused(visitor);
throw armnn::Exception("MemImportLayer should not appear in an input graph");
}
+ARMNN_NO_DEPRECATE_WARN_END
void MemImportLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/MemImportLayer.hpp b/src/armnn/layers/MemImportLayer.hpp
index 47379701c7..778770132c 100644
--- a/src/armnn/layers/MemImportLayer.hpp
+++ b/src/armnn/layers/MemImportLayer.hpp
@@ -28,7 +28,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/MergeLayer.cpp b/src/armnn/layers/MergeLayer.cpp
index 74a31a87b8..2bd29f286d 100644
--- a/src/armnn/layers/MergeLayer.cpp
+++ b/src/armnn/layers/MergeLayer.cpp
@@ -58,9 +58,11 @@ std::vector<TensorShape> MergeLayer::InferOutputShapes(const std::vector<TensorS
return {inputShapes[0]};
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void MergeLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitMergeLayer(this, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/MergeLayer.hpp b/src/armnn/layers/MergeLayer.hpp
index 07f69004b5..d7cfcf3d1f 100644
--- a/src/armnn/layers/MergeLayer.hpp
+++ b/src/armnn/layers/MergeLayer.hpp
@@ -33,7 +33,10 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a MergeLayer.
diff --git a/src/armnn/layers/MinimumLayer.cpp b/src/armnn/layers/MinimumLayer.cpp
index f60815ed6b..38ab442fd5 100644
--- a/src/armnn/layers/MinimumLayer.cpp
+++ b/src/armnn/layers/MinimumLayer.cpp
@@ -32,9 +32,11 @@ MinimumLayer* MinimumLayer::Clone(Graph& graph) const
return CloneBase<MinimumLayer>(graph, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void MinimumLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitMinimumLayer(this, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/MinimumLayer.hpp b/src/armnn/layers/MinimumLayer.hpp
index 2db06292fd..634591e935 100644
--- a/src/armnn/layers/MinimumLayer.hpp
+++ b/src/armnn/layers/MinimumLayer.hpp
@@ -24,7 +24,10 @@ public:
/// @param [in] graph The graph into which this layer is being cloned.
MinimumLayer* Clone(Graph& graph) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a MinimumLayer.
diff --git a/src/armnn/layers/MultiplicationLayer.cpp b/src/armnn/layers/MultiplicationLayer.cpp
index 8fc13aca76..4ff188cc37 100644
--- a/src/armnn/layers/MultiplicationLayer.cpp
+++ b/src/armnn/layers/MultiplicationLayer.cpp
@@ -32,9 +32,11 @@ MultiplicationLayer* MultiplicationLayer::Clone(Graph& graph) const
return CloneBase<MultiplicationLayer>(graph, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void MultiplicationLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitMultiplicationLayer(this, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/MultiplicationLayer.hpp b/src/armnn/layers/MultiplicationLayer.hpp
index 692f40784c..8acf4f6d0d 100644
--- a/src/armnn/layers/MultiplicationLayer.hpp
+++ b/src/armnn/layers/MultiplicationLayer.hpp
@@ -24,7 +24,10 @@ public:
/// @param [in] graph The graph into which this layer is being cloned.
MultiplicationLayer* Clone(Graph& graph) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a MultiplicationLayer.
diff --git a/src/armnn/layers/NormalizationLayer.cpp b/src/armnn/layers/NormalizationLayer.cpp
index 4bf97edb72..bd38fa43b5 100644
--- a/src/armnn/layers/NormalizationLayer.cpp
+++ b/src/armnn/layers/NormalizationLayer.cpp
@@ -46,9 +46,11 @@ void NormalizationLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "NormalizationLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void NormalizationLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitNormalizationLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/NormalizationLayer.hpp b/src/armnn/layers/NormalizationLayer.hpp
index 00a4435527..e36e8863a8 100644
--- a/src/armnn/layers/NormalizationLayer.hpp
+++ b/src/armnn/layers/NormalizationLayer.hpp
@@ -28,7 +28,10 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a NormalizationLayer.
diff --git a/src/armnn/layers/OutputLayer.cpp b/src/armnn/layers/OutputLayer.cpp
index d14337fd11..579aede6b0 100644
--- a/src/armnn/layers/OutputLayer.cpp
+++ b/src/armnn/layers/OutputLayer.cpp
@@ -37,9 +37,11 @@ void OutputLayer::ValidateTensorShapesFromInputs()
"OutputLayer: Input slot must be connected.");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void OutputLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitOutputLayer(this, GetBindingId(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/OutputLayer.hpp b/src/armnn/layers/OutputLayer.hpp
index 408a28a6f3..d2bdf19ddd 100644
--- a/src/armnn/layers/OutputLayer.hpp
+++ b/src/armnn/layers/OutputLayer.hpp
@@ -40,7 +40,10 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create an OutputLayer.
diff --git a/src/armnn/layers/PadLayer.cpp b/src/armnn/layers/PadLayer.cpp
index 2c53f20703..78af9d3c47 100644
--- a/src/armnn/layers/PadLayer.cpp
+++ b/src/armnn/layers/PadLayer.cpp
@@ -71,9 +71,11 @@ void PadLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "PadLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void PadLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitPadLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/PadLayer.hpp b/src/armnn/layers/PadLayer.hpp
index 5664997597..9a31ae5d60 100644
--- a/src/armnn/layers/PadLayer.hpp
+++ b/src/armnn/layers/PadLayer.hpp
@@ -35,7 +35,10 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape> &inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a PadLayer.
diff --git a/src/armnn/layers/PermuteLayer.cpp b/src/armnn/layers/PermuteLayer.cpp
index 859e687cb3..1c563addf9 100644
--- a/src/armnn/layers/PermuteLayer.cpp
+++ b/src/armnn/layers/PermuteLayer.cpp
@@ -57,9 +57,11 @@ void PermuteLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "PermuteLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void PermuteLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitPermuteLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/PermuteLayer.hpp b/src/armnn/layers/PermuteLayer.hpp
index 67be2e1939..db256b361b 100644
--- a/src/armnn/layers/PermuteLayer.hpp
+++ b/src/armnn/layers/PermuteLayer.hpp
@@ -60,7 +60,10 @@ public:
GetPermutation().IsEqual(PolymorphicDowncast<const PermuteLayer*>(&other)->GetPermutation());
}
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a PermuteLayer.
diff --git a/src/armnn/layers/Pooling2dLayer.cpp b/src/armnn/layers/Pooling2dLayer.cpp
index 0deafaacdd..d22bce2022 100644
--- a/src/armnn/layers/Pooling2dLayer.cpp
+++ b/src/armnn/layers/Pooling2dLayer.cpp
@@ -117,9 +117,11 @@ void Pooling2dLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Pooling2dLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void Pooling2dLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitPooling2dLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/Pooling2dLayer.hpp b/src/armnn/layers/Pooling2dLayer.hpp
index 90c9a44fbd..677c10b661 100644
--- a/src/armnn/layers/Pooling2dLayer.hpp
+++ b/src/armnn/layers/Pooling2dLayer.hpp
@@ -34,7 +34,10 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a Pooling2dLayer.
diff --git a/src/armnn/layers/PreCompiledLayer.cpp b/src/armnn/layers/PreCompiledLayer.cpp
index 67c1db4011..14dffe5e80 100644
--- a/src/armnn/layers/PreCompiledLayer.cpp
+++ b/src/armnn/layers/PreCompiledLayer.cpp
@@ -49,11 +49,13 @@ void PreCompiledLayer::SetPreCompiledObject(PreCompiledObjectPtr preCompiledObje
m_PreCompiledObject = std::move(preCompiledObject);
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void PreCompiledLayer::Accept(ILayerVisitor& visitor) const
{
IgnoreUnused(visitor);
throw armnn::Exception("PreCompiledLayer should not appear in an input graph");
}
+ARMNN_NO_DEPRECATE_WARN_END
void PreCompiledLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/PreCompiledLayer.hpp b/src/armnn/layers/PreCompiledLayer.hpp
index 0db1472413..e2c5e802fb 100644
--- a/src/armnn/layers/PreCompiledLayer.hpp
+++ b/src/armnn/layers/PreCompiledLayer.hpp
@@ -33,7 +33,10 @@ public:
void SetPreCompiledObject(PreCompiledObjectPtr preCompiledObject);
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/PreluLayer.cpp b/src/armnn/layers/PreluLayer.cpp
index 18d81ae9b6..9fb9f07f8c 100644
--- a/src/armnn/layers/PreluLayer.cpp
+++ b/src/armnn/layers/PreluLayer.cpp
@@ -116,9 +116,11 @@ void PreluLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "PreluLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void PreluLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitPreluLayer(this, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/PreluLayer.hpp b/src/armnn/layers/PreluLayer.hpp
index 511be29d17..eecffbcd22 100644
--- a/src/armnn/layers/PreluLayer.hpp
+++ b/src/armnn/layers/PreluLayer.hpp
@@ -35,7 +35,10 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a PreluLayer.
diff --git a/src/armnn/layers/QLstmLayer.cpp b/src/armnn/layers/QLstmLayer.cpp
index 0294afdc0d..493e3fe189 100644
--- a/src/armnn/layers/QLstmLayer.cpp
+++ b/src/armnn/layers/QLstmLayer.cpp
@@ -302,6 +302,7 @@ Layer::ConstantTensors QLstmLayer::GetConstantTensorsByRef()
m_LayerNormParameters.m_OutputLayerNormWeights};
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void QLstmLayer::Accept(ILayerVisitor& visitor) const
{
LstmInputParams inputParams;
@@ -531,6 +532,7 @@ void QLstmLayer::Accept(ILayerVisitor& visitor) const
visitor.VisitQLstmLayer(this, GetParameters(), inputParams, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
void QLstmLayer::ExecuteStrategy(IStrategy& strategy) const
diff --git a/src/armnn/layers/QLstmLayer.hpp b/src/armnn/layers/QLstmLayer.hpp
index 38a0464da6..12774a935e 100644
--- a/src/armnn/layers/QLstmLayer.hpp
+++ b/src/armnn/layers/QLstmLayer.hpp
@@ -107,7 +107,10 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/QuantizeLayer.cpp b/src/armnn/layers/QuantizeLayer.cpp
index 6ce28c4153..e37d6f5300 100644
--- a/src/armnn/layers/QuantizeLayer.cpp
+++ b/src/armnn/layers/QuantizeLayer.cpp
@@ -45,9 +45,11 @@ void QuantizeLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "QuantizeLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void QuantizeLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitQuantizeLayer(this, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} //namespace armnn
\ No newline at end of file
diff --git a/src/armnn/layers/QuantizeLayer.hpp b/src/armnn/layers/QuantizeLayer.hpp
index 2f331a493c..d8898ba1e9 100644
--- a/src/armnn/layers/QuantizeLayer.hpp
+++ b/src/armnn/layers/QuantizeLayer.hpp
@@ -23,7 +23,10 @@ public:
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
QuantizeLayer(const char* name);
diff --git a/src/armnn/layers/QuantizedLstmLayer.cpp b/src/armnn/layers/QuantizedLstmLayer.cpp
index be50f4863b..81642198fb 100644
--- a/src/armnn/layers/QuantizedLstmLayer.cpp
+++ b/src/armnn/layers/QuantizedLstmLayer.cpp
@@ -169,6 +169,7 @@ Layer::ConstantTensors QuantizedLstmLayer::GetConstantTensorsByRef()
};
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void QuantizedLstmLayer::Accept(ILayerVisitor& visitor) const
{
QuantizedLstmInputParams inputParams;
@@ -305,6 +306,7 @@ void QuantizedLstmLayer::Accept(ILayerVisitor& visitor) const
visitor.VisitQuantizedLstmLayer(this, inputParams, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
void QuantizedLstmLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/QuantizedLstmLayer.hpp b/src/armnn/layers/QuantizedLstmLayer.hpp
index 25cc7b7d8b..fe7d423145 100644
--- a/src/armnn/layers/QuantizedLstmLayer.hpp
+++ b/src/armnn/layers/QuantizedLstmLayer.hpp
@@ -69,7 +69,10 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/RankLayer.cpp b/src/armnn/layers/RankLayer.cpp
index 3b14ef0d93..a1e06efa11 100644
--- a/src/armnn/layers/RankLayer.cpp
+++ b/src/armnn/layers/RankLayer.cpp
@@ -41,10 +41,13 @@ void RankLayer::ValidateTensorShapesFromInputs()
VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
ValidateAndCopyShape(outputShape, inferredShape, m_ShapeInferenceMethod, "RankLayer");
}
+
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void RankLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitRankLayer(this, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
void RankLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/RankLayer.hpp b/src/armnn/layers/RankLayer.hpp
index fbd2824bb5..416e1b0f6e 100644
--- a/src/armnn/layers/RankLayer.hpp
+++ b/src/armnn/layers/RankLayer.hpp
@@ -22,7 +22,10 @@ class RankLayer : public Layer
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
void ExecuteStrategy(IStrategy& strategy) const override;
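For code that used to walk a graph through ILayerVisitor, the replacement visible throughout these hunks is the per-layer ExecuteStrategy(IStrategy&) override. A minimal consumer sketch follows; it is not part of this patch, and the parameter list of IStrategy::ExecuteStrategy (layer pointer, descriptor, constant tensors, name, binding id) is assumed from the IStrategy header of this Arm NN version, so verify it against include/armnn/IStrategy.hpp before relying on it:

    // Sketch only: records the name of every layer handed to it, the kind of
    // read-only graph inspection previously written against ILayerVisitor.
    // The ExecuteStrategy parameter list below is an assumption, not defined by this patch.
    #include <armnn/Descriptors.hpp>
    #include <armnn/IStrategy.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>
    #include <armnn/utility/IgnoreUnused.hpp>

    #include <string>
    #include <vector>

    class NameCollectingStrategy : public armnn::IStrategy
    {
    public:
        void ExecuteStrategy(const armnn::IConnectableLayer* layer,
                             const armnn::BaseDescriptor& descriptor,
                             const std::vector<armnn::ConstTensor>& constants,
                             const char* name,
                             const armnn::LayerBindingId id) override
        {
            armnn::IgnoreUnused(layer, descriptor, constants, id);
            m_Names.emplace_back(name ? name : "<unnamed>");
        }

        const std::vector<std::string>& GetNames() const { return m_Names; }

    private:
        std::vector<std::string> m_Names;
    };

Each layer's ExecuteStrategy(IStrategy&) override, the non-deprecated counterpart to Accept in the .cpp hunks above, is what feeds this callback with that layer's data.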
diff --git a/src/armnn/layers/ReduceLayer.cpp b/src/armnn/layers/ReduceLayer.cpp
index 31a2dfa479..07651fca67 100644
--- a/src/armnn/layers/ReduceLayer.cpp
+++ b/src/armnn/layers/ReduceLayer.cpp
@@ -102,9 +102,11 @@ void ReduceLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShape, m_ShapeInferenceMethod, "ReduceLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void ReduceLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitReduceLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ReduceLayer.hpp b/src/armnn/layers/ReduceLayer.hpp
index fd4f2073f1..a6ac44e69c 100644
--- a/src/armnn/layers/ReduceLayer.hpp
+++ b/src/armnn/layers/ReduceLayer.hpp
@@ -27,7 +27,10 @@ public:
/// will lead to a valid configuration of @ref ReduceLayer.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a ReduceLayer.
diff --git a/src/armnn/layers/ReshapeLayer.cpp b/src/armnn/layers/ReshapeLayer.cpp
index f303ff7c68..1b9e691bcf 100644
--- a/src/armnn/layers/ReshapeLayer.cpp
+++ b/src/armnn/layers/ReshapeLayer.cpp
@@ -53,9 +53,11 @@ void ReshapeLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ReshapeLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void ReshapeLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitReshapeLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ReshapeLayer.hpp b/src/armnn/layers/ReshapeLayer.hpp
index 78335e6a1a..d107b5cfc8 100644
--- a/src/armnn/layers/ReshapeLayer.hpp
+++ b/src/armnn/layers/ReshapeLayer.hpp
@@ -45,7 +45,10 @@ public:
m_Param.m_TargetShape == PolymorphicDowncast<const ReshapeLayer*>(&other)->m_Param.m_TargetShape;
}
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a ReshapeLayer.
diff --git a/src/armnn/layers/ResizeLayer.cpp b/src/armnn/layers/ResizeLayer.cpp
index 3a390d43cd..c190f494d1 100644
--- a/src/armnn/layers/ResizeLayer.cpp
+++ b/src/armnn/layers/ResizeLayer.cpp
@@ -75,9 +75,11 @@ void ResizeLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ResizeLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void ResizeLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitResizeLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ResizeLayer.hpp b/src/armnn/layers/ResizeLayer.hpp
index 34625857f8..fab18c7716 100644
--- a/src/armnn/layers/ResizeLayer.hpp
+++ b/src/armnn/layers/ResizeLayer.hpp
@@ -34,7 +34,10 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a ResizeLayer.
diff --git a/src/armnn/layers/RsqrtLayer.cpp b/src/armnn/layers/RsqrtLayer.cpp
index 9c09701ab8..a0572da7c3 100644
--- a/src/armnn/layers/RsqrtLayer.cpp
+++ b/src/armnn/layers/RsqrtLayer.cpp
@@ -47,9 +47,11 @@ void RsqrtLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "RsqrtLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void RsqrtLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitRsqrtLayer(this, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
\ No newline at end of file
diff --git a/src/armnn/layers/RsqrtLayer.hpp b/src/armnn/layers/RsqrtLayer.hpp
index 4fcbf72120..a31aea6498 100644
--- a/src/armnn/layers/RsqrtLayer.hpp
+++ b/src/armnn/layers/RsqrtLayer.hpp
@@ -28,7 +28,10 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create an RsqrtLayer.
diff --git a/src/armnn/layers/ShapeLayer.cpp b/src/armnn/layers/ShapeLayer.cpp
index 4193fa9aab..6a55a2d296 100644
--- a/src/armnn/layers/ShapeLayer.cpp
+++ b/src/armnn/layers/ShapeLayer.cpp
@@ -59,11 +59,13 @@ std::vector<TensorShape> ShapeLayer::InferOutputShapes(const std::vector<TensorS
return std::vector<TensorShape>({ outputShape });
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void ShapeLayer::Accept(ILayerVisitor& visitor) const
{
IgnoreUnused(visitor);
throw armnn::Exception("ShapeLayer VisitShapeLayer is not implemented");
}
+ARMNN_NO_DEPRECATE_WARN_END
void ShapeLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/ShapeLayer.hpp b/src/armnn/layers/ShapeLayer.hpp
index fee285c2f0..35ef873792 100644
--- a/src/armnn/layers/ShapeLayer.hpp
+++ b/src/armnn/layers/ShapeLayer.hpp
@@ -34,7 +34,10 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/SliceLayer.cpp b/src/armnn/layers/SliceLayer.cpp
index b512ca4915..e7d8f1ed19 100644
--- a/src/armnn/layers/SliceLayer.cpp
+++ b/src/armnn/layers/SliceLayer.cpp
@@ -59,9 +59,11 @@ std::vector<TensorShape> SliceLayer::InferOutputShapes(const std::vector<TensorS
return std::vector<TensorShape>({ outputShape });
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void SliceLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitSliceLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/SliceLayer.hpp b/src/armnn/layers/SliceLayer.hpp
index 0505a056c5..dda66a1be6 100644
--- a/src/armnn/layers/SliceLayer.hpp
+++ b/src/armnn/layers/SliceLayer.hpp
@@ -34,7 +34,10 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a SliceLayer.
diff --git a/src/armnn/layers/SoftmaxLayer.cpp b/src/armnn/layers/SoftmaxLayer.cpp
index 9882da42b0..eab5b85e45 100644
--- a/src/armnn/layers/SoftmaxLayer.cpp
+++ b/src/armnn/layers/SoftmaxLayer.cpp
@@ -46,9 +46,11 @@ void SoftmaxLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SoftmaxLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void SoftmaxLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitSoftmaxLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/SoftmaxLayer.hpp b/src/armnn/layers/SoftmaxLayer.hpp
index cbdd7c58f9..035e7bcf2d 100644
--- a/src/armnn/layers/SoftmaxLayer.hpp
+++ b/src/armnn/layers/SoftmaxLayer.hpp
@@ -28,7 +28,10 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a SoftmaxLayer.
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.cpp b/src/armnn/layers/SpaceToBatchNdLayer.cpp
index b9e33314ef..3f58b3f6c9 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.cpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.cpp
@@ -83,9 +83,11 @@ void SpaceToBatchNdLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SpaceToBatchNdLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void SpaceToBatchNdLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitSpaceToBatchNdLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.hpp b/src/armnn/layers/SpaceToBatchNdLayer.hpp
index 28857d8aba..70972bd8b3 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.hpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.hpp
@@ -35,7 +35,10 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a SpaceToBatchNdLayer.
diff --git a/src/armnn/layers/SpaceToDepthLayer.cpp b/src/armnn/layers/SpaceToDepthLayer.cpp
index 90ba8fc8c3..1a3112c495 100644
--- a/src/armnn/layers/SpaceToDepthLayer.cpp
+++ b/src/armnn/layers/SpaceToDepthLayer.cpp
@@ -77,9 +77,11 @@ void SpaceToDepthLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SpaceToDepthLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void SpaceToDepthLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitSpaceToDepthLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/SpaceToDepthLayer.hpp b/src/armnn/layers/SpaceToDepthLayer.hpp
index a8bc1089a3..267ac3b089 100644
--- a/src/armnn/layers/SpaceToDepthLayer.hpp
+++ b/src/armnn/layers/SpaceToDepthLayer.hpp
@@ -35,7 +35,10 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a SpaceToDepthLayer.
diff --git a/src/armnn/layers/SplitterLayer.cpp b/src/armnn/layers/SplitterLayer.cpp
index 5e6622e13a..c1e191c1a8 100644
--- a/src/armnn/layers/SplitterLayer.cpp
+++ b/src/armnn/layers/SplitterLayer.cpp
@@ -241,9 +241,11 @@ void SplitterLayer::ValidateTensorShapesFromInputs()
}
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void SplitterLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitSplitterLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/SplitterLayer.hpp b/src/armnn/layers/SplitterLayer.hpp
index f90696b1ad..1fc37ef295 100644
--- a/src/armnn/layers/SplitterLayer.hpp
+++ b/src/armnn/layers/SplitterLayer.hpp
@@ -43,7 +43,10 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a SplitterLayer.
diff --git a/src/armnn/layers/StackLayer.cpp b/src/armnn/layers/StackLayer.cpp
index 11935a1acf..fe2d123244 100644
--- a/src/armnn/layers/StackLayer.cpp
+++ b/src/armnn/layers/StackLayer.cpp
@@ -95,9 +95,11 @@ void StackLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "StackLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void StackLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitStackLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/StackLayer.hpp b/src/armnn/layers/StackLayer.hpp
index 3d05da0bf6..8d38907de7 100644
--- a/src/armnn/layers/StackLayer.hpp
+++ b/src/armnn/layers/StackLayer.hpp
@@ -34,7 +34,10 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a StackLayer.
diff --git a/src/armnn/layers/StandInLayer.cpp b/src/armnn/layers/StandInLayer.cpp
index 6281f3e51e..ccf152921a 100644
--- a/src/armnn/layers/StandInLayer.cpp
+++ b/src/armnn/layers/StandInLayer.cpp
@@ -41,8 +41,10 @@ void StandInLayer::ValidateTensorShapesFromInputs()
// so do nothing here.
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void StandInLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitStandInLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/StandInLayer.hpp b/src/armnn/layers/StandInLayer.hpp
index 2864753efa..bb500065eb 100644
--- a/src/armnn/layers/StandInLayer.hpp
+++ b/src/armnn/layers/StandInLayer.hpp
@@ -37,7 +37,10 @@ public:
/// Accepts a visitor object and calls VisitStandInLayer() method.
/// @param visitor The visitor on which to call VisitStandInLayer() method.
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a StandInLayer.
diff --git a/src/armnn/layers/StridedSliceLayer.cpp b/src/armnn/layers/StridedSliceLayer.cpp
index c8f36355ae..aa7012c9a5 100644
--- a/src/armnn/layers/StridedSliceLayer.cpp
+++ b/src/armnn/layers/StridedSliceLayer.cpp
@@ -111,9 +111,11 @@ void StridedSliceLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "StridedSliceLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void StridedSliceLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitStridedSliceLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/StridedSliceLayer.hpp b/src/armnn/layers/StridedSliceLayer.hpp
index 35ac3709da..7e17cb2e84 100644
--- a/src/armnn/layers/StridedSliceLayer.hpp
+++ b/src/armnn/layers/StridedSliceLayer.hpp
@@ -34,7 +34,10 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a StridedSliceLayer.
diff --git a/src/armnn/layers/SubtractionLayer.cpp b/src/armnn/layers/SubtractionLayer.cpp
index 34087bd466..bed708513e 100644
--- a/src/armnn/layers/SubtractionLayer.cpp
+++ b/src/armnn/layers/SubtractionLayer.cpp
@@ -32,9 +32,11 @@ SubtractionLayer* SubtractionLayer::Clone(Graph& graph) const
return CloneBase<SubtractionLayer>(graph, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void SubtractionLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitSubtractionLayer(this, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/SubtractionLayer.hpp b/src/armnn/layers/SubtractionLayer.hpp
index 527b50bcad..8c31479c8e 100644
--- a/src/armnn/layers/SubtractionLayer.hpp
+++ b/src/armnn/layers/SubtractionLayer.hpp
@@ -24,7 +24,10 @@ public:
/// @param [in] graph The graph into which this layer is being cloned.
SubtractionLayer* Clone(Graph& graph) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a SubtractionLayer.
diff --git a/src/armnn/layers/SwitchLayer.cpp b/src/armnn/layers/SwitchLayer.cpp
index 879263955f..258a7ffaec 100644
--- a/src/armnn/layers/SwitchLayer.cpp
+++ b/src/armnn/layers/SwitchLayer.cpp
@@ -52,9 +52,11 @@ void SwitchLayer::ValidateTensorShapesFromInputs()
GetOutputSlot(1).GetTensorInfo().GetShape(), inferredShapes[1], m_ShapeInferenceMethod, "SwitchLayer", 1);
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void SwitchLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitSwitchLayer(this, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/SwitchLayer.hpp b/src/armnn/layers/SwitchLayer.hpp
index 025f379c99..a36261b51a 100644
--- a/src/armnn/layers/SwitchLayer.hpp
+++ b/src/armnn/layers/SwitchLayer.hpp
@@ -28,7 +28,10 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a SwitchLayer.
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp
index c774dd0bbf..acdbebe802 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.cpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp
@@ -121,6 +121,7 @@ Layer::ConstantTensors TransposeConvolution2dLayer::GetConstantTensorsByRef()
return {m_Weight, m_Bias};
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void TransposeConvolution2dLayer::Accept(ILayerVisitor& visitor) const
{
ManagedConstTensorHandle managedWeight(m_Weight);
@@ -136,6 +137,7 @@ void TransposeConvolution2dLayer::Accept(ILayerVisitor& visitor) const
visitor.VisitTransposeConvolution2dLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
void TransposeConvolution2dLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.hpp b/src/armnn/layers/TransposeConvolution2dLayer.hpp
index 1b17dac3c6..b6db41c2b7 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.hpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.hpp
@@ -40,7 +40,10 @@ public:
/// @return A vector of the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/TransposeLayer.cpp b/src/armnn/layers/TransposeLayer.cpp
index 8951fe4637..ffd8693049 100644
--- a/src/armnn/layers/TransposeLayer.cpp
+++ b/src/armnn/layers/TransposeLayer.cpp
@@ -57,9 +57,11 @@ void TransposeLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "TransposeLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void TransposeLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitTransposeLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/TransposeLayer.hpp b/src/armnn/layers/TransposeLayer.hpp
index a4245242ed..8449db4d9d 100644
--- a/src/armnn/layers/TransposeLayer.hpp
+++ b/src/armnn/layers/TransposeLayer.hpp
@@ -58,7 +58,10 @@ public:
GetPermutation().IsEqual(PolymorphicDowncast<const TransposeLayer*>(&other)->GetPermutation());
}
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a TransposeLayer.
diff --git a/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp b/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp
index 45417069e4..a3671a0c42 100644
--- a/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp
+++ b/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp
@@ -307,11 +307,13 @@ Layer::ConstantTensors UnidirectionalSequenceLstmLayer::GetConstantTensorsByRef(
m_LayerNormParameters.m_OutputLayerNormWeights};
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void UnidirectionalSequenceLstmLayer::Accept(ILayerVisitor& visitor) const
{
IgnoreUnused(visitor);
throw armnn::Exception("UnidirectionalSequenceLstmLayer: VisitUnidirectionalSequenceLstmLayer is not implemented");
}
+ARMNN_NO_DEPRECATE_WARN_END
void UnidirectionalSequenceLstmLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/UnidirectionalSequenceLstmLayer.hpp b/src/armnn/layers/UnidirectionalSequenceLstmLayer.hpp
index fb59f01ab6..857d2776a9 100644
--- a/src/armnn/layers/UnidirectionalSequenceLstmLayer.hpp
+++ b/src/armnn/layers/UnidirectionalSequenceLstmLayer.hpp
@@ -44,7 +44,10 @@ public:
/// @return A vector of the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/UnmapLayer.cpp b/src/armnn/layers/UnmapLayer.cpp
index 4a43f9ff21..fa5dd9ebf2 100644
--- a/src/armnn/layers/UnmapLayer.cpp
+++ b/src/armnn/layers/UnmapLayer.cpp
@@ -41,10 +41,12 @@ void UnmapLayer::ValidateTensorShapesFromInputs()
ARMNN_ASSERT(GetNumOutputSlots() == 0);
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void UnmapLayer::Accept(ILayerVisitor& visitor) const
{
IgnoreUnused(visitor);
throw armnn::Exception("UnmapLayer should not appear in an input graph");
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/UnmapLayer.hpp b/src/armnn/layers/UnmapLayer.hpp
index 12d4342d62..3d1d11534e 100644
--- a/src/armnn/layers/UnmapLayer.hpp
+++ b/src/armnn/layers/UnmapLayer.hpp
@@ -28,7 +28,10 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create an UnmapLayer.
diff --git a/src/armnn/test/ConstTensorLayerVisitor.cpp b/src/armnn/test/ConstTensorLayerVisitor.cpp
index d3d8698972..e21e777409 100644
--- a/src/armnn/test/ConstTensorLayerVisitor.cpp
+++ b/src/armnn/test/ConstTensorLayerVisitor.cpp
@@ -58,73 +58,6 @@ void TestLstmLayerVisitor::CheckDescriptor(const LstmDescriptor& descriptor)
CHECK(m_Descriptor.m_ProjectionEnabled == descriptor.m_ProjectionEnabled);
}
-void TestLstmLayerVisitor::CheckConstTensorPtrs(const std::string& name,
- const ConstTensor* expected,
- const ConstTensor* actual)
-{
- if (expected == nullptr)
- {
- CHECK_MESSAGE(actual == nullptr, name + " actual should have been a nullptr");
- }
- else
- {
- CHECK_MESSAGE(actual != nullptr, name + " actual should have been set");
- if (actual != nullptr)
- {
- CheckConstTensors(*expected, *actual);
- }
- }
-}
-
-void TestLstmLayerVisitor::CheckInputParameters(const LstmInputParams& inputParams)
-{
- CheckConstTensorPtrs("ProjectionBias", m_InputParams.m_ProjectionBias, inputParams.m_ProjectionBias);
- CheckConstTensorPtrs("ProjectionWeights", m_InputParams.m_ProjectionWeights, inputParams.m_ProjectionWeights);
- CheckConstTensorPtrs("OutputGateBias", m_InputParams.m_OutputGateBias, inputParams.m_OutputGateBias);
- CheckConstTensorPtrs("InputToInputWeights",
- m_InputParams.m_InputToInputWeights, inputParams.m_InputToInputWeights);
- CheckConstTensorPtrs("InputToForgetWeights",
- m_InputParams.m_InputToForgetWeights, inputParams.m_InputToForgetWeights);
- CheckConstTensorPtrs("InputToCellWeights", m_InputParams.m_InputToCellWeights, inputParams.m_InputToCellWeights);
- CheckConstTensorPtrs(
- "InputToOutputWeights", m_InputParams.m_InputToOutputWeights, inputParams.m_InputToOutputWeights);
- CheckConstTensorPtrs(
- "RecurrentToInputWeights", m_InputParams.m_RecurrentToInputWeights, inputParams.m_RecurrentToInputWeights);
- CheckConstTensorPtrs(
- "RecurrentToForgetWeights", m_InputParams.m_RecurrentToForgetWeights, inputParams.m_RecurrentToForgetWeights);
- CheckConstTensorPtrs(
- "RecurrentToCellWeights", m_InputParams.m_RecurrentToCellWeights, inputParams.m_RecurrentToCellWeights);
- CheckConstTensorPtrs(
- "RecurrentToOutputWeights", m_InputParams.m_RecurrentToOutputWeights, inputParams.m_RecurrentToOutputWeights);
- CheckConstTensorPtrs(
- "CellToInputWeights", m_InputParams.m_CellToInputWeights, inputParams.m_CellToInputWeights);
- CheckConstTensorPtrs(
- "CellToForgetWeights", m_InputParams.m_CellToForgetWeights, inputParams.m_CellToForgetWeights);
- CheckConstTensorPtrs(
- "CellToOutputWeights", m_InputParams.m_CellToOutputWeights, inputParams.m_CellToOutputWeights);
- CheckConstTensorPtrs("InputGateBias", m_InputParams.m_InputGateBias, inputParams.m_InputGateBias);
- CheckConstTensorPtrs("ForgetGateBias", m_InputParams.m_ForgetGateBias, inputParams.m_ForgetGateBias);
- CheckConstTensorPtrs("CellBias", m_InputParams.m_CellBias, inputParams.m_CellBias);
-}
-
-void TestQLstmLayerVisitor::CheckConstTensorPtrs(const std::string& name,
- const ConstTensor* expected,
- const ConstTensor* actual)
-{
- if (expected == nullptr)
- {
- CHECK_MESSAGE(actual == nullptr, name + " actual should have been a nullptr");
- }
- else
- {
- CHECK_MESSAGE(actual != nullptr, name + " actual should have been set");
- if (actual != nullptr)
- {
- CheckConstTensors(*expected, *actual);
- }
- }
-}
-
void TestQLstmLayerVisitor::CheckDescriptor(const QLstmDescriptor& descriptor)
{
CHECK(m_Descriptor.m_CellClip == descriptor.m_CellClip);
@@ -134,95 +67,6 @@ void TestQLstmLayerVisitor::CheckDescriptor(const QLstmDescriptor& descriptor)
CHECK(m_Descriptor.m_ProjectionEnabled == descriptor.m_ProjectionEnabled);
}
-void TestQLstmLayerVisitor::CheckInputParameters(const LstmInputParams& inputParams)
-{
- CheckConstTensorPtrs("InputToInputWeights",
- m_InputParams.m_InputToInputWeights,
- inputParams.m_InputToInputWeights);
-
- CheckConstTensorPtrs("InputToForgetWeights",
- m_InputParams.m_InputToForgetWeights,
- inputParams.m_InputToForgetWeights);
-
- CheckConstTensorPtrs("InputToCellWeights",
- m_InputParams.m_InputToCellWeights,
- inputParams.m_InputToCellWeights);
-
- CheckConstTensorPtrs("InputToOutputWeights",
- m_InputParams.m_InputToOutputWeights,
- inputParams.m_InputToOutputWeights);
-
- CheckConstTensorPtrs("RecurrentToInputWeights",
- m_InputParams.m_RecurrentToInputWeights,
- inputParams.m_RecurrentToInputWeights);
-
- CheckConstTensorPtrs("RecurrentToForgetWeights",
- m_InputParams.m_RecurrentToForgetWeights,
- inputParams.m_RecurrentToForgetWeights);
-
- CheckConstTensorPtrs("RecurrentToCellWeights",
- m_InputParams.m_RecurrentToCellWeights,
- inputParams.m_RecurrentToCellWeights);
-
- CheckConstTensorPtrs("RecurrentToOutputWeights",
- m_InputParams.m_RecurrentToOutputWeights,
- inputParams.m_RecurrentToOutputWeights);
-
- CheckConstTensorPtrs("CellToInputWeights",
- m_InputParams.m_CellToInputWeights,
- inputParams.m_CellToInputWeights);
-
- CheckConstTensorPtrs("CellToForgetWeights",
- m_InputParams.m_CellToForgetWeights,
- inputParams.m_CellToForgetWeights);
-
- CheckConstTensorPtrs("CellToOutputWeights",
- m_InputParams.m_CellToOutputWeights,
- inputParams.m_CellToOutputWeights);
-
- CheckConstTensorPtrs("ProjectionWeights", m_InputParams.m_ProjectionWeights, inputParams.m_ProjectionWeights);
- CheckConstTensorPtrs("ProjectionBias", m_InputParams.m_ProjectionBias, inputParams.m_ProjectionBias);
-
- CheckConstTensorPtrs("InputGateBias", m_InputParams.m_InputGateBias, inputParams.m_InputGateBias);
- CheckConstTensorPtrs("ForgetGateBias", m_InputParams.m_ForgetGateBias, inputParams.m_ForgetGateBias);
- CheckConstTensorPtrs("CellBias", m_InputParams.m_CellBias, inputParams.m_CellBias);
- CheckConstTensorPtrs("OutputGateBias", m_InputParams.m_OutputGateBias, inputParams.m_OutputGateBias);
-
- CheckConstTensorPtrs("InputLayerNormWeights",
- m_InputParams.m_InputLayerNormWeights,
- inputParams.m_InputLayerNormWeights);
-
- CheckConstTensorPtrs("ForgetLayerNormWeights",
- m_InputParams.m_ForgetLayerNormWeights,
- inputParams.m_ForgetLayerNormWeights);
-
- CheckConstTensorPtrs("CellLayerNormWeights",
- m_InputParams.m_CellLayerNormWeights,
- inputParams.m_CellLayerNormWeights);
-
- CheckConstTensorPtrs("OutputLayerNormWeights",
- m_InputParams.m_OutputLayerNormWeights,
- inputParams.m_OutputLayerNormWeights);
-}
-
-void TestQuantizedLstmLayerVisitor::CheckConstTensorPtrs(const std::string& name,
- const ConstTensor* expected,
- const ConstTensor* actual)
-{
- if (expected == nullptr)
- {
- CHECK_MESSAGE(actual == nullptr, name + " actual should have been a nullptr");
- }
- else
- {
- CHECK_MESSAGE(actual != nullptr, name + " actual should have been set");
- if (actual != nullptr)
- {
- CheckConstTensors(*expected, *actual);
- }
- }
-}
-
void TestQuantizedLstmLayerVisitor::CheckInputParameters(const QuantizedLstmInputParams& inputParams)
{
CheckConstTensorPtrs("InputToInputWeights",
@@ -285,7 +129,7 @@ TEST_CASE("CheckConvolution2dLayer")
NetworkImpl net;
IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, EmptyOptional());
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckNamedConvolution2dLayer")
@@ -309,7 +153,7 @@ TEST_CASE("CheckNamedConvolution2dLayer")
NetworkImpl net;
IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, EmptyOptional(), layerName);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckConvolution2dLayerWithBiases")
@@ -338,7 +182,7 @@ TEST_CASE("CheckConvolution2dLayerWithBiases")
NetworkImpl net;
IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, optionalBiases);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckNamedConvolution2dLayerWithBiases")
@@ -368,7 +212,7 @@ TEST_CASE("CheckNamedConvolution2dLayerWithBiases")
NetworkImpl net;
IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, optionalBiases, layerName);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckDepthwiseConvolution2dLayer")
@@ -391,7 +235,7 @@ TEST_CASE("CheckDepthwiseConvolution2dLayer")
NetworkImpl net;
IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, EmptyOptional());
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckNamedDepthwiseConvolution2dLayer")
@@ -418,7 +262,7 @@ TEST_CASE("CheckNamedDepthwiseConvolution2dLayer")
weights,
EmptyOptional(),
layerName);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckDepthwiseConvolution2dLayerWithBiases")
@@ -447,7 +291,7 @@ TEST_CASE("CheckDepthwiseConvolution2dLayerWithBiases")
NetworkImpl net;
IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, optionalBiases);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckNamedDepthwiseConvolution2dLayerWithBiases")
@@ -477,7 +321,7 @@ TEST_CASE("CheckNamedDepthwiseConvolution2dLayerWithBiases")
NetworkImpl net;
IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, optionalBiases, layerName);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckFullyConnectedLayer")
@@ -500,8 +344,8 @@ TEST_CASE("CheckFullyConnectedLayer")
IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor);
weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
- weightsLayer->Accept(weightsVisitor);
- layer->Accept(visitor);
+ weightsLayer->ExecuteStrategy(weightsVisitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckNamedFullyConnectedLayer")
@@ -525,8 +369,8 @@ TEST_CASE("CheckNamedFullyConnectedLayer")
IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, layerName);
weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
- weightsLayer->Accept(weightsVisitor);
- layer->Accept(visitor);
+ weightsLayer->ExecuteStrategy(weightsVisitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckFullyConnectedLayerWithBiases")
@@ -556,9 +400,9 @@ TEST_CASE("CheckFullyConnectedLayerWithBiases")
weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
biasesLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2));
- weightsLayer->Accept(weightsVisitor);
- biasesLayer->Accept(biasesVisitor);
- layer->Accept(visitor);
+ weightsLayer->ExecuteStrategy(weightsVisitor);
+ biasesLayer->ExecuteStrategy(biasesVisitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckNamedFullyConnectedLayerWithBiases")
@@ -589,9 +433,9 @@ TEST_CASE("CheckNamedFullyConnectedLayerWithBiases")
weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
biasesLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2));
- weightsLayer->Accept(weightsVisitor);
- biasesLayer->Accept(biasesVisitor);
- layer->Accept(visitor);
+ weightsLayer->ExecuteStrategy(weightsVisitor);
+ biasesLayer->ExecuteStrategy(biasesVisitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckBatchNormalizationLayer")
@@ -621,7 +465,7 @@ TEST_CASE("CheckBatchNormalizationLayer")
NetworkImpl net;
IConnectableLayer* const layer = net.AddBatchNormalizationLayer(descriptor, mean, variance, beta, gamma);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckNamedBatchNormalizationLayer")
@@ -653,7 +497,7 @@ TEST_CASE("CheckNamedBatchNormalizationLayer")
IConnectableLayer* const layer = net.AddBatchNormalizationLayer(
descriptor, mean, variance, beta, gamma, layerName);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckConstLayer")
@@ -667,7 +511,7 @@ TEST_CASE("CheckConstLayer")
NetworkImpl net;
IConnectableLayer* const layer = net.AddConstantLayer(input);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckNamedConstLayer")
@@ -682,7 +526,7 @@ TEST_CASE("CheckNamedConstLayer")
NetworkImpl net;
IConnectableLayer* const layer = net.AddConstantLayer(input, layerName);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckLstmLayerBasic")
@@ -754,7 +598,7 @@ TEST_CASE("CheckLstmLayerBasic")
NetworkImpl net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckNamedLstmLayerBasic")
@@ -827,7 +671,7 @@ TEST_CASE("CheckNamedLstmLayerBasic")
NetworkImpl net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckLstmLayerCifgDisabled")
@@ -918,7 +762,7 @@ TEST_CASE("CheckLstmLayerCifgDisabled")
NetworkImpl net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckNamedLstmLayerCifgDisabled")
@@ -1010,7 +854,7 @@ TEST_CASE("CheckNamedLstmLayerCifgDisabled")
NetworkImpl net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
// TODO add one with peephole
@@ -1097,7 +941,7 @@ TEST_CASE("CheckLstmLayerPeephole")
NetworkImpl net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckLstmLayerPeepholeCifgDisabled")
@@ -1211,7 +1055,7 @@ TEST_CASE("CheckLstmLayerPeepholeCifgDisabled")
NetworkImpl net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckNamedLstmLayerPeephole")
@@ -1298,7 +1142,7 @@ TEST_CASE("CheckNamedLstmLayerPeephole")
NetworkImpl net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
// TODO add one with projection
@@ -1385,7 +1229,7 @@ TEST_CASE("CheckLstmLayerProjection")
NetworkImpl net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckNamedLstmLayerProjection")
@@ -1472,7 +1316,7 @@ TEST_CASE("CheckNamedLstmLayerProjection")
NetworkImpl net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckQLstmLayerBasic")
@@ -1544,7 +1388,7 @@ TEST_CASE("CheckQLstmLayerBasic")
NetworkImpl net;
IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckNamedQLstmLayerBasic")
@@ -1617,7 +1461,7 @@ TEST_CASE("CheckNamedQLstmLayerBasic")
NetworkImpl net;
IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params, layerName);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckQLstmLayerCifgDisabled")
@@ -1712,7 +1556,7 @@ TEST_CASE("CheckQLstmLayerCifgDisabled")
NetworkImpl net;
IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckQLstmLayerCifgDisabledPeepholeEnabled")
@@ -1829,7 +1673,7 @@ TEST_CASE("CheckQLstmLayerCifgDisabledPeepholeEnabled")
NetworkImpl net;
IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckQLstmLayerCifgEnabledPeepholeEnabled")
@@ -1919,7 +1763,7 @@ TEST_CASE("CheckQLstmLayerCifgEnabledPeepholeEnabled")
NetworkImpl net;
IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckQLstmLayerProjectionEnabled")
@@ -2009,7 +1853,7 @@ TEST_CASE("CheckQLstmLayerProjectionEnabled")
NetworkImpl net;
IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckQLstmLayerCifgDisabledLayerNormEnabled")
@@ -2132,7 +1976,7 @@ TEST_CASE("CheckQLstmLayerCifgDisabledLayerNormEnabled")
NetworkImpl net;
IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
@@ -2222,7 +2066,7 @@ TEST_CASE("CheckQuantizedLstmLayer")
NetworkImpl net;
IConnectableLayer* const layer = net.AddQuantizedLstmLayer(params);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckNamedQuantizedLstmLayer")
@@ -2312,7 +2156,7 @@ TEST_CASE("CheckNamedQuantizedLstmLayer")
NetworkImpl net;
IConnectableLayer* const layer = net.AddQuantizedLstmLayer(params, layerName);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
}
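For reference, every call-site change above (layer->Accept(visitor) becoming layer->ExecuteStrategy(visitor)) follows the same pattern: the checks live in an armnn::IStrategy implementation, typically derived from the new StrategyBase<NoThrowStrategy>, and the graph or an individual layer is then walked with ExecuteStrategy. Below is a minimal free-standing sketch of that pattern, assuming only the interfaces used in this patch; the ActivationChecker name, the "relu" layer and the tiny main() are invented for illustration.

#include <armnn/Descriptors.hpp>
#include <armnn/INetwork.hpp>
#include <armnn/StrategyBase.hpp>
#include <armnn/Types.hpp>
#include <armnn/utility/IgnoreUnused.hpp>

#include <string>
#include <vector>

// Checks a single Activation layer; every other layer type is simply ignored.
struct ActivationChecker : public armnn::StrategyBase<armnn::NoThrowStrategy>
{
    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
                         const armnn::BaseDescriptor& descriptor,
                         const std::vector<armnn::ConstTensor>& constants,
                         const char* name,
                         const armnn::LayerBindingId id = 0) override
    {
        armnn::IgnoreUnused(constants, id);
        if (layer->GetType() == armnn::LayerType::Activation)
        {
            // The descriptor arrives as a BaseDescriptor and is cast back by layer
            // type, just as the test visitors above do.
            const auto& desc = static_cast<const armnn::ActivationDescriptor&>(descriptor);
            m_Checked = (desc.m_Function == armnn::ActivationFunction::ReLu)
                        && (name != nullptr)
                        && (std::string(name) == "relu");
        }
    }

    bool m_Checked = false;
};

int main()
{
    armnn::INetworkPtr net = armnn::INetwork::Create();

    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::ReLu;
    net->AddActivationLayer(desc, "relu");

    ActivationChecker checker;
    net->ExecuteStrategy(checker);   // walks every layer; no Accept() involved

    return checker.m_Checked ? 0 : 1;
}

Unhandled layer types fall through untouched here; the test base class gets the same effect from StrategyBase<NoThrowStrategy> and its m_DefaultStrategy.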
diff --git a/src/armnn/test/ConstTensorLayerVisitor.hpp b/src/armnn/test/ConstTensorLayerVisitor.hpp
index 35e2e872f7..5538852b60 100644
--- a/src/armnn/test/ConstTensorLayerVisitor.hpp
+++ b/src/armnn/test/ConstTensorLayerVisitor.hpp
@@ -5,9 +5,14 @@
#pragma once
#include "TestLayerVisitor.hpp"
+#include "LayersFwd.hpp"
#include <armnn/Descriptors.hpp>
#include <armnn/LstmParams.hpp>
#include <armnn/QuantizedLstmParams.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
+#include <backendsCommon/TensorHandle.hpp>
+
+#include <doctest/doctest.h>
namespace armnn
{
@@ -27,17 +32,33 @@ public:
virtual ~TestConvolution2dLayerVisitor() {}
- void VisitConvolution2dLayer(const IConnectableLayer* layer,
- const Convolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char* name = nullptr) override
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+ const armnn::BaseDescriptor& descriptor,
+ const std::vector<armnn::ConstTensor>& constants,
+ const char* name,
+ const armnn::LayerBindingId id = 0) override
{
- CheckLayerPointer(layer);
- CheckLayerName(name);
- CheckDescriptor(convolution2dDescriptor);
- CheckConstTensors(m_Weights, weights);
- CheckOptionalConstTensors(m_Biases, biases);
+ armnn::IgnoreUnused(descriptor, constants, id);
+ switch (layer->GetType())
+ {
+ case armnn::LayerType::Convolution2d:
+ {
+ CheckLayerPointer(layer);
+ CheckLayerName(name);
+ CheckDescriptor(static_cast<const armnn::Convolution2dDescriptor&>(descriptor));
+ CheckConstTensors(m_Weights, constants[0]);
+ if (m_Biases.has_value())
+ {
+ CHECK(constants.size() == 2);
+ CheckConstTensors(m_Biases.value(), constants[1]);
+ }
+ break;
+ }
+ default:
+ {
+ m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+ }
+ }
}
protected:
@@ -64,17 +85,33 @@ public:
virtual ~TestDepthwiseConvolution2dLayerVisitor() {}
- void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char* name = nullptr) override
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+ const armnn::BaseDescriptor& descriptor,
+ const std::vector<armnn::ConstTensor>& constants,
+ const char* name,
+ const armnn::LayerBindingId id = 0) override
{
- CheckLayerPointer(layer);
- CheckLayerName(name);
- CheckDescriptor(convolution2dDescriptor);
- CheckConstTensors(m_Weights, weights);
- CheckOptionalConstTensors(m_Biases, biases);
+ armnn::IgnoreUnused(descriptor, constants, id);
+ switch (layer->GetType())
+ {
+ case armnn::LayerType::DepthwiseConvolution2d:
+ {
+ CheckLayerPointer(layer);
+ CheckLayerName(name);
+ CheckDescriptor(static_cast<const armnn::DepthwiseConvolution2dDescriptor&>(descriptor));
+ CheckConstTensors(m_Weights, constants[0]);
+ if (m_Biases.has_value())
+ {
+ CHECK(constants.size() == 2);
+ CheckConstTensors(m_Biases.value(), constants[1]);
+ }
+ break;
+ }
+ default:
+ {
+ m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+ }
+ }
}
protected:
@@ -97,13 +134,27 @@ public:
virtual ~TestFullyConnectedLayerVistor() {}
- void VisitFullyConnectedLayer(const IConnectableLayer* layer,
- const FullyConnectedDescriptor& fullyConnectedDescriptor,
- const char* name = nullptr) override
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+ const armnn::BaseDescriptor& descriptor,
+ const std::vector<armnn::ConstTensor>& constants,
+ const char* name,
+ const armnn::LayerBindingId id = 0) override
{
- CheckLayerPointer(layer);
- CheckLayerName(name);
- CheckDescriptor(fullyConnectedDescriptor);
+ armnn::IgnoreUnused(descriptor, constants, id);
+ switch (layer->GetType())
+ {
+ case armnn::LayerType::FullyConnected:
+ {
+ CheckLayerPointer(layer);
+ CheckLayerName(name);
+ CheckDescriptor(static_cast<const armnn::FullyConnectedDescriptor&>(descriptor));
+ break;
+ }
+ default:
+ {
+ m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+ }
+ }
}
protected:
@@ -129,21 +180,31 @@ public:
, m_Gamma(gamma)
{}
- void VisitBatchNormalizationLayer(const IConnectableLayer* layer,
- const BatchNormalizationDescriptor& descriptor,
- const ConstTensor& mean,
- const ConstTensor& variance,
- const ConstTensor& beta,
- const ConstTensor& gamma,
- const char* name = nullptr) override
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+ const armnn::BaseDescriptor& descriptor,
+ const std::vector<armnn::ConstTensor>& constants,
+ const char* name,
+ const armnn::LayerBindingId id = 0) override
{
- CheckLayerPointer(layer);
- CheckLayerName(name);
- CheckDescriptor(descriptor);
- CheckConstTensors(m_Mean, mean);
- CheckConstTensors(m_Variance, variance);
- CheckConstTensors(m_Beta, beta);
- CheckConstTensors(m_Gamma, gamma);
+ armnn::IgnoreUnused(descriptor, constants, id);
+ switch (layer->GetType())
+ {
+ case armnn::LayerType::BatchNormalization:
+ {
+ CheckLayerPointer(layer);
+ CheckLayerName(name);
+ CheckDescriptor(static_cast<const armnn::BatchNormalizationDescriptor&>(descriptor));
+ CheckConstTensors(m_Mean, constants[0]);
+ CheckConstTensors(m_Variance, constants[1]);
+ CheckConstTensors(m_Beta, constants[2]);
+ CheckConstTensors(m_Gamma, constants[3]);
+ break;
+ }
+ default:
+ {
+ m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+ }
+ }
}
protected:
@@ -166,81 +227,201 @@ public:
, m_Input(input)
{}
- void VisitConstantLayer(const IConnectableLayer* layer,
- const ConstTensor& input,
- const char* name = nullptr)
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+ const armnn::BaseDescriptor& descriptor,
+ const std::vector<armnn::ConstTensor>& constants,
+ const char* name,
+ const armnn::LayerBindingId id = 0) override
{
- CheckLayerPointer(layer);
- CheckLayerName(name);
- CheckConstTensors(m_Input, input);
+ armnn::IgnoreUnused(descriptor, constants, id);
+ switch (layer->GetType())
+ {
+ case armnn::LayerType::Constant:
+ {
+ CheckLayerPointer(layer);
+ CheckLayerName(name);
+ CheckConstTensors(m_Input, constants[0]);
+ break;
+ }
+ default:
+ {
+ m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+ }
+ }
}
private:
ConstTensor m_Input;
};
-class TestLstmLayerVisitor : public TestLayerVisitor
+// Used to supply utility functions to the concrete LSTM test visitors
+class LstmVisitor : public TestLayerVisitor
+{
+public:
+ explicit LstmVisitor(const LstmInputParams& params,
+ const char* name = nullptr)
+ : TestLayerVisitor(name)
+ , m_InputParams(params) {}
+
+protected:
+ template<typename LayerType>
+ void CheckInputParameters(const LayerType* layer, const LstmInputParams& inputParams);
+
+ LstmInputParams m_InputParams;
+};
+
+template<typename LayerType>
+void LstmVisitor::CheckInputParameters(const LayerType* layer, const LstmInputParams& inputParams)
+{
+ CheckConstTensorPtrs("OutputGateBias",
+ inputParams.m_OutputGateBias,
+ layer->m_BasicParameters.m_OutputGateBias);
+ CheckConstTensorPtrs("InputToForgetWeights",
+ inputParams.m_InputToForgetWeights,
+ layer->m_BasicParameters.m_InputToForgetWeights);
+ CheckConstTensorPtrs("InputToCellWeights",
+ inputParams.m_InputToCellWeights,
+ layer->m_BasicParameters.m_InputToCellWeights);
+ CheckConstTensorPtrs("InputToOutputWeights",
+ inputParams.m_InputToOutputWeights,
+ layer->m_BasicParameters.m_InputToOutputWeights);
+ CheckConstTensorPtrs("RecurrentToForgetWeights",
+ inputParams.m_RecurrentToForgetWeights,
+ layer->m_BasicParameters.m_RecurrentToForgetWeights);
+ CheckConstTensorPtrs("RecurrentToCellWeights",
+ inputParams.m_RecurrentToCellWeights,
+ layer->m_BasicParameters.m_RecurrentToCellWeights);
+ CheckConstTensorPtrs("RecurrentToOutputWeights",
+ inputParams.m_RecurrentToOutputWeights,
+ layer->m_BasicParameters.m_RecurrentToOutputWeights);
+ CheckConstTensorPtrs("ForgetGateBias",
+ inputParams.m_ForgetGateBias,
+ layer->m_BasicParameters.m_ForgetGateBias);
+ CheckConstTensorPtrs("CellBias",
+ inputParams.m_CellBias,
+ layer->m_BasicParameters.m_CellBias);
+
+ CheckConstTensorPtrs("InputToInputWeights",
+ inputParams.m_InputToInputWeights,
+ layer->m_CifgParameters.m_InputToInputWeights);
+ CheckConstTensorPtrs("RecurrentToInputWeights",
+ inputParams.m_RecurrentToInputWeights,
+ layer->m_CifgParameters.m_RecurrentToInputWeights);
+ CheckConstTensorPtrs("InputGateBias",
+ inputParams.m_InputGateBias,
+ layer->m_CifgParameters.m_InputGateBias);
+
+ CheckConstTensorPtrs("ProjectionBias",
+ inputParams.m_ProjectionBias,
+ layer->m_ProjectionParameters.m_ProjectionBias);
+ CheckConstTensorPtrs("ProjectionWeights",
+ inputParams.m_ProjectionWeights,
+ layer->m_ProjectionParameters.m_ProjectionWeights);
+
+ CheckConstTensorPtrs("CellToInputWeights",
+ inputParams.m_CellToInputWeights,
+ layer->m_PeepholeParameters.m_CellToInputWeights);
+ CheckConstTensorPtrs("CellToForgetWeights",
+ inputParams.m_CellToForgetWeights,
+ layer->m_PeepholeParameters.m_CellToForgetWeights);
+ CheckConstTensorPtrs("CellToOutputWeights",
+ inputParams.m_CellToOutputWeights,
+ layer->m_PeepholeParameters.m_CellToOutputWeights);
+
+ CheckConstTensorPtrs("InputLayerNormWeights",
+ inputParams.m_InputLayerNormWeights,
+ layer->m_LayerNormParameters.m_InputLayerNormWeights);
+ CheckConstTensorPtrs("ForgetLayerNormWeights",
+ inputParams.m_ForgetLayerNormWeights,
+ layer->m_LayerNormParameters.m_ForgetLayerNormWeights);
+ CheckConstTensorPtrs("CellLayerNormWeights",
+ inputParams.m_CellLayerNormWeights,
+ layer->m_LayerNormParameters.m_CellLayerNormWeights);
+ CheckConstTensorPtrs("OutputLayerNormWeights",
+ inputParams.m_OutputLayerNormWeights,
+ layer->m_LayerNormParameters.m_OutputLayerNormWeights);
+}
+
+class TestLstmLayerVisitor : public LstmVisitor
{
public:
explicit TestLstmLayerVisitor(const LstmDescriptor& descriptor,
const LstmInputParams& params,
const char* name = nullptr)
- : TestLayerVisitor(name)
+ : LstmVisitor(params, name)
, m_Descriptor(descriptor)
- , m_InputParams(params)
{}
- void VisitLstmLayer(const IConnectableLayer* layer,
- const LstmDescriptor& descriptor,
- const LstmInputParams& params,
- const char* name = nullptr)
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+ const armnn::BaseDescriptor& descriptor,
+ const std::vector<armnn::ConstTensor>& constants,
+ const char* name,
+ const armnn::LayerBindingId id = 0) override
{
- CheckLayerPointer(layer);
- CheckLayerName(name);
- CheckDescriptor(descriptor);
- CheckInputParameters(params);
+ armnn::IgnoreUnused(descriptor, constants, id);
+ switch (layer->GetType())
+ {
+ case armnn::LayerType::Lstm:
+ {
+ CheckLayerPointer(layer);
+ CheckLayerName(name);
+ CheckDescriptor(static_cast<const armnn::LstmDescriptor&>(descriptor));
+ CheckInputParameters<const LstmLayer>(PolymorphicDowncast<const LstmLayer*>(layer), m_InputParams);
+ break;
+ }
+ default:
+ {
+ m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+ }
+ }
}
protected:
void CheckDescriptor(const LstmDescriptor& descriptor);
- void CheckInputParameters(const LstmInputParams& inputParams);
- void CheckConstTensorPtrs(const std::string& name, const ConstTensor* expected, const ConstTensor* actual);
private:
LstmDescriptor m_Descriptor;
- LstmInputParams m_InputParams;
};
-class TestQLstmLayerVisitor : public TestLayerVisitor
+class TestQLstmLayerVisitor : public LstmVisitor
{
public:
explicit TestQLstmLayerVisitor(const QLstmDescriptor& descriptor,
const LstmInputParams& params,
const char* name = nullptr)
- : TestLayerVisitor(name)
+ : LstmVisitor(params, name)
, m_Descriptor(descriptor)
- , m_InputParams(params)
{}
- void VisitQLstmLayer(const IConnectableLayer* layer,
- const QLstmDescriptor& descriptor,
- const LstmInputParams& params,
- const char* name = nullptr)
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+ const armnn::BaseDescriptor& descriptor,
+ const std::vector<armnn::ConstTensor>& constants,
+ const char* name,
+ const armnn::LayerBindingId id = 0) override
{
- CheckLayerPointer(layer);
- CheckLayerName(name);
- CheckDescriptor(descriptor);
- CheckInputParameters(params);
+ armnn::IgnoreUnused(descriptor, constants, id);
+ switch (layer->GetType())
+ {
+ case armnn::LayerType::QLstm:
+ {
+ CheckLayerPointer(layer);
+ CheckLayerName(name);
+ CheckDescriptor(static_cast<const armnn::QLstmDescriptor&>(descriptor));
+ CheckInputParameters<const QLstmLayer>(PolymorphicDowncast<const QLstmLayer*>(layer), m_InputParams);
+ break;
+ }
+ default:
+ {
+ m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+ }
+ }
}
protected:
void CheckDescriptor(const QLstmDescriptor& descriptor);
- void CheckInputParameters(const LstmInputParams& inputParams);
- void CheckConstTensorPtrs(const std::string& name, const ConstTensor* expected, const ConstTensor* actual);
private:
QLstmDescriptor m_Descriptor;
- LstmInputParams m_InputParams;
};
@@ -253,18 +434,31 @@ public:
, m_InputParams(params)
{}
- void VisitQuantizedLstmLayer(const IConnectableLayer* layer,
- const QuantizedLstmInputParams& params,
- const char* name = nullptr)
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+ const armnn::BaseDescriptor& descriptor,
+ const std::vector<armnn::ConstTensor>& constants,
+ const char* name,
+ const armnn::LayerBindingId id = 0) override
{
- CheckLayerPointer(layer);
- CheckLayerName(name);
- CheckInputParameters(params);
+ armnn::IgnoreUnused(descriptor, constants, id);
+ switch (layer->GetType())
+ {
+ case armnn::LayerType::QuantizedLstm:
+ {
+ CheckLayerPointer(layer);
+ CheckLayerName(name);
+ CheckInputParameters(m_InputParams);
+ break;
+ }
+ default:
+ {
+ m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+ }
+ }
}
protected:
- void CheckInputParameters(const QuantizedLstmInputParams& inputParams);
- void CheckConstTensorPtrs(const std::string& name, const ConstTensor* expected, const ConstTensor* actual);
+ void CheckInputParameters(const QuantizedLstmInputParams& params);
private:
QuantizedLstmInputParams m_InputParams;
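One consequence visible throughout the checkers above: the dedicated weights/biases arguments of the old Visit* callbacks are gone, and constant tensors now arrive through the constants vector (for Convolution2d the checker relies on constants[0] being the weights and constants[1] the optional bias). A condensed sketch of reading them outside the test harness follows, assuming the same interfaces; the Conv2dConstantInspector name and the printout are invented.

#include <armnn/INetwork.hpp>
#include <armnn/StrategyBase.hpp>
#include <armnn/Types.hpp>
#include <armnn/utility/IgnoreUnused.hpp>

#include <iostream>
#include <vector>

struct Conv2dConstantInspector : public armnn::StrategyBase<armnn::NoThrowStrategy>
{
    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
                         const armnn::BaseDescriptor& descriptor,
                         const std::vector<armnn::ConstTensor>& constants,
                         const char* name,
                         const armnn::LayerBindingId id = 0) override
    {
        armnn::IgnoreUnused(descriptor, id);
        if (layer->GetType() != armnn::LayerType::Convolution2d)
        {
            return; // other layer types are of no interest here
        }
        std::cout << (name ? name : "<unnamed conv>") << ": "
                  << constants.size() << " constant tensor(s)\n";
        if (!constants.empty())
        {
            // constants[0] holds the weights; constants[1], if present, the bias
            std::cout << "  weights: " << constants[0].GetNumElements()
                      << " elements, " << constants[0].GetNumBytes() << " bytes\n";
        }
    }
};

An instance can be handed to INetwork::ExecuteStrategy() or to a single layer's ExecuteStrategy(), exactly as the updated TEST_CASEs in ConstTensorLayerVisitor.cpp do.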
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index 9acb60df4a..25dab596fd 100644
--- a/src/armnn/test/NetworkTests.cpp
+++ b/src/armnn/test/NetworkTests.cpp
@@ -398,26 +398,44 @@ TEST_CASE("NetworkModification_SplitterMultiplication")
TEST_CASE("Network_AddQuantize")
{
- struct Test : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
+ struct Test : public armnn::IStrategy
{
- void VisitQuantizeLayer(const armnn::IConnectableLayer* layer, const char* name) override
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+ const armnn::BaseDescriptor& descriptor,
+ const std::vector<armnn::ConstTensor>& constants,
+ const char* name,
+ const armnn::LayerBindingId id = 0) override
{
- m_Visited = true;
-
- CHECK(layer);
-
- std::string expectedName = std::string("quantize");
- CHECK(std::string(layer->GetName()) == expectedName);
- CHECK(std::string(name) == expectedName);
-
- CHECK(layer->GetNumInputSlots() == 1);
- CHECK(layer->GetNumOutputSlots() == 1);
-
- const armnn::TensorInfo& infoIn = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
- CHECK((infoIn.GetDataType() == armnn::DataType::Float32));
-
- const armnn::TensorInfo& infoOut = layer->GetOutputSlot(0).GetTensorInfo();
- CHECK((infoOut.GetDataType() == armnn::DataType::QAsymmU8));
+ armnn::IgnoreUnused(descriptor, constants, id);
+ switch (layer->GetType())
+ {
+ case armnn::LayerType::Input: break;
+ case armnn::LayerType::Output: break;
+ case armnn::LayerType::Quantize:
+ {
+ m_Visited = true;
+
+ CHECK(layer);
+
+ std::string expectedName = std::string("quantize");
+ CHECK(std::string(layer->GetName()) == expectedName);
+ CHECK(std::string(name) == expectedName);
+
+ CHECK(layer->GetNumInputSlots() == 1);
+ CHECK(layer->GetNumOutputSlots() == 1);
+
+ const armnn::TensorInfo& infoIn = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
+ CHECK((infoIn.GetDataType() == armnn::DataType::Float32));
+
+ const armnn::TensorInfo& infoOut = layer->GetOutputSlot(0).GetTensorInfo();
+ CHECK((infoOut.GetDataType() == armnn::DataType::QAsymmU8));
+ break;
+ }
+ default:
+ {
+ // nothing
+ }
+ }
}
bool m_Visited = false;
@@ -440,7 +458,7 @@ TEST_CASE("Network_AddQuantize")
quantize->GetOutputSlot(0).SetTensorInfo(infoOut);
Test testQuantize;
- graph->Accept(testQuantize);
+ graph->ExecuteStrategy(testQuantize);
CHECK(testQuantize.m_Visited == true);
@@ -448,29 +466,47 @@ TEST_CASE("Network_AddQuantize")
TEST_CASE("Network_AddMerge")
{
- struct Test : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
+ struct Test : public armnn::IStrategy
{
- void VisitMergeLayer(const armnn::IConnectableLayer* layer, const char* name) override
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+ const armnn::BaseDescriptor& descriptor,
+ const std::vector<armnn::ConstTensor>& constants,
+ const char* name,
+ const armnn::LayerBindingId id = 0) override
{
- m_Visited = true;
-
- CHECK(layer);
-
- std::string expectedName = std::string("merge");
- CHECK(std::string(layer->GetName()) == expectedName);
- CHECK(std::string(name) == expectedName);
-
- CHECK(layer->GetNumInputSlots() == 2);
- CHECK(layer->GetNumOutputSlots() == 1);
-
- const armnn::TensorInfo& infoIn0 = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
- CHECK((infoIn0.GetDataType() == armnn::DataType::Float32));
-
- const armnn::TensorInfo& infoIn1 = layer->GetInputSlot(1).GetConnection()->GetTensorInfo();
- CHECK((infoIn1.GetDataType() == armnn::DataType::Float32));
-
- const armnn::TensorInfo& infoOut = layer->GetOutputSlot(0).GetTensorInfo();
- CHECK((infoOut.GetDataType() == armnn::DataType::Float32));
+ armnn::IgnoreUnused(descriptor, constants, id);
+ switch (layer->GetType())
+ {
+ case armnn::LayerType::Input: break;
+ case armnn::LayerType::Output: break;
+ case armnn::LayerType::Merge:
+ {
+ m_Visited = true;
+
+ CHECK(layer);
+
+ std::string expectedName = std::string("merge");
+ CHECK(std::string(layer->GetName()) == expectedName);
+ CHECK(std::string(name) == expectedName);
+
+ CHECK(layer->GetNumInputSlots() == 2);
+ CHECK(layer->GetNumOutputSlots() == 1);
+
+ const armnn::TensorInfo& infoIn0 = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
+ CHECK((infoIn0.GetDataType() == armnn::DataType::Float32));
+
+ const armnn::TensorInfo& infoIn1 = layer->GetInputSlot(1).GetConnection()->GetTensorInfo();
+ CHECK((infoIn1.GetDataType() == armnn::DataType::Float32));
+
+ const armnn::TensorInfo& infoOut = layer->GetOutputSlot(0).GetTensorInfo();
+ CHECK((infoOut.GetDataType() == armnn::DataType::Float32));
+ break;
+ }
+ default:
+ {
+ // nothing
+ }
+ }
}
bool m_Visited = false;
@@ -493,7 +529,7 @@ TEST_CASE("Network_AddMerge")
merge->GetOutputSlot(0).SetTensorInfo(info);
Test testMerge;
- network->Accept(testMerge);
+ network->ExecuteStrategy(testMerge);
CHECK(testMerge.m_Visited == true);
}
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index 66da3ad1ff..8416a8dd0d 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -13,13 +13,12 @@
#include <armnn/BackendHelper.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/INetwork.hpp>
-#include <armnn/LayerVisitorBase.hpp>
+#include <armnn/StrategyBase.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <armnnUtils/FloatingPointConverter.hpp>
+#include <armnn/backends/IBackendInternal.hpp>
-#include <backendsCommon/IBackendInternal.hpp>
#include <backendsCommon/LayerSupportBase.hpp>
#include <backendsCommon/TensorHandle.hpp>
@@ -201,10 +200,6 @@ public:
return nullptr;
}
- IBackendInternal::Optimizations GetOptimizations() const override
- {
- return {};
- }
IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override
{
return std::make_shared<MockLayerSupport>();
@@ -265,10 +260,6 @@ public:
return nullptr;
}
- IBackendInternal::Optimizations GetOptimizations() const override
- {
- return {};
- }
IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override
{
return std::make_shared<MockLayerSupport>();
@@ -707,30 +698,42 @@ TEST_CASE("BackendCapabilityTest")
TEST_CASE("BackendHintTest")
{
- class TestBackendAssignment : public LayerVisitorBase<VisitorNoThrowPolicy>
+ class TestBackendAssignment : public StrategyBase<NoThrowStrategy>
{
public:
- void VisitInputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name = nullptr) override
- {
- IgnoreUnused(id, name);
- auto inputLayer = PolymorphicDowncast<const InputLayer*>(layer);
- CHECK((inputLayer->GetBackendId() == "MockBackend"));
- }
-
- void VisitOutputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name = nullptr) override
- {
- IgnoreUnused(id, name);
- auto outputLayer = PolymorphicDowncast<const OutputLayer*>(layer);
- CHECK((outputLayer->GetBackendId() == "MockBackend"));
- }
- void VisitActivationLayer(const IConnectableLayer* layer,
- const ActivationDescriptor& activationDescriptor,
- const char* name = nullptr) override
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+ const armnn::BaseDescriptor& descriptor,
+ const std::vector<armnn::ConstTensor>& constants,
+ const char* name,
+ const armnn::LayerBindingId id = 0) override
{
- IgnoreUnused(activationDescriptor, name);
- auto activation = PolymorphicDowncast<const ActivationLayer*>(layer);
- CHECK((activation->GetBackendId() == "CustomBackend"));
+ armnn::IgnoreUnused(descriptor, constants, id, name);
+ switch (layer->GetType())
+ {
+ case armnn::LayerType::Input:
+ {
+ auto inputLayer = PolymorphicDowncast<const InputLayer*>(layer);
+ CHECK((inputLayer->GetBackendId() == "MockBackend"));
+ break;
+ }
+ case armnn::LayerType::Output:
+ {
+ auto outputLayer = PolymorphicDowncast<const OutputLayer*>(layer);
+ CHECK((outputLayer->GetBackendId() == "MockBackend"));
+ break;
+ }
+ case armnn::LayerType::Activation:
+ {
+ auto activation = PolymorphicDowncast<const ActivationLayer*>(layer);
+ CHECK((activation->GetBackendId() == "CustomBackend"));
+ break;
+ }
+ default:
+ {
+ m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+ }
+ }
}
};
@@ -802,7 +805,7 @@ TEST_CASE("BackendHintTest")
TestBackendAssignment visitor;
for (auto it = firstLayer; it != lastLayer; ++it)
{
- (*it)->Accept(visitor);
+ (*it)->ExecuteStrategy(visitor);
}
// Clean up the registry for the next test.
backendRegistry.Deregister("MockBackend");
diff --git a/src/armnn/test/TestInputOutputLayerVisitor.cpp b/src/armnn/test/TestInputOutputLayerVisitor.cpp
index 8462290f81..3b18e07694 100644
--- a/src/armnn/test/TestInputOutputLayerVisitor.cpp
+++ b/src/armnn/test/TestInputOutputLayerVisitor.cpp
@@ -19,7 +19,7 @@ TEST_CASE("CheckInputLayerVisitorBindingIdAndName")
NetworkImpl net;
IConnectableLayer *const layer = net.AddInputLayer(1, layerName);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckInputLayerVisitorBindingIdAndNameNull")
@@ -28,7 +28,7 @@ TEST_CASE("CheckInputLayerVisitorBindingIdAndNameNull")
NetworkImpl net;
IConnectableLayer *const layer = net.AddInputLayer(1);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckOutputLayerVisitorBindingIdAndName")
@@ -38,7 +38,7 @@ TEST_CASE("CheckOutputLayerVisitorBindingIdAndName")
NetworkImpl net;
IConnectableLayer *const layer = net.AddOutputLayer(1, layerName);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckOutputLayerVisitorBindingIdAndNameNull")
@@ -47,7 +47,7 @@ TEST_CASE("CheckOutputLayerVisitorBindingIdAndNameNull")
NetworkImpl net;
IConnectableLayer *const layer = net.AddOutputLayer(1);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
}
diff --git a/src/armnn/test/TestInputOutputLayerVisitor.hpp b/src/armnn/test/TestInputOutputLayerVisitor.hpp
index b89089530e..e812f2f97d 100644
--- a/src/armnn/test/TestInputOutputLayerVisitor.hpp
+++ b/src/armnn/test/TestInputOutputLayerVisitor.hpp
@@ -27,14 +27,28 @@ public:
, visitorId(id)
{};
- void VisitInputLayer(const IConnectableLayer* layer,
- LayerBindingId id,
- const char* name = nullptr) override
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+ const armnn::BaseDescriptor& descriptor,
+ const std::vector<armnn::ConstTensor>& constants,
+ const char* name,
+ const armnn::LayerBindingId id = 0) override
{
- CheckLayerPointer(layer);
- CheckLayerBindingId(visitorId, id);
- CheckLayerName(name);
- };
+ armnn::IgnoreUnused(descriptor, constants, id);
+ switch (layer->GetType())
+ {
+ case armnn::LayerType::Input:
+ {
+ CheckLayerPointer(layer);
+ CheckLayerBindingId(visitorId, id);
+ CheckLayerName(name);
+ break;
+ }
+ default:
+ {
+ m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+ }
+ }
+ }
};
class TestOutputLayerVisitor : public TestLayerVisitor
@@ -48,14 +62,28 @@ public:
, visitorId(id)
{};
- void VisitOutputLayer(const IConnectableLayer* layer,
- LayerBindingId id,
- const char* name = nullptr) override
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+ const armnn::BaseDescriptor& descriptor,
+ const std::vector<armnn::ConstTensor>& constants,
+ const char* name,
+ const armnn::LayerBindingId id = 0) override
{
- CheckLayerPointer(layer);
- CheckLayerBindingId(visitorId, id);
- CheckLayerName(name);
- };
+ armnn::IgnoreUnused(descriptor, constants, id);
+ switch (layer->GetType())
+ {
+ case armnn::LayerType::Output:
+ {
+ CheckLayerPointer(layer);
+ CheckLayerBindingId(visitorId, id);
+ CheckLayerName(name);
+ break;
+ }
+ default:
+ {
+ m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+ }
+ }
+ }
};
} //namespace armnn
diff --git a/src/armnn/test/TestLayerVisitor.cpp b/src/armnn/test/TestLayerVisitor.cpp
index ec405119d1..d5f705f0da 100644
--- a/src/armnn/test/TestLayerVisitor.cpp
+++ b/src/armnn/test/TestLayerVisitor.cpp
@@ -49,6 +49,62 @@ void TestLayerVisitor::CheckConstTensors(const ConstTensor& expected, const Cons
}
}
+void TestLayerVisitor::CheckConstTensors(const ConstTensor& expected, const ConstTensorHandle& actual)
+{
+ auto& actualInfo = actual.GetTensorInfo();
+ CHECK(expected.GetInfo() == actualInfo);
+ CHECK(expected.GetNumDimensions() == actualInfo.GetNumDimensions());
+ CHECK(expected.GetNumElements() == actualInfo.GetNumElements());
+ CHECK(expected.GetNumBytes() == actualInfo.GetNumBytes());
+ if (expected.GetNumBytes() == actualInfo.GetNumBytes())
+ {
+ //check data is the same byte by byte
+ const unsigned char* expectedPtr = static_cast<const unsigned char*>(expected.GetMemoryArea());
+ const unsigned char* actualPtr = static_cast<const unsigned char*>(actual.Map(true));
+ for (unsigned int i = 0; i < expected.GetNumBytes(); i++)
+ {
+ CHECK(*(expectedPtr + i) == *(actualPtr + i));
+ }
+ actual.Unmap();
+ }
+}
+
+void TestLayerVisitor::CheckConstTensorPtrs(const std::string& name,
+ const ConstTensor* expected,
+ const std::shared_ptr<ConstTensorHandle> actual)
+{
+ if (expected == nullptr)
+ {
+ CHECK_MESSAGE(actual == nullptr, name + " actual should have been a nullptr");
+ }
+ else
+ {
+ CHECK_MESSAGE(actual != nullptr, name + " actual should have been set");
+ if (actual != nullptr)
+ {
+ CheckConstTensors(*expected, *actual);
+ }
+ }
+}
+
+void TestLayerVisitor::CheckConstTensorPtrs(const std::string& name,
+ const ConstTensor* expected,
+ const ConstTensor* actual)
+{
+ if (expected == nullptr)
+ {
+ CHECK_MESSAGE(actual == nullptr, name + " actual should have been a nullptr");
+ }
+ else
+ {
+ CHECK_MESSAGE(actual != nullptr, name + " actual should have been set");
+ if (actual != nullptr)
+ {
+ CheckConstTensors(*expected, *actual);
+ }
+ }
+}
+
void TestLayerVisitor::CheckOptionalConstTensors(const Optional<ConstTensor>& expected,
const Optional<ConstTensor>& actual)
{
diff --git a/src/armnn/test/TestLayerVisitor.hpp b/src/armnn/test/TestLayerVisitor.hpp
index e43227f520..eaf1667800 100644
--- a/src/armnn/test/TestLayerVisitor.hpp
+++ b/src/armnn/test/TestLayerVisitor.hpp
@@ -4,13 +4,14 @@
//
#pragma once
-#include <armnn/LayerVisitorBase.hpp>
+#include <armnn/StrategyBase.hpp>
#include <armnn/Descriptors.hpp>
+#include <backendsCommon/TensorHandle.hpp>
namespace armnn
{
-// Abstract base class with do nothing implementations for all layer visit methods
-class TestLayerVisitor : public LayerVisitorBase<VisitorNoThrowPolicy>
+// Abstract base class with do-nothing implementations for all layer types
+class TestLayerVisitor : public StrategyBase<NoThrowStrategy>
{
protected:
virtual ~TestLayerVisitor() {}
@@ -19,7 +20,17 @@ protected:
void CheckLayerPointer(const IConnectableLayer* layer);
- void CheckConstTensors(const ConstTensor& expected, const ConstTensor& actual);
+ void CheckConstTensors(const ConstTensor& expected,
+ const ConstTensor& actual);
+ void CheckConstTensors(const ConstTensor& expected,
+ const ConstTensorHandle& actual);
+
+ void CheckConstTensorPtrs(const std::string& name,
+ const ConstTensor* expected,
+ const ConstTensor* actual);
+ void CheckConstTensorPtrs(const std::string& name,
+ const ConstTensor* expected,
+ const std::shared_ptr<ConstTensorHandle> actual);
void CheckOptionalConstTensors(const Optional<ConstTensor>& expected, const Optional<ConstTensor>& actual);
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
index 39c00f4604..cfdaaf529b 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
@@ -20,7 +20,7 @@ TEST_CASE(#testName) \
Test##name##LayerVisitor visitor(descriptor, layerName); \
armnn::NetworkImpl net; \
armnn::IConnectableLayer *const layer = net.Add##name##Layer(descriptor, layerName); \
- layer->Accept(visitor); \
+ layer->ExecuteStrategy(visitor); \
}
#define TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(name, testName) \
@@ -30,7 +30,7 @@ TEST_CASE(#testName) \
Test##name##LayerVisitor visitor(descriptor); \
armnn::NetworkImpl net; \
armnn::IConnectableLayer *const layer = net.Add##name##Layer(descriptor); \
- layer->Accept(visitor); \
+ layer->ExecuteStrategy(visitor); \
}
template<typename Descriptor> Descriptor GetDescriptor();
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
index a3c1420388..b1f9512655 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
@@ -29,15 +29,31 @@ public: \
: armnn::TestLayerVisitor(layerName) \
, m_Descriptor(descriptor) {}; \
\
- void Visit##name##Layer(const armnn::IConnectableLayer* layer, \
- const Descriptor& descriptor, \
- const char* layerName = nullptr) override \
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer, \
+ const armnn::BaseDescriptor& descriptor, \
+ const std::vector<armnn::ConstTensor>& constants, \
+ const char* layerName, \
+ const armnn::LayerBindingId id = 0) override \
{ \
- CheckLayerPointer(layer); \
- CheckDescriptor(descriptor); \
- CheckLayerName(layerName); \
+ armnn::IgnoreUnused(descriptor, constants, id); \
+ switch (layer->GetType()) \
+ { \
+ case armnn::LayerType::Input: break; \
+ case armnn::LayerType::Output: break; \
+ case armnn::LayerType::name: \
+ { \
+ CheckLayerPointer(layer); \
+ CheckDescriptor(static_cast<const Descriptor&>(descriptor)); \
+ CheckLayerName(layerName); \
+ break; \
+ } \
+ default: \
+ { \
+ m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType())); \
+ } \
+ } \
} \
-};
+}; \
} // anonymous namespace
diff --git a/src/armnn/test/TestNameOnlyLayerVisitor.cpp b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
index 00d65f8e76..497c36b079 100644
--- a/src/armnn/test/TestNameOnlyLayerVisitor.cpp
+++ b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
@@ -18,7 +18,7 @@ TEST_CASE(#testName) \
Test##name##LayerVisitor visitor("name##Layer"); \
armnn::NetworkImpl net; \
armnn::IConnectableLayer *const layer = net.Add##name##Layer("name##Layer"); \
- layer->Accept(visitor); \
+ layer->ExecuteStrategy(visitor); \
}
#define TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(name, testName) \
@@ -27,7 +27,7 @@ TEST_CASE(#testName) \
Test##name##LayerVisitor visitor; \
armnn::NetworkImpl net; \
armnn::IConnectableLayer *const layer = net.Add##name##Layer(); \
- layer->Accept(visitor); \
+ layer->ExecuteStrategy(visitor); \
}
} // anonymous namespace
diff --git a/src/armnn/test/TestNameOnlyLayerVisitor.hpp b/src/armnn/test/TestNameOnlyLayerVisitor.hpp
index 519cbbacc6..c0db857b71 100644
--- a/src/armnn/test/TestNameOnlyLayerVisitor.hpp
+++ b/src/armnn/test/TestNameOnlyLayerVisitor.hpp
@@ -15,12 +15,28 @@ class Test##name##LayerVisitor : public armnn::TestLayerVisitor \
public: \
explicit Test##name##LayerVisitor(const char* layerName = nullptr) : armnn::TestLayerVisitor(layerName) {}; \
\
- void Visit##name##Layer(const armnn::IConnectableLayer* layer, \
- const char* layerName = nullptr) override \
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer, \
+ const armnn::BaseDescriptor& descriptor, \
+ const std::vector<armnn::ConstTensor>& constants, \
+ const char* layerName, \
+ const armnn::LayerBindingId id = 0) override \
{ \
- CheckLayerPointer(layer); \
- CheckLayerName(layerName); \
+ armnn::IgnoreUnused(descriptor, constants, id); \
+ switch (layer->GetType()) \
+ { \
+ case armnn::LayerType::name: \
+ { \
+ CheckLayerPointer(layer); \
+ CheckLayerName(layerName); \
+ break; \
+ } \
+ default: \
+ { \
+ m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType())); \
+ } \
+ } \
} \
+ \
};
} // anonymous namespace