aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJan Eilers <jan.eilers@arm.com>2021-09-24 15:45:46 +0100
committerJan Eilers <jan.eilers@arm.com>2021-10-02 16:27:39 +0100
commit1b2654fb799c3d25ffcef4d31b5d026d359e2f8f (patch)
tree0397fdf24f286715e26a0e63bddaa0502f64caf7
parentb63a31170aee1d28267d83a4bc67b57708fb6b05 (diff)
downloadarmnn-1b2654fb799c3d25ffcef4d31b5d026d359e2f8f.tar.gz
IVGCVSW-5985 Remove deprecated code
* Removes deprecated AddLayer, IsLayerSupported functions * Marks the whole LayerVisitor class as deprecated, not just the constructor. This required wrapping all Accept functions in a no-deprecate macro because the LayerVisitor is used as a parameter there * Removes usage of the deprecated LayerVisitor and replaces it with ExecuteStrategy. This required a few structural changes in the unit tests * Adds a default implementation for IStrategy called StrategyBase * Changes pyarmnn to use the non-deprecated constructor for INetworkProperties and adds a related unit test * Marks usage of deprecated code in pyarmnn as deprecated. This required extending INetworkProperties to allow backwards compatibility * Removes deprecated functions from the CpuAcc, GpuAcc and Ref backends Note: This patch breaks compatibility with backends that are not updated in this patch !android-nn-driver:6325 Signed-off-by: Jan Eilers <jan.eilers@arm.com> Change-Id: Id13b6f37a74d26eadeda2da1dc92915e725ed5a5
-rw-r--r--CMakeLists.txt1
-rw-r--r--include/armnn/BackendHelper.hpp46
-rw-r--r--include/armnn/Deprecated.hpp4
-rw-r--r--include/armnn/Descriptors.hpp17
-rw-r--r--include/armnn/ILayerVisitor.hpp70
-rw-r--r--include/armnn/INetwork.hpp89
-rw-r--r--include/armnn/IRuntime.hpp4
-rw-r--r--include/armnn/LayerSupport.hpp23
-rw-r--r--include/armnn/LayerVisitorBase.hpp23
-rw-r--r--include/armnn/StrategyBase.hpp55
-rw-r--r--include/armnn/Types.hpp12
-rw-r--r--include/armnn/TypesUtils.hpp11
-rw-r--r--include/armnn/backends/IBackendInternal.hpp21
-rw-r--r--include/armnn/backends/ILayerSupport.hpp36
-rw-r--r--python/pyarmnn/README.md6
-rw-r--r--python/pyarmnn/src/pyarmnn/__init__.py4
-rw-r--r--python/pyarmnn/src/pyarmnn/swig/armnn_deserializer.i1
-rw-r--r--python/pyarmnn/src/pyarmnn/swig/armnn_tfliteparser.i1
-rw-r--r--python/pyarmnn/src/pyarmnn/swig/modules/armnn_descriptors.i1
-rw-r--r--python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i5
-rw-r--r--python/pyarmnn/src/pyarmnn/swig/modules/armnn_runtime.i55
-rw-r--r--python/pyarmnn/src/pyarmnn/swig/modules/armnn_types.i4
-rw-r--r--python/pyarmnn/test/test_runtime.py25
-rw-r--r--src/armnn/BackendHelper.cpp62
-rw-r--r--src/armnn/CompatibleTypes.hpp3
-rw-r--r--src/armnn/ISubgraphViewConverter.hpp5
-rw-r--r--src/armnn/LayerSupport.cpp69
-rw-r--r--src/armnn/Network.cpp162
-rw-r--r--src/armnn/Network.hpp48
-rw-r--r--src/armnn/SerializeLayerParameters.cpp10
-rw-r--r--src/armnn/SerializeLayerParameters.hpp5
-rw-r--r--src/armnn/SubgraphView.hpp6
-rw-r--r--src/armnn/layers/AbsLayer.cpp2
-rw-r--r--src/armnn/layers/AbsLayer.hpp3
-rw-r--r--src/armnn/layers/ActivationLayer.cpp2
-rw-r--r--src/armnn/layers/ActivationLayer.hpp2
-rw-r--r--src/armnn/layers/AdditionLayer.cpp2
-rw-r--r--src/armnn/layers/AdditionLayer.hpp2
-rw-r--r--src/armnn/layers/ArgMinMaxLayer.cpp2
-rw-r--r--src/armnn/layers/ArgMinMaxLayer.hpp2
-rw-r--r--src/armnn/layers/BatchNormalizationLayer.cpp2
-rw-r--r--src/armnn/layers/BatchNormalizationLayer.hpp2
-rw-r--r--src/armnn/layers/BatchToSpaceNdLayer.cpp2
-rw-r--r--src/armnn/layers/BatchToSpaceNdLayer.hpp2
-rw-r--r--src/armnn/layers/CastLayer.cpp2
-rw-r--r--src/armnn/layers/CastLayer.hpp2
-rw-r--r--src/armnn/layers/ChannelShuffleLayer.cpp3
-rw-r--r--src/armnn/layers/ChannelShuffleLayer.hpp2
-rw-r--r--src/armnn/layers/ComparisonLayer.cpp2
-rw-r--r--src/armnn/layers/ComparisonLayer.hpp2
-rw-r--r--src/armnn/layers/ConcatLayer.cpp2
-rw-r--r--src/armnn/layers/ConcatLayer.hpp2
-rw-r--r--src/armnn/layers/ConstantLayer.cpp2
-rw-r--r--src/armnn/layers/ConstantLayer.hpp2
-rw-r--r--src/armnn/layers/ConvertBf16ToFp32Layer.cpp2
-rw-r--r--src/armnn/layers/ConvertBf16ToFp32Layer.hpp2
-rw-r--r--src/armnn/layers/ConvertFp16ToFp32Layer.cpp2
-rw-r--r--src/armnn/layers/ConvertFp16ToFp32Layer.hpp2
-rw-r--r--src/armnn/layers/ConvertFp32ToBf16Layer.cpp2
-rw-r--r--src/armnn/layers/ConvertFp32ToBf16Layer.hpp2
-rw-r--r--src/armnn/layers/ConvertFp32ToFp16Layer.cpp2
-rw-r--r--src/armnn/layers/ConvertFp32ToFp16Layer.hpp2
-rw-r--r--src/armnn/layers/Convolution2dLayer.cpp2
-rw-r--r--src/armnn/layers/Convolution2dLayer.hpp2
-rw-r--r--src/armnn/layers/DebugLayer.cpp2
-rw-r--r--src/armnn/layers/DebugLayer.hpp2
-rw-r--r--src/armnn/layers/DepthToSpaceLayer.cpp2
-rw-r--r--src/armnn/layers/DepthToSpaceLayer.hpp2
-rw-r--r--src/armnn/layers/DepthwiseConvolution2dLayer.cpp2
-rw-r--r--src/armnn/layers/DepthwiseConvolution2dLayer.hpp2
-rw-r--r--src/armnn/layers/DequantizeLayer.cpp2
-rw-r--r--src/armnn/layers/DequantizeLayer.hpp2
-rw-r--r--src/armnn/layers/DetectionPostProcessLayer.cpp2
-rw-r--r--src/armnn/layers/DetectionPostProcessLayer.hpp2
-rw-r--r--src/armnn/layers/DivisionLayer.cpp2
-rw-r--r--src/armnn/layers/DivisionLayer.hpp2
-rw-r--r--src/armnn/layers/ElementwiseUnaryLayer.cpp2
-rw-r--r--src/armnn/layers/ElementwiseUnaryLayer.hpp2
-rw-r--r--src/armnn/layers/FakeQuantizationLayer.cpp2
-rw-r--r--src/armnn/layers/FakeQuantizationLayer.hpp2
-rw-r--r--src/armnn/layers/FillLayer.cpp2
-rw-r--r--src/armnn/layers/FillLayer.hpp2
-rw-r--r--src/armnn/layers/FloorLayer.cpp2
-rw-r--r--src/armnn/layers/FloorLayer.hpp2
-rw-r--r--src/armnn/layers/FullyConnectedLayer.cpp2
-rw-r--r--src/armnn/layers/FullyConnectedLayer.hpp2
-rw-r--r--src/armnn/layers/GatherLayer.cpp2
-rw-r--r--src/armnn/layers/GatherLayer.hpp2
-rw-r--r--src/armnn/layers/InputLayer.cpp2
-rw-r--r--src/armnn/layers/InputLayer.hpp2
-rw-r--r--src/armnn/layers/InstanceNormalizationLayer.cpp2
-rw-r--r--src/armnn/layers/InstanceNormalizationLayer.hpp2
-rw-r--r--src/armnn/layers/L2NormalizationLayer.cpp2
-rw-r--r--src/armnn/layers/L2NormalizationLayer.hpp2
-rw-r--r--src/armnn/layers/LogSoftmaxLayer.cpp2
-rw-r--r--src/armnn/layers/LogSoftmaxLayer.hpp2
-rw-r--r--src/armnn/layers/LogicalBinaryLayer.cpp2
-rw-r--r--src/armnn/layers/LogicalBinaryLayer.hpp2
-rw-r--r--src/armnn/layers/LstmLayer.cpp2
-rw-r--r--src/armnn/layers/LstmLayer.hpp2
-rw-r--r--src/armnn/layers/MapLayer.cpp2
-rw-r--r--src/armnn/layers/MapLayer.hpp2
-rw-r--r--src/armnn/layers/MaximumLayer.cpp2
-rw-r--r--src/armnn/layers/MaximumLayer.hpp2
-rw-r--r--src/armnn/layers/MeanLayer.cpp2
-rw-r--r--src/armnn/layers/MeanLayer.hpp2
-rw-r--r--src/armnn/layers/MemCopyLayer.cpp2
-rw-r--r--src/armnn/layers/MemCopyLayer.hpp2
-rw-r--r--src/armnn/layers/MemImportLayer.cpp2
-rw-r--r--src/armnn/layers/MemImportLayer.hpp2
-rw-r--r--src/armnn/layers/MergeLayer.cpp2
-rw-r--r--src/armnn/layers/MergeLayer.hpp3
-rw-r--r--src/armnn/layers/MinimumLayer.cpp2
-rw-r--r--src/armnn/layers/MinimumLayer.hpp3
-rw-r--r--src/armnn/layers/MultiplicationLayer.cpp2
-rw-r--r--src/armnn/layers/MultiplicationLayer.hpp3
-rw-r--r--src/armnn/layers/NormalizationLayer.cpp2
-rw-r--r--src/armnn/layers/NormalizationLayer.hpp3
-rw-r--r--src/armnn/layers/OutputLayer.cpp2
-rw-r--r--src/armnn/layers/OutputLayer.hpp3
-rw-r--r--src/armnn/layers/PadLayer.cpp2
-rw-r--r--src/armnn/layers/PadLayer.hpp3
-rw-r--r--src/armnn/layers/PermuteLayer.cpp2
-rw-r--r--src/armnn/layers/PermuteLayer.hpp3
-rw-r--r--src/armnn/layers/Pooling2dLayer.cpp2
-rw-r--r--src/armnn/layers/Pooling2dLayer.hpp3
-rw-r--r--src/armnn/layers/PreCompiledLayer.cpp2
-rw-r--r--src/armnn/layers/PreCompiledLayer.hpp3
-rw-r--r--src/armnn/layers/PreluLayer.cpp2
-rw-r--r--src/armnn/layers/PreluLayer.hpp3
-rw-r--r--src/armnn/layers/QLstmLayer.cpp2
-rw-r--r--src/armnn/layers/QLstmLayer.hpp3
-rw-r--r--src/armnn/layers/QuantizeLayer.cpp2
-rw-r--r--src/armnn/layers/QuantizeLayer.hpp3
-rw-r--r--src/armnn/layers/QuantizedLstmLayer.cpp2
-rw-r--r--src/armnn/layers/QuantizedLstmLayer.hpp3
-rw-r--r--src/armnn/layers/RankLayer.cpp3
-rw-r--r--src/armnn/layers/RankLayer.hpp3
-rw-r--r--src/armnn/layers/ReduceLayer.cpp2
-rw-r--r--src/armnn/layers/ReduceLayer.hpp3
-rw-r--r--src/armnn/layers/ReshapeLayer.cpp2
-rw-r--r--src/armnn/layers/ReshapeLayer.hpp3
-rw-r--r--src/armnn/layers/ResizeLayer.cpp2
-rw-r--r--src/armnn/layers/ResizeLayer.hpp3
-rw-r--r--src/armnn/layers/RsqrtLayer.cpp2
-rw-r--r--src/armnn/layers/RsqrtLayer.hpp3
-rw-r--r--src/armnn/layers/ShapeLayer.cpp2
-rw-r--r--src/armnn/layers/ShapeLayer.hpp3
-rw-r--r--src/armnn/layers/SliceLayer.cpp2
-rw-r--r--src/armnn/layers/SliceLayer.hpp3
-rw-r--r--src/armnn/layers/SoftmaxLayer.cpp2
-rw-r--r--src/armnn/layers/SoftmaxLayer.hpp3
-rw-r--r--src/armnn/layers/SpaceToBatchNdLayer.cpp2
-rw-r--r--src/armnn/layers/SpaceToBatchNdLayer.hpp3
-rw-r--r--src/armnn/layers/SpaceToDepthLayer.cpp2
-rw-r--r--src/armnn/layers/SpaceToDepthLayer.hpp3
-rw-r--r--src/armnn/layers/SplitterLayer.cpp2
-rw-r--r--src/armnn/layers/SplitterLayer.hpp3
-rw-r--r--src/armnn/layers/StackLayer.cpp2
-rw-r--r--src/armnn/layers/StackLayer.hpp3
-rw-r--r--src/armnn/layers/StandInLayer.cpp2
-rw-r--r--src/armnn/layers/StandInLayer.hpp3
-rw-r--r--src/armnn/layers/StridedSliceLayer.cpp2
-rw-r--r--src/armnn/layers/StridedSliceLayer.hpp3
-rw-r--r--src/armnn/layers/SubtractionLayer.cpp2
-rw-r--r--src/armnn/layers/SubtractionLayer.hpp3
-rw-r--r--src/armnn/layers/SwitchLayer.cpp2
-rw-r--r--src/armnn/layers/SwitchLayer.hpp3
-rw-r--r--src/armnn/layers/TransposeConvolution2dLayer.cpp2
-rw-r--r--src/armnn/layers/TransposeConvolution2dLayer.hpp3
-rw-r--r--src/armnn/layers/TransposeLayer.cpp2
-rw-r--r--src/armnn/layers/TransposeLayer.hpp3
-rw-r--r--src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp2
-rw-r--r--src/armnn/layers/UnidirectionalSequenceLstmLayer.hpp3
-rw-r--r--src/armnn/layers/UnmapLayer.cpp2
-rw-r--r--src/armnn/layers/UnmapLayer.hpp3
-rw-r--r--src/armnn/test/ConstTensorLayerVisitor.cpp236
-rw-r--r--src/armnn/test/ConstTensorLayerVisitor.hpp358
-rw-r--r--src/armnn/test/NetworkTests.cpp118
-rw-r--r--src/armnn/test/OptimizerTests.cpp67
-rw-r--r--src/armnn/test/TestInputOutputLayerVisitor.cpp8
-rw-r--r--src/armnn/test/TestInputOutputLayerVisitor.hpp56
-rw-r--r--src/armnn/test/TestLayerVisitor.cpp56
-rw-r--r--src/armnn/test/TestLayerVisitor.hpp19
-rw-r--r--src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp4
-rw-r--r--src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp30
-rw-r--r--src/armnn/test/TestNameOnlyLayerVisitor.cpp4
-rw-r--r--src/armnn/test/TestNameOnlyLayerVisitor.hpp24
-rw-r--r--src/armnnDeserializer/Deserializer.cpp3
-rw-r--r--src/armnnDeserializer/test/DeserializeComparison.cpp9
-rw-r--r--src/armnnSerializer/Serializer.cpp70
-rw-r--r--src/armnnSerializer/Serializer.hpp24
-rw-r--r--src/armnnSerializer/test/SerializerTests.cpp75
-rw-r--r--src/armnnTfLiteParser/test/Unsupported.cpp76
-rw-r--r--src/backends/aclCommon/ArmComputeTensorUtils.cpp4
-rw-r--r--src/backends/backendsCommon/CMakeLists.txt1
-rw-r--r--src/backends/backendsCommon/IBackendInternal.cpp41
-rw-r--r--src/backends/backendsCommon/IBackendInternal.hpp9
-rw-r--r--src/backends/backendsCommon/LayerSupportBase.cpp78
-rw-r--r--src/backends/backendsCommon/LayerSupportBase.hpp44
-rw-r--r--src/backends/backendsCommon/TensorHandle.hpp21
-rw-r--r--src/backends/backendsCommon/WorkloadData.cpp10
-rw-r--r--src/backends/backendsCommon/WorkloadData.hpp7
-rw-r--r--src/backends/backendsCommon/WorkloadFactory.cpp37
-rw-r--r--src/backends/backendsCommon/WorkloadFactory.hpp24
-rw-r--r--src/backends/backendsCommon/WorkloadFactoryBase.hpp16
-rw-r--r--src/backends/backendsCommon/WorkloadUtils.cpp4
-rw-r--r--src/backends/backendsCommon/test/MockBackend.cpp5
-rw-r--r--src/backends/backendsCommon/test/MockBackend.hpp1
-rw-r--r--src/backends/backendsCommon/test/mockBackend/MockImportBackend.cpp5
-rw-r--r--src/backends/backendsCommon/test/mockBackend/MockImportBackend.hpp1
-rw-r--r--src/backends/cl/ClBackend.cpp5
-rw-r--r--src/backends/cl/ClBackend.hpp1
-rw-r--r--src/backends/cl/ClLayerSupport.cpp59
-rw-r--r--src/backends/cl/ClLayerSupport.hpp32
-rw-r--r--src/backends/cl/ClWorkloadFactory.cpp65
-rw-r--r--src/backends/cl/ClWorkloadFactory.hpp24
-rw-r--r--src/backends/cl/workloads/ClWorkloadUtils.hpp4
-rw-r--r--src/backends/neon/NeonBackend.cpp5
-rw-r--r--src/backends/neon/NeonBackend.hpp1
-rw-r--r--src/backends/neon/NeonLayerSupport.cpp59
-rw-r--r--src/backends/neon/NeonLayerSupport.hpp32
-rw-r--r--src/backends/neon/NeonWorkloadFactory.cpp65
-rw-r--r--src/backends/neon/NeonWorkloadFactory.hpp24
-rw-r--r--src/backends/neon/workloads/NeonWorkloadUtils.hpp4
-rw-r--r--src/backends/reference/RefBackend.cpp15
-rw-r--r--src/backends/reference/RefBackend.hpp3
-rw-r--r--src/backends/reference/RefLayerSupport.cpp119
-rw-r--r--src/backends/reference/RefLayerSupport.hpp37
-rw-r--r--src/backends/reference/RefWorkloadFactory.cpp58
-rw-r--r--src/backends/reference/RefWorkloadFactory.hpp24
-rw-r--r--src/backends/reference/backend.mk1
-rw-r--r--src/backends/reference/workloads/CMakeLists.txt2
-rw-r--r--src/backends/reference/workloads/Decoders.hpp7
-rw-r--r--src/backends/reference/workloads/Encoders.hpp10
-rw-r--r--src/backends/reference/workloads/RefResizeBilinearWorkload.cpp45
-rw-r--r--src/backends/reference/workloads/RefResizeBilinearWorkload.hpp24
-rw-r--r--src/backends/reference/workloads/RefWorkloads.hpp1
238 files changed, 1199 insertions, 2280 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 0989e12ad5..bebee7ffb8 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -193,6 +193,7 @@ list(APPEND armnn_sources
include/armnn/NetworkFwd.hpp
include/armnn/Optional.hpp
include/armnn/QuantizedLstmParams.hpp
+ include/armnn/StrategyBase.hpp
include/armnn/Tensor.hpp
include/armnn/TensorFwd.hpp
include/armnn/Threadpool.hpp
diff --git a/include/armnn/BackendHelper.hpp b/include/armnn/BackendHelper.hpp
index 80676deed2..0bd37dcf29 100644
--- a/include/armnn/BackendHelper.hpp
+++ b/include/armnn/BackendHelper.hpp
@@ -28,11 +28,6 @@ public:
bool IsBackendRegistered() const;
- ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
- bool IsAbsSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
bool IsActivationSupported(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
@@ -164,12 +159,6 @@ public:
const ElementwiseUnaryDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional());
- ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
- bool IsEqualSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
bool IsFakeQuantizationSupported(const TensorInfo& input,
const FakeQuantizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional());
@@ -190,24 +179,12 @@ public:
const FullyConnectedDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional());
- ARMNN_DEPRECATED_MSG("Use IsGatherSupported with descriptor instead")
- bool IsGatherSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
bool IsGatherSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
const GatherDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional());
- ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
- bool IsGreaterSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& ouput,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
bool IsInputSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported = EmptyOptional());
@@ -272,12 +249,6 @@ public:
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional());
- ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
- bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
- const TensorInfo& output,
- const OriginsDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
bool IsMinimumSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
@@ -356,21 +327,11 @@ public:
const ReshapeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional());
- ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
- bool IsResizeBilinearSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
bool IsResizeSupported(const TensorInfo& input,
const TensorInfo& output,
const ResizeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional());
- ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
- bool IsRsqrtSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
bool IsShapeSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional());
@@ -395,11 +356,6 @@ public:
const SpaceToDepthDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional());
- ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
- bool IsSplitterSupported(const TensorInfo& input,
- const ViewsDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
bool IsSplitterSupported(const TensorInfo& input,
const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
const ViewsDescriptor& descriptor,
@@ -489,7 +445,7 @@ Optional<const BackendOptions::BackendOption> GetCapability(const std::string& b
const armnn::BackendId& backend);
/// Convenience function to check a capability on a backend
-ARMNN_DEPRECATED_MSG("This function has been deprecated in favour of GetBackendCapability")
+ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This function has been deprecated in favour of GetBackendCapability", "22.05")
bool IsCapabilitySupported(const armnn::BackendId& backend, armnn::BackendCapability capability);
}
diff --git a/include/armnn/Deprecated.hpp b/include/armnn/Deprecated.hpp
index 2b9240fbc4..c493adb308 100644
--- a/include/armnn/Deprecated.hpp
+++ b/include/armnn/Deprecated.hpp
@@ -34,13 +34,15 @@
# define ARMNN_NO_DEPRECATE_WARN_END
#endif
-#define ARMNN_SUPRESS_DEPRECATE_WARNING(func) \
+#define ARMNN_SUPPRESS_DEPRECATE_WARNING(func) \
ARMNN_NO_DEPRECATE_WARN_BEGIN \
func; \
ARMNN_NO_DEPRECATE_WARN_END
#define ARMNN_DEPRECATED [[deprecated]]
#define ARMNN_DEPRECATED_MSG(message) [[deprecated(message)]]
+#define ARMNN_DEPRECATED_MSG_REMOVAL_DATE(message, removed_in_release) \
+[[deprecated("Expected to be removed in release " #removed_in_release ". " message)]]
#if defined(__GNUC__) && (__GNUC__ < 6)
# define ARMNN_DEPRECATED_ENUM
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index 9a5128a127..b412bbdcc9 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -243,14 +243,6 @@ private:
uint32_t** m_ViewSizes;
};
-template <typename TensorShapeIt>
-ARMNN_DEPRECATED_MSG("Use CreateDescriptorForConcatenation instead")
-OriginsDescriptor CreateMergerDescriptorForConcatenation(TensorShapeIt first,
- TensorShapeIt last,
- unsigned int concatenationDimension)
-{
- return CreateDescriptorForConcatenation(first, last, concatenationDimension);
-}
/// @brief Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing
/// concatenation of a number of input tensors.
@@ -402,7 +394,7 @@ struct FullyConnectedDescriptor : BaseDescriptor
}
/// Get the number of views/inputs.
- ARMNN_DEPRECATED_MSG("Use GetNumInputs instead")
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use GetNumInputs instead", "22.05")
uint32_t GetNumViews() const;
/// Get the number of views/inputs.
@@ -839,7 +831,10 @@ struct GatherDescriptor : BaseDescriptor
};
/// A ResizeBilinearDescriptor for the ResizeBilinearLayer.
-struct ResizeBilinearDescriptor : BaseDescriptor
+struct ARMNN_DEPRECATED_MSG_REMOVAL_DATE(
+ "ResizeBilinearDescriptor is not supported anymore. Use ResizeDescriptor instead.",
+ "22.08")
+ ResizeBilinearDescriptor : BaseDescriptor
{
ResizeBilinearDescriptor()
: m_TargetWidth(0)
@@ -849,6 +844,7 @@ struct ResizeBilinearDescriptor : BaseDescriptor
, m_HalfPixelCenters(false)
{}
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
bool operator ==(const ResizeBilinearDescriptor& rhs) const
{
return m_TargetWidth == rhs.m_TargetWidth &&
@@ -857,6 +853,7 @@ struct ResizeBilinearDescriptor : BaseDescriptor
m_AlignCorners == rhs.m_AlignCorners &&
m_HalfPixelCenters == rhs.m_HalfPixelCenters;
}
+ ARMNN_NO_DEPRECATE_WARN_END
/// Target width value.
uint32_t m_TargetWidth;
diff --git a/include/armnn/ILayerVisitor.hpp b/include/armnn/ILayerVisitor.hpp
index cceb545a3a..a57db3ce18 100644
--- a/include/armnn/ILayerVisitor.hpp
+++ b/include/armnn/ILayerVisitor.hpp
@@ -13,21 +13,13 @@
namespace armnn
{
-class ILayerVisitor
+class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable IStrategy instead.", "22.05") ILayerVisitor
{
protected:
- ARMNN_DEPRECATED_MSG("Use ABI stable IStrategy instead.")
ILayerVisitor() {}
virtual ~ILayerVisitor() {}
public:
- /// Function an absolute layer should call back to when its Accept(ILayerVisitor&)
- /// function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param name - Optional name for the layer.
- ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead")
- virtual void VisitAbsLayer(const IConnectableLayer* layer,
- const char* name = nullptr) = 0;
/// Function that an activation layer should call back to when its Accept(ILayerVisitor&) function is invoked.
/// @param layer - pointer to the layer which is calling back to this visit function.
@@ -93,13 +85,7 @@ public:
/// @param name - Optional name for the layer.
virtual void VisitConcatLayer(const IConnectableLayer* layer,
const OriginsDescriptor& concatDescriptor,
- const char* name = nullptr)
- {
- // default implementation to ease transition while MergerLayer is being deprecated
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- VisitMergerLayer(layer, concatDescriptor, name);
- ARMNN_NO_DEPRECATE_WARN_END
- }
+ const char* name = nullptr) = 0;
/// Function a layer with no inputs and a single output, which always corresponds to
/// the passed in constant tensor should call back to when its Accept(ILayerVisitor&) function is invoked.
@@ -178,13 +164,6 @@ public:
const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
const char* name = nullptr) = 0;
- /// Function an Equal layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param name - Optional name for the layer.
- ARMNN_DEPRECATED_MSG("Use VisitComparisonLayer instead")
- virtual void VisitEqualLayer(const IConnectableLayer* layer,
- const char* name = nullptr) = 0;
-
/// Function a fill layer should call back to when its Accept(ILayerVisitor&) function is invoked.
/// @param layer - pointer to the layer which is calling back to this visit function.
/// @param fillDescriptor - Description of the layer
@@ -216,7 +195,7 @@ public:
/// @param weights - Tensor for the weights data.
/// @param biases - Optional tensor for the bias data.
/// @param name - Optional name for the layer.
- ARMNN_DEPRECATED_MSG("Use VisitFullyConnectedLayer without ConstTensors")
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use VisitFullyConnectedLayer without ConstTensors", "22.05")
virtual void VisitFullyConnectedLayer(const IConnectableLayer* layer,
const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
@@ -225,26 +204,12 @@ public:
/// Function a Gather layer should call back to when its Accept(ILayerVisitor&) function is invoked.
/// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param name - Optional name for the layer.
- ARMNN_DEPRECATED_MSG("Use VisitGatherLayer with descriptor instead")
- virtual void VisitGatherLayer(const IConnectableLayer* layer,
- const char* name = nullptr) = 0;
-
- /// Function a Gather layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
/// @param gatherDescriptor - Parameters for the gather operation.
/// @param name - Optional name for the layer.
virtual void VisitGatherLayer(const IConnectableLayer* layer,
const GatherDescriptor& gatherDescriptor,
const char* name = nullptr) = 0;
- /// Function a Greater layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param name - Optional name for the layer.
- ARMNN_DEPRECATED_MSG("Use VisitComparisonLayer instead")
- virtual void VisitGreaterLayer(const IConnectableLayer* layer,
- const char* name = nullptr) = 0;
-
/// Function that an InputLayer should call back to when its Accept(ILayerVisitor&) function is invoked.
/// @param layer - pointer to the layer which is calling back to this visit function.
/// @param id - User generated id to uniquely identify a particular input. The same id needs to be specified
@@ -318,18 +283,6 @@ public:
virtual void VisitMergeLayer(const IConnectableLayer* layer,
const char* name = nullptr) = 0;
- /// Function that a merger layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param mergerDescriptor - MergerDescriptor (synonym for OriginsDescriptor) to configure the concatenation
- /// process. Number of Views must be equal to the number of inputs, and their order
- /// must match - e.g. first view corresponds to the first input, second view to the
- /// second input, etc....
- /// @param name - Optional name for the layer.
- ARMNN_DEPRECATED_MSG("Use VisitConcatLayer instead")
- virtual void VisitMergerLayer(const IConnectableLayer* layer,
- const MergerDescriptor& mergerDescriptor,
- const char* name = nullptr) = 0;
-
/// Function a Minimum layer should call back to when its Accept(ILayerVisitor&) function is invoked.
/// @param layer - pointer to the layer which is calling back to this visit function.
/// @param name - Optional name for the layer.
@@ -437,15 +390,6 @@ public:
const ReshapeDescriptor& reshapeDescriptor,
const char* name = nullptr) = 0;
- /// Function that a resize bilinear layer should call back to when its Accept(ILayerVisitor&) function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param resizeDesc - Parameters for the resize operation.
- /// @param name - Optional name for the layer.
- ARMNN_DEPRECATED_MSG("Use VisitResizeLayer instead")
- virtual void VisitResizeBilinearLayer(const IConnectableLayer* layer,
- const ResizeBilinearDescriptor& resizeDesc,
- const char* name = nullptr) = 0;
-
/// Function that a resize layer should call back to when its Accept(ILayerVisitor&) function is invoked.
/// @param layer - pointer to the layer which is calling back to this visit function.
/// @param resizeDescriptor - Parameters for the resize operation.
@@ -454,14 +398,6 @@ public:
const ResizeDescriptor& resizeDescriptor,
const char* name = nullptr) = 0;
- /// Function a Reciprocal of square root layer should call back to when its Accept(ILayerVisitor&)
- /// function is invoked.
- /// @param layer - pointer to the layer which is calling back to this visit function.
- /// @param name - Optional name for the layer.
- ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead")
- virtual void VisitRsqrtLayer(const IConnectableLayer* layer,
- const char* name = nullptr) = 0;
-
/// Function that a slice layer should call back to when its Accept(ILayerVisitor&) function is invoked.
/// @param layer - pointer to the layer which is calling back to this visit function.
/// @param sliceDescriptor - SliceDescriptor to configure the slice operation.
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 8ec8de0600..a8e6cfc0e3 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -88,8 +88,17 @@ public:
/// Returns the unique id of the layer
virtual LayerGuid GetGuid() const = 0;
+ // The Accept function needs to be wrapped in a no warn macro to avoid deprecation warnings from
+ // the deprecated ILayerVisitor which is used in the function.
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
/// Apply a visitor to this layer
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Accept is deprecated. The ILayerVisitor that works in conjunction with this "
+ "Accept function is deprecated. Use IStrategy in combination with "
+ "ExecuteStrategy instead, which is an ABI/API stable version of the "
+ "visitor pattern.",
+ "22.05")
virtual void Accept(ILayerVisitor& visitor) const = 0;
+ ARMNN_NO_DEPRECATE_WARN_END
/// Apply a visitor to this layer
virtual void ExecuteStrategy(IStrategy& strategy) const = 0;
@@ -230,12 +239,12 @@ public:
const Optional<ConstTensor>& biases,
const char* name = nullptr);
- ARMNN_DEPRECATED_MSG("This AddConvolution2dLayer overload is deprecated")
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This AddConvolution2dLayer overload is deprecated", "22.08")
IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
const char* name = nullptr);
- ARMNN_DEPRECATED_MSG("This AddConvolution2dLayer overload is deprecated")
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This AddConvolution2dLayer overload is deprecated", "22.08")
IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
const ConstTensor& biases,
@@ -271,19 +280,6 @@ public:
const Optional<ConstTensor>& biases,
const char* name = nullptr);
- ARMNN_DEPRECATED_MSG("This AddDepthwiseConvolution2dLayer overload is deprecated")
- IConnectableLayer* AddDepthwiseConvolution2dLayer(
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const char* name = nullptr);
-
- ARMNN_DEPRECATED_MSG("This AddDepthwiseConvolution2dLayer overload is deprecated")
- IConnectableLayer* AddDepthwiseConvolution2dLayer(
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name = nullptr);
-
/// Adds a Dequantize layer to the network.
/// @return - Interface for configuring the layer.
IConnectableLayer* AddDequantizeLayer(const char* name = nullptr);
@@ -337,13 +333,13 @@ public:
IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const char* name = nullptr);
- ARMNN_DEPRECATED_MSG("This AddFullyConnectedLayer overload is deprecated")
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This AddFullyConnectedLayer overload is deprecated", "22.05")
IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const Optional<ConstTensor>& weights,
const Optional<ConstTensor>& biases,
const char* name = nullptr);
- ARMNN_DEPRECATED_MSG("This AddFullyConnectedLayer overload is deprecated")
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This AddFullyConnectedLayer overload is deprecated", "22.05")
IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
const Optional<ConstTensor>& biases,
@@ -414,23 +410,6 @@ public:
/// @return - Interface for configuring the layer.
IConnectableLayer* AddMergeLayer(const char* name = nullptr);
- /// Adds a concat layer to the network.
- /// @param mergerDescriptor - MergerDescriptor (synonym for OriginsDescriptor) to configure the concatenation
- /// process. Number of Views must be equal to the number of inputs, and their order
- /// must match - e.g. first view corresponds to the first input, second view to the
- /// second input, etc....
- /// @param name - Optional name for the layer.
- /// @return - Interface for configuring the layer.
- ARMNN_DEPRECATED_MSG("Use AddConcatLayer instead")
- IConnectableLayer* AddMergerLayer(const MergerDescriptor& mergerDescriptor,
- const char* name = nullptr);
-
- /// Add absolute layer to the network.
- /// @param name - Optional name for the layer.
- /// @return - Interface for configuring the layer.
- ARMNN_DEPRECATED_MSG("Use AddElementwiseUnaryLayer instead")
- IConnectableLayer* AddAbsLayer(const char* name = nullptr);
-
/// Adds an addition layer to the network.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
@@ -460,14 +439,6 @@ public:
/// @return - Interface for configuring the layer.
IConnectableLayer* AddRankLayer(const char* name = nullptr);
- /// Adds a resize bilinear layer to the network.
- /// @param resizeDesc - Parameters for the resize operation.
- /// @param name - Optional name for the layer.
- /// @return - Interface for configuring the layer.
- ARMNN_DEPRECATED_MSG("Use AddResizeLayer instead")
- IConnectableLayer* AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDesc,
- const char* name = nullptr);
-
/// Adds a resize layer to the network.
/// @param resizeDescriptor - Parameters for the resize operation.
/// @param name - Optional name for the layer.
@@ -608,30 +579,6 @@ public:
/// @return - Interface for configuring the layer.
IConnectableLayer* AddMinimumLayer(const char* name = nullptr);
- /// Add a Greater layer to the network.
- /// @param name - Optional name for the layer.
- /// @return - Interface for configuring the layer.
- ARMNN_DEPRECATED_MSG("Use AddComparisonLayer instead")
- IConnectableLayer* AddGreaterLayer(const char* name = nullptr);
-
- /// Add a Equal layer to the network.
- /// @param name - Optional name for the layer.
- /// @return - Interface for configuring the layer.
- ARMNN_DEPRECATED_MSG("Use AddComparisonLayer instead")
- IConnectableLayer* AddEqualLayer(const char* name = nullptr);
-
- /// Add Reciprocal of square root layer to the network.
- /// @param name - Optional name for the layer.
- /// @return - Interface for configuring the layer.
- ARMNN_DEPRECATED_MSG("Use AddElementwiseUnaryLayer instead")
- IConnectableLayer* AddRsqrtLayer(const char* name = nullptr);
-
- /// Add Gather layer to the network.
- /// @param name - Optional name for the layer.
- /// @return - Interface for configuring the layer.
- ARMNN_DEPRECATED_MSG("Use AddGatherLayer with descriptor instead")
- IConnectableLayer* AddGatherLayer(const char* name = nullptr);
-
/// Add Gather layer to the network.
/// @param descriptor - Description of the gather layer.
/// @param name - Optional name for the layer.
@@ -722,7 +669,17 @@ public:
IConnectableLayer* AddChannelShuffleLayer(const ChannelShuffleDescriptor& descriptor,
const char* name = nullptr);
+ // The Accept function needs to be wrapped in a no warn macro to avoid deprecation warnings from
+ // the deprecated ILayerVisitor which is used in the function.
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
+ /// Apply a visitor to this layer
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Accept is deprecated. The ILayerVisitor that works in conjunction with this "
+ "Accept function is deprecated. Use IStrategy in combination with "
+ "ExecuteStrategy instead, which is an ABI/API stable version of the "
+ "visitor pattern.",
+ "22.05")
void Accept(ILayerVisitor& visitor) const;
+ ARMNN_NO_DEPRECATE_WARN_END
void ExecuteStrategy(IStrategy& strategy) const;
diff --git a/include/armnn/IRuntime.hpp b/include/armnn/IRuntime.hpp
index 908fe7692d..a46830c95a 100644
--- a/include/armnn/IRuntime.hpp
+++ b/include/armnn/IRuntime.hpp
@@ -31,7 +31,7 @@ using IRuntimePtr = std::unique_ptr<IRuntime, void(*)(IRuntime* runtime)>;
struct INetworkProperties
{
- ARMNN_DEPRECATED_MSG("Please use INetworkProperties constructor with MemorySource argument")
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Please use INetworkProperties constructor with MemorySource argument", "22.02")
INetworkProperties(bool importEnabled = false,
bool exportEnabled = false,
bool asyncEnabled = false,
@@ -45,7 +45,7 @@ struct INetworkProperties
m_OutputSource(m_ExportEnabled ? MemorySource::Malloc : MemorySource::Undefined)
{}
- ARMNN_DEPRECATED_MSG("Please use INetworkProperties constructor without numThreads argument")
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Please use INetworkProperties constructor without numThreads argument", "22.02")
INetworkProperties(bool asyncEnabled,
MemorySource inputSource,
MemorySource outputSource,
diff --git a/include/armnn/LayerSupport.hpp b/include/armnn/LayerSupport.hpp
index 6f1eb0347b..03b706fafd 100644
--- a/include/armnn/LayerSupport.hpp
+++ b/include/armnn/LayerSupport.hpp
@@ -214,14 +214,6 @@ bool IsMergeSupported(const BackendId& backend,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
-/// Deprecated in favor of IBackend and ILayerSupport interfaces
-ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
-bool IsMergerSupported(const BackendId& backend,
- const std::vector<const TensorInfo*> inputs,
- const TensorInfo& output,
- const OriginsDescriptor& descriptor,
- char* reasonIfUnsupported = nullptr,
- size_t reasonIfUnsupportedMaxLength = 1024);
/// Deprecated in favor of IBackend and ILayerSupport interfaces
bool IsMinimumSupported(const BackendId& backend,
@@ -318,14 +310,6 @@ bool IsReshapeSupported(const BackendId& backend,
size_t reasonIfUnsupportedMaxLength = 1024);
/// Deprecated in favor of IBackend and ILayerSupport interfaces
-ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
-bool IsResizeBilinearSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- char* reasonIfUnsupported = nullptr,
- size_t reasonIfUnsupportedMaxLength = 1024);
-
-/// Deprecated in favor of IBackend and ILayerSupport interfaces
bool IsResizeSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
@@ -364,13 +348,6 @@ bool IsSpaceToDepthSupported(const BackendId& backend,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
-ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
-bool IsSplitterSupported(const BackendId& backend,
- const TensorInfo& input,
- const ViewsDescriptor& descriptor,
- char* reasonIfUnsupported = nullptr,
- size_t reasonIfUnsupportedMaxLength = 1024);
-
/// Deprecated in favor of IBackend and ILayerSupport interfaces
bool IsSplitterSupported(const BackendId& backend,
const TensorInfo& input,
diff --git a/include/armnn/LayerVisitorBase.hpp b/include/armnn/LayerVisitorBase.hpp
index 43fc7b976e..3d43725527 100644
--- a/include/armnn/LayerVisitorBase.hpp
+++ b/include/armnn/LayerVisitorBase.hpp
@@ -30,8 +30,6 @@ protected:
virtual ~LayerVisitorBase() {}
public:
- void VisitAbsLayer(const IConnectableLayer*,
- const char*) override { DefaultPolicy::Apply(__func__); }
void VisitActivationLayer(const IConnectableLayer*,
const ActivationDescriptor&,
@@ -99,9 +97,6 @@ public:
const ElementwiseUnaryDescriptor&,
const char*) override { DefaultPolicy::Apply(__func__); }
- void VisitEqualLayer(const IConnectableLayer*,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
void VisitFillLayer(const IConnectableLayer*,
const FillDescriptor&,
const char*) override { DefaultPolicy::Apply(__func__); }
@@ -119,17 +114,10 @@ public:
const Optional<ConstTensor>&,
const char*) override { DefaultPolicy::Apply(__func__); }
- ARMNN_DEPRECATED_MSG("Use VisitGatherLayer with descriptor instead")
- void VisitGatherLayer(const IConnectableLayer*,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
void VisitGatherLayer(const IConnectableLayer*,
const GatherDescriptor&,
const char*) override { DefaultPolicy::Apply(__func__); }
- void VisitGreaterLayer(const IConnectableLayer*,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
void VisitInputLayer(const IConnectableLayer*,
LayerBindingId,
const char*) override { DefaultPolicy::Apply(__func__); }
@@ -165,10 +153,6 @@ public:
void VisitMergeLayer(const IConnectableLayer*,
const char*) override { DefaultPolicy::Apply(__func__); }
- void VisitMergerLayer(const IConnectableLayer*,
- const MergerDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
void VisitMinimumLayer(const IConnectableLayer*,
const char*) override { DefaultPolicy::Apply(__func__); }
@@ -221,17 +205,10 @@ public:
const ReshapeDescriptor&,
const char*) override { DefaultPolicy::Apply(__func__); }
- void VisitResizeBilinearLayer(const IConnectableLayer*,
- const ResizeBilinearDescriptor&,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
void VisitResizeLayer(const IConnectableLayer*,
const ResizeDescriptor&,
const char*) override { DefaultPolicy::Apply(__func__); }
- void VisitRsqrtLayer(const IConnectableLayer*,
- const char*) override { DefaultPolicy::Apply(__func__); }
-
void VisitSliceLayer(const IConnectableLayer*,
const SliceDescriptor&,
const char*) override { DefaultPolicy::Apply(__func__); }
diff --git a/include/armnn/StrategyBase.hpp b/include/armnn/StrategyBase.hpp
new file mode 100644
index 0000000000..78f393f44f
--- /dev/null
+++ b/include/armnn/StrategyBase.hpp
@@ -0,0 +1,55 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+
+#include <armnn/INetwork.hpp>
+#include <armnn/IStrategy.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+
+namespace armnn
+{
+
+struct ThrowingStrategy
+{
+ void Apply(const std::string& errorMessage = "") { throw UnimplementedException(errorMessage); };
+};
+
+struct NoThrowStrategy
+{
+ void Apply(const std::string&) {};
+};
+
+/// Strategy base class with empty implementations.
+template <typename DefaultStrategy>
+class StrategyBase : public IStrategy
+{
+protected:
+ virtual ~StrategyBase() {};
+
+public:
+ virtual void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+ const armnn::BaseDescriptor& descriptor,
+ const std::vector<armnn::ConstTensor>& constants,
+ const char* name,
+ const armnn::LayerBindingId id = 0) override
+ {
+ armnn::IgnoreUnused(descriptor, constants, id, name);
+ switch (layer->GetType())
+ {
+ default:
+ {
+ m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+ }
+ }
+ }
+
+protected:
+ DefaultStrategy m_DefaultStrategy;
+
+};
+
+
+} // namespace armnn
diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp
index ef52368365..e713b8989e 100644
--- a/include/armnn/Types.hpp
+++ b/include/armnn/Types.hpp
@@ -40,14 +40,10 @@ enum class DataType
Signed32 = 3,
Boolean = 4,
QSymmS16 = 5,
- QuantizedSymm8PerAxis ARMNN_DEPRECATED_ENUM_MSG("Per Axis property inferred by number of scales in TensorInfo") = 6,
- QSymmS8 = 7,
- QAsymmS8 = 8,
- BFloat16 = 9,
- Signed64 = 10,
-
- QuantisedAsymm8 ARMNN_DEPRECATED_ENUM_MSG("Use DataType::QAsymmU8 instead.") = QAsymmU8,
- QuantisedSymm16 ARMNN_DEPRECATED_ENUM_MSG("Use DataType::QSymmS16 instead.") = QSymmS16
+ QSymmS8 = 6,
+ QAsymmS8 = 7,
+ BFloat16 = 8,
+ Signed64 = 9,
};
enum class DataLayout
diff --git a/include/armnn/TypesUtils.hpp b/include/armnn/TypesUtils.hpp
index b644daafd8..9bd9c8148f 100644
--- a/include/armnn/TypesUtils.hpp
+++ b/include/armnn/TypesUtils.hpp
@@ -149,9 +149,6 @@ constexpr unsigned int GetDataTypeSize(DataType dataType)
case DataType::QAsymmU8: return 1U;
case DataType::QAsymmS8: return 1U;
case DataType::QSymmS8: return 1U;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- case DataType::QuantizedSymm8PerAxis: return 1U;
- ARMNN_NO_DEPRECATE_WARN_END
case DataType::QSymmS16: return 2U;
case DataType::Boolean: return 1U;
default: return 0U;
@@ -201,9 +198,6 @@ constexpr const char* GetDataTypeName(DataType dataType)
case DataType::QAsymmU8: return "QAsymmU8";
case DataType::QAsymmS8: return "QAsymmS8";
case DataType::QSymmS8: return "QSymmS8";
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- case DataType::QuantizedSymm8PerAxis: return "QSymm8PerAxis";
- ARMNN_NO_DEPRECATE_WARN_END
case DataType::QSymmS16: return "QSymm16";
case DataType::Signed32: return "Signed32";
case DataType::Boolean: return "Boolean";
@@ -268,12 +262,9 @@ constexpr bool IsQuantizedType()
constexpr bool IsQuantized8BitType(DataType dataType)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
return dataType == DataType::QAsymmU8 ||
dataType == DataType::QAsymmS8 ||
- dataType == DataType::QSymmS8 ||
- dataType == DataType::QuantizedSymm8PerAxis;
- ARMNN_NO_DEPRECATE_WARN_END
+ dataType == DataType::QSymmS8;
}
constexpr bool IsQuantizedType(DataType dataType)
diff --git a/include/armnn/backends/IBackendInternal.hpp b/include/armnn/backends/IBackendInternal.hpp
index 2045ba2fc0..f4fe678a5b 100644
--- a/include/armnn/backends/IBackendInternal.hpp
+++ b/include/armnn/backends/IBackendInternal.hpp
@@ -98,25 +98,6 @@ public:
using IMemoryManagerUniquePtr = std::unique_ptr<IMemoryManager>;
using IMemoryManagerSharedPtr = std::shared_ptr<IMemoryManager>;
- using GraphUniquePtr = std::unique_ptr<Graph>;
- using SubgraphViewUniquePtr = std::unique_ptr<SubgraphView>;
-
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- using ISubGraphConverterPtr ARMNN_DEPRECATED_MSG("This type is no longer supported")
- = std::unique_ptr<ISubGraphConverter>;
- using SubGraphUniquePtr ARMNN_DEPRECATED_MSG("SubGraph is deprecated, use SubgraphView instead")
- = std::unique_ptr<SubGraph>;
-
- ARMNN_DEPRECATED_MSG("This method is no longer supported")
- virtual ISubGraphConverterPtr CreateSubGraphConverter(const std::shared_ptr<SubGraph>& subGraph) const;
-
- ARMNN_DEPRECATED_MSG("Use \"OptimizationViews OptimizeSubgraphView(const SubgraphView&)\" instead")
- virtual Optimizations GetOptimizations() const;
-
- ARMNN_DEPRECATED_MSG("Use \"OptimizationViews OptimizeSubgraphView(const SubgraphView&)\" instead")
- virtual SubGraphUniquePtr OptimizeSubGraph(const SubGraph& subGraph, bool& optimizationAttempted) const;
- ARMNN_NO_DEPRECATE_WARN_END
-
virtual IMemoryManagerUniquePtr CreateMemoryManager() const;
virtual IWorkloadFactoryPtr CreateWorkloadFactory(
@@ -194,7 +175,7 @@ public:
};
/// Returns true if backend support the capability false otherwise
- ARMNN_DEPRECATED_MSG("This function has been deprecated in favour of GetCapability")
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This function has been deprecated in favour of GetCapability", "22.05")
virtual bool HasCapability(BackendCapability /*capabilityClass*/) const { return false; }
/// Signals the backend to use a custom memory allocator provided by the user
diff --git a/include/armnn/backends/ILayerSupport.hpp b/include/armnn/backends/ILayerSupport.hpp
index 3744f316b1..2fbb081fbf 100644
--- a/include/armnn/backends/ILayerSupport.hpp
+++ b/include/armnn/backends/ILayerSupport.hpp
@@ -27,10 +27,6 @@ protected:
virtual ~ILayerSupport() {}
public:
- virtual bool IsAbsSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
-
virtual bool IsActivationSupported(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
@@ -162,11 +158,6 @@ public:
const ElementwiseUnaryDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
- virtual bool IsEqualSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
-
virtual bool IsFakeQuantizationSupported(const TensorInfo& input,
const FakeQuantizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
@@ -190,19 +181,9 @@ public:
virtual bool IsGatherSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
-
- virtual bool IsGatherSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
const GatherDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
- virtual bool IsGreaterSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& ouput,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
-
virtual bool IsInputSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
@@ -267,11 +248,6 @@ public:
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
- virtual bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
- const TensorInfo& output,
- const OriginsDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
-
virtual bool IsMinimumSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& ouput,
@@ -350,19 +326,11 @@ public:
const ReshapeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
- virtual bool IsResizeBilinearSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
-
virtual bool IsResizeSupported(const TensorInfo& input,
const TensorInfo& output,
const ResizeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
- virtual bool IsRsqrtSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
-
virtual bool IsShapeSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
@@ -388,10 +356,6 @@ public:
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsSplitterSupported(const TensorInfo& input,
- const ViewsDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
-
- virtual bool IsSplitterSupported(const TensorInfo& input,
const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
diff --git a/python/pyarmnn/README.md b/python/pyarmnn/README.md
index 0c88cccac8..ae263463da 100644
--- a/python/pyarmnn/README.md
+++ b/python/pyarmnn/README.md
@@ -72,9 +72,9 @@ PyArmNN can also be built using the provided python scripts only. The advantage
*ARMNN_INCLUDE* and *ARMNN_LIB* are mandatory and should point to Arm NN includes and libraries against which you will be generating the wrappers. *SWIG_EXECUTABLE* should only be set if you have multiple versions of SWIG installed or you used a custom location for your installation:
```bash
-$ export SWIG_EXECUTABLE=<path_to_swig>
-$ export ARMNN_INCLUDE=<path_to_armnn_include>
-$ export ARMNN_LIB=<path_to_armnn_libraries>
+$ export SWIG_EXECUTABLE=/full/path/to/swig/executable
+$ export ARMNN_INCLUDE=/full/path/to/armnn/include:/full/path/to/armnn/profiling/common/include
+$ export ARMNN_LIB=/path/to/libs
```
##### 2. Clean and build SWIG wrappers:
diff --git a/python/pyarmnn/src/pyarmnn/__init__.py b/python/pyarmnn/src/pyarmnn/__init__.py
index 5cb8bfb6cd..13fdf95c6f 100644
--- a/python/pyarmnn/src/pyarmnn/__init__.py
+++ b/python/pyarmnn/src/pyarmnn/__init__.py
@@ -67,6 +67,10 @@ from ._generated.pyarmnn import IProfiler
from ._generated.pyarmnn import DataType_Float16, DataType_Float32, DataType_QAsymmU8, DataType_Signed32, \
DataType_Boolean, DataType_QSymmS16, DataType_QSymmS8, DataType_QAsymmS8
from ._generated.pyarmnn import DataLayout_NCHW, DataLayout_NHWC
+from ._generated.pyarmnn import MemorySource_Malloc, MemorySource_Undefined, MemorySource_DmaBuf, \
+ MemorySource_DmaBufProtected
+from ._generated.pyarmnn import ProfilingDetailsMethod_Undefined, ProfilingDetailsMethod_DetailsWithEvents, \
+ ProfilingDetailsMethod_DetailsOnly
from ._generated.pyarmnn import ActivationFunction_Abs, ActivationFunction_BoundedReLu, ActivationFunction_LeakyReLu, \
ActivationFunction_Linear, ActivationFunction_ReLu, ActivationFunction_Sigmoid, ActivationFunction_SoftReLu, \
diff --git a/python/pyarmnn/src/pyarmnn/swig/armnn_deserializer.i b/python/pyarmnn/src/pyarmnn/swig/armnn_deserializer.i
index 073fadaba9..bc8228a5eb 100644
--- a/python/pyarmnn/src/pyarmnn/swig/armnn_deserializer.i
+++ b/python/pyarmnn/src/pyarmnn/swig/armnn_deserializer.i
@@ -6,7 +6,6 @@
%{
#include "armnnDeserializer/IDeserializer.hpp"
#include "armnn/Types.hpp"
-#include "ProfilingGuid.hpp"
#include "armnn/INetwork.hpp"
#include "armnn/Exceptions.hpp"
#include <string>
diff --git a/python/pyarmnn/src/pyarmnn/swig/armnn_tfliteparser.i b/python/pyarmnn/src/pyarmnn/swig/armnn_tfliteparser.i
index d2d79cc627..3ed5d6b8fd 100644
--- a/python/pyarmnn/src/pyarmnn/swig/armnn_tfliteparser.i
+++ b/python/pyarmnn/src/pyarmnn/swig/armnn_tfliteparser.i
@@ -6,7 +6,6 @@
%{
#include "armnnTfLiteParser/ITfLiteParser.hpp"
#include "armnn/Types.hpp"
-#include "ProfilingGuid.hpp"
#include "armnn/INetwork.hpp"
%}
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_descriptors.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_descriptors.i
index 00b835bfb3..a050722bb9 100644
--- a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_descriptors.i
+++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_descriptors.i
@@ -5,7 +5,6 @@
%{
#include "armnn/Descriptors.hpp"
#include "armnn/Types.hpp"
-#include "ProfilingGuid.hpp"
%}
namespace std {
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
index 7dc88ac176..f4581ca5ec 100644
--- a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
+++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
@@ -6,7 +6,6 @@
#include "armnn/INetwork.hpp"
#include "armnn/BackendId.hpp"
#include "armnn/Types.hpp"
-#include "ProfilingGuid.hpp"
#include "armnn/Optional.hpp"
#include <fstream>
%}
@@ -989,7 +988,7 @@ public:
const armnn::ConstTensor& weights,
armnn::ConstTensor* biases = nullptr,
const char* name = nullptr) {
-
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
if (biases) {
return $self->AddFullyConnectedLayer(fullyConnectedDescriptor, weights,
armnn::Optional<armnn::ConstTensor>(*biases), name);
@@ -997,7 +996,7 @@ public:
return $self->AddFullyConnectedLayer(fullyConnectedDescriptor, weights,
armnn::Optional<armnn::ConstTensor>(), name);
}
-
+ ARMNN_NO_DEPRECATE_WARN_END
}
%feature("docstring",
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_runtime.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_runtime.i
index ec65cc010a..e56464dd2f 100644
--- a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_runtime.i
+++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_runtime.i
@@ -4,6 +4,7 @@
//
%{
#include "armnn/IRuntime.hpp"
+#include "armnn/Deprecated.hpp"
#include <iostream>
#include <ostream>
#include <sstream>
@@ -97,25 +98,43 @@ struct CreationOptions
ExternalProfilingOptions m_ProfilingOptions;
};
+%{
+typedef armnn::INetworkProperties INetworkProperties;
+%}
+
namespace armnn
{
+%nodefaultctor INetworkProperties;
struct INetworkProperties
{
%feature("docstring",
- "
+ "
Structure for holding network properties.
Contains:
- m_ImportEnabled (bool): Enable import.
-
- m_ExportEnabled (bool): Enable export.
+ m_AsyncEnabled (bool): Enable asynchronous execution of multiple networks.
+ m_InputSource (MemorySource): When inputs are imported this defines the type of the imported memory.
+ m_OutputSource (MemorySource): When outputs are imported this defines the type of the imported memory.
+ m_ProfilingEnabled (bool): Enable profiling.
+ m_OutputNetworkDetailsMethod (ProfilingDetailsMethod): Customize profiling details.
") INetworkProperties;
- INetworkProperties(bool importEnabled = false, bool exportEnabled = false);
+ INetworkProperties(bool asyncEnabled,
+ MemorySource inputSource,
+ MemorySource outputSource,
+ bool profilingEnabled = false,
+ ProfilingDetailsMethod detailsMethod = ProfilingDetailsMethod::Undefined);
+
+
+ const bool m_AsyncEnabled;
+
+ const bool m_ProfilingEnabled;
+
+ const ProfilingDetailsMethod m_OutputNetworkDetailsMethod;
- const bool m_ImportEnabled;
- const bool m_ExportEnabled;
+ const MemorySource m_InputSource;
+ const MemorySource m_OutputSource;
};
%feature("docstring",
@@ -293,5 +312,27 @@ public:
}
+%extend INetworkProperties {
+ %feature("docstring",
+ "
+ Structure for holding network properties.
+
+ Contains:
+ m_ImportEnabled (bool): Enable import.
+
+ m_ExportEnabled (bool): Enable export.
+
+ ") INetworkProperties;
+ INetworkProperties(bool importEnabled = false, bool exportEnabled = false) {
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
+ return new INetworkProperties(importEnabled, exportEnabled);
+ ARMNN_NO_DEPRECATE_WARN_END
+ }
+ %pythonprepend INetworkProperties(bool, bool) %{
+ import warnings
+ warnings.warn("Deprecated: Use constructor with MemorySource argument instead.", DeprecationWarning)
+ %}
+}
+
}
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_types.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_types.i
index b838fce53d..83da4558fb 100644
--- a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_types.i
+++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_types.i
@@ -4,12 +4,10 @@
//
%{
#include "armnn/Types.hpp"
-#include "ProfilingGuid.hpp"
%}
%include <typemaps/permutation_vector.i>
-
namespace armnn
{
@@ -106,12 +104,10 @@ Returns:
") GetSupportedBackends;
-%ignore ProfilingGuid;
%ignore PermutationVector;
#define ARMNN_DEPRECATED_ENUM // SWIG does not support C++ attributes, need this to help generate from Deprecated.hpp.
#define ARMNN_DEPRECATED_ENUM_MSG(message) // SWIG does not support C++ attributes, need this to help generate from Deprecated.hpp.
%include "armnn/Types.hpp"
-%include "ProfilingGuid.hpp"
diff --git a/python/pyarmnn/test/test_runtime.py b/python/pyarmnn/test/test_runtime.py
index ff0ad40b55..295c870370 100644
--- a/python/pyarmnn/test/test_runtime.py
+++ b/python/pyarmnn/test/test_runtime.py
@@ -3,6 +3,7 @@
import os
import pytest
+import warnings
import numpy as np
import pyarmnn as ann
@@ -156,6 +157,30 @@ def test_load_network_properties_provided(random_runtime):
assert "" == messages
assert net_id == 0
+def test_network_properties_constructor(random_runtime):
+ preferred_backends = random_runtime[0]
+ network = random_runtime[1]
+ runtime = random_runtime[2]
+
+ opt_network, _ = ann.Optimize(network, preferred_backends,
+ runtime.GetDeviceSpec(), ann.OptimizerOptions())
+
+ inputSource = ann.MemorySource_Undefined
+ outputSource = ann.MemorySource_Undefined
+ properties = ann.INetworkProperties(True, inputSource, outputSource)
+ assert properties.m_AsyncEnabled == True
+ assert properties.m_ProfilingEnabled == False
+ assert properties.m_OutputNetworkDetailsMethod == ann.ProfilingDetailsMethod_Undefined
+ assert properties.m_InputSource == ann.MemorySource_Undefined
+ assert properties.m_OutputSource == ann.MemorySource_Undefined
+
+ net_id, messages = runtime.LoadNetwork(opt_network, properties)
+ assert "" == messages
+ assert net_id == 0
+
+def test_network_properties_deprecated_constructor():
+ with pytest.warns(DeprecationWarning):
+ warnings.warn("Deprecated: Use constructor with MemorySource argument instead.", DeprecationWarning)
def test_unload_network_fails_for_invalid_net_id(random_runtime):
preferred_backends = random_runtime[0]
diff --git a/src/armnn/BackendHelper.cpp b/src/armnn/BackendHelper.cpp
index 1616fd1aad..cc792a06ef 100644
--- a/src/armnn/BackendHelper.cpp
+++ b/src/armnn/BackendHelper.cpp
@@ -136,15 +136,6 @@ bool LayerSupportHandle::IsBackendRegistered() const
return false;
}
-
-bool LayerSupportHandle::IsAbsSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported)
-{
- // Call the IsXXXLayerSupport function of the specific backend.
- return m_LayerSupport->IsAbsSupported(input, output, reasonIfUnsupported.value());
-}
-
bool LayerSupportHandle::IsActivationSupported(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
@@ -388,14 +379,6 @@ bool LayerSupportHandle::IsElementwiseUnarySupported(const TensorInfo& input,
return m_LayerSupport->IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported.value());
}
-bool LayerSupportHandle::IsEqualSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported)
-{
- return m_LayerSupport->IsEqualSupported(input0, input1, output, reasonIfUnsupported.value());
-}
-
bool LayerSupportHandle::IsFakeQuantizationSupported(const TensorInfo& input,
const FakeQuantizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
@@ -478,28 +461,12 @@ bool LayerSupportHandle::IsFullyConnectedSupported(const TensorInfo& input,
bool LayerSupportHandle::IsGatherSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported)
-{
- return m_LayerSupport->IsGatherSupported(input0, input1, output, reasonIfUnsupported.value());
-}
-
-bool LayerSupportHandle::IsGatherSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
const GatherDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
return m_LayerSupport->IsGatherSupported(input0, input1, output, descriptor, reasonIfUnsupported.value());
}
-bool LayerSupportHandle::IsGreaterSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& ouput,
- Optional<std::string&> reasonIfUnsupported)
-{
- return m_LayerSupport->IsGreaterSupported(input0, input1, ouput, reasonIfUnsupported.value());
-}
-
bool LayerSupportHandle::IsInputSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported)
{
@@ -613,14 +580,6 @@ bool LayerSupportHandle::IsMergeSupported(const TensorInfo& input0,
return m_LayerSupport->IsMergeSupported(input0, input1, output, reasonIfUnsupported.value());
}
-bool LayerSupportHandle::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
- const TensorInfo& output,
- const OriginsDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported)
-{
- return m_LayerSupport->IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported.value());
-}
-
bool LayerSupportHandle::IsMinimumSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
@@ -758,13 +717,6 @@ bool LayerSupportHandle::IsReshapeSupported(const TensorInfo& input,
return m_LayerSupport->IsReshapeSupported(input, output, descriptor, reasonIfUnsupported.value());
}
-bool LayerSupportHandle::IsResizeBilinearSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported)
-{
- return m_LayerSupport->IsResizeBilinearSupported(input, output, reasonIfUnsupported.value());
-}
-
bool LayerSupportHandle::IsResizeSupported(const TensorInfo& input,
const TensorInfo& output,
const ResizeDescriptor& descriptor,
@@ -773,13 +725,6 @@ bool LayerSupportHandle::IsResizeSupported(const TensorInfo& input,
return m_LayerSupport->IsResizeSupported(input, output, descriptor, reasonIfUnsupported.value());
}
-bool LayerSupportHandle::IsRsqrtSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported)
-{
- return m_LayerSupport->IsRsqrtSupported(input, output, reasonIfUnsupported.value());
-}
-
bool LayerSupportHandle::IsShapeSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported)
@@ -820,13 +765,6 @@ bool LayerSupportHandle::IsSpaceToDepthSupported(const TensorInfo& input,
}
bool LayerSupportHandle::IsSplitterSupported(const TensorInfo& input,
- const ViewsDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported)
-{
- return m_LayerSupport->IsSplitterSupported(input, descriptor, reasonIfUnsupported.value());
-}
-
-bool LayerSupportHandle::IsSplitterSupported(const TensorInfo& input,
const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
diff --git a/src/armnn/CompatibleTypes.hpp b/src/armnn/CompatibleTypes.hpp
index 1a663d3e27..e24d5dfc4c 100644
--- a/src/armnn/CompatibleTypes.hpp
+++ b/src/armnn/CompatibleTypes.hpp
@@ -46,11 +46,8 @@ inline bool CompatibleTypes<uint8_t>(DataType dataType)
template<>
inline bool CompatibleTypes<int8_t>(DataType dataType)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
return dataType == DataType::QSymmS8
- || dataType == DataType::QuantizedSymm8PerAxis
|| dataType == DataType::QAsymmS8;
- ARMNN_NO_DEPRECATE_WARN_END
}
template<>
diff --git a/src/armnn/ISubgraphViewConverter.hpp b/src/armnn/ISubgraphViewConverter.hpp
index 34789a2b28..2e108e1f3b 100644
--- a/src/armnn/ISubgraphViewConverter.hpp
+++ b/src/armnn/ISubgraphViewConverter.hpp
@@ -25,9 +25,4 @@ public:
virtual std::vector<CompiledBlobPtr> CompileNetwork() = 0;
};
-///
-/// Old ISubGraphConverter definition kept for backward compatibility only.
-///
-using ISubGraphConverter ARMNN_DEPRECATED_MSG("This type is no longer supported") = ISubgraphViewConverter;
-
} // namespace armnn
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 9eaa97cebc..4cb7492e3a 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -305,18 +305,6 @@ bool IsFullyConnectedSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsFullyConnectedSupported, input, output, weights, biases, descriptor);
}
-ARMNN_DEPRECATED_MSG("Use IsGatherSupported with descriptor instead")
-bool IsGatherSupported(const BackendId& backend,
- const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- const GatherDescriptor descriptor{};
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsGatherSupported, input0, input1, output, descriptor);
-}
-
bool IsGatherSupported(const BackendId& backend,
const TensorInfo& input0,
const TensorInfo& input1,
@@ -423,21 +411,6 @@ bool IsMergeSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergeSupported, input0, input1, output);
}
-ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
-bool IsMergerSupported(const BackendId& backend,
- std::vector<const TensorInfo*> inputs,
- const TensorInfo& output,
- const OriginsDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- ARMNN_ASSERT(inputs.size() > 0);
-
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergerSupported, inputs, output, descriptor);
- ARMNN_NO_DEPRECATE_WARN_END
-}
-
bool IsMinimumSupported(const BackendId& backend,
const TensorInfo& input0,
const TensorInfo& input1,
@@ -589,36 +562,6 @@ bool IsResizeSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeSupported, input, output, descriptor);
}
-ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
-bool IsResizeBilinearSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- ResizeDescriptor descriptor;
- descriptor.m_Method = ResizeMethod::Bilinear;
-
- const TensorShape& outputShape = output.GetShape();
- descriptor.m_TargetWidth = outputShape[3];
- descriptor.m_TargetHeight = outputShape[2];
-
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeSupported, input, output, descriptor);
-}
-
-bool IsRsqrtSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsElementwiseUnarySupported,
- input,
- output,
- ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt));
-}
-
bool IsSoftmaxSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
@@ -649,18 +592,6 @@ bool IsSpaceToDepthSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsSpaceToDepthSupported, input, output, descriptor);
}
-ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
-bool IsSplitterSupported(const BackendId& backend,
- const TensorInfo& input,
- const ViewsDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsSplitterSupported, input, descriptor);
- ARMNN_NO_DEPRECATE_WARN_END
-}
-
bool IsSplitterSupported(const BackendId& backend,
const TensorInfo& input,
const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 4070802be8..a39b6b1a42 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -139,27 +139,6 @@ IConnectableLayer* INetwork::AddDepthwiseConvolution2dLayer(
}
-IConnectableLayer* INetwork::AddDepthwiseConvolution2dLayer(
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const char* name)
-{
- Optional<ConstTensor> biases;
- return pNetworkImpl->AddDepthwiseConvolution2dLayer(convolution2dDescriptor, weights, biases, name);
-}
-
-
-IConnectableLayer* INetwork::AddDepthwiseConvolution2dLayer(
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name)
-{
- return pNetworkImpl->AddDepthwiseConvolution2dLayer(convolution2dDescriptor, weights,
- armnn::Optional<ConstTensor>(biases), name);
-}
-
-
IConnectableLayer* INetwork::AddDequantizeLayer(const char* name)
{
return pNetworkImpl->AddDequantizeLayer(name);
@@ -264,17 +243,6 @@ IConnectableLayer* INetwork::AddMergeLayer(const char* name)
return pNetworkImpl->AddMergeLayer(name);
}
-IConnectableLayer* INetwork::AddMergerLayer(const MergerDescriptor& mergerDescriptor,
- const char* name)
-{
- return pNetworkImpl->AddConcatLayer(mergerDescriptor, name);
-}
-
-IConnectableLayer* INetwork::AddAbsLayer(const char* name)
-{
- return pNetworkImpl->AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Abs), name);
-}
-
IConnectableLayer* INetwork::AddAdditionLayer(const char* name)
{
return pNetworkImpl->AddAdditionLayer(name);
@@ -300,20 +268,6 @@ IConnectableLayer* INetwork::AddRankLayer(const char* name)
return pNetworkImpl->AddRankLayer(name);
}
-IConnectableLayer* INetwork::AddResizeBilinearLayer(const ResizeBilinearDescriptor& descriptor,
- const char* name)
-{
- ResizeDescriptor resizeDescriptor;
- resizeDescriptor.m_Method = ResizeMethod::Bilinear;
- resizeDescriptor.m_DataLayout = descriptor.m_DataLayout;
- resizeDescriptor.m_TargetWidth = descriptor.m_TargetWidth;
- resizeDescriptor.m_TargetHeight = descriptor.m_TargetHeight;
- resizeDescriptor.m_AlignCorners = descriptor.m_AlignCorners;
- resizeDescriptor.m_HalfPixelCenters = descriptor.m_HalfPixelCenters;
-
- return pNetworkImpl->AddResizeLayer(resizeDescriptor, name);
-}
-
IConnectableLayer* INetwork::AddResizeLayer(const ResizeDescriptor& resizeDescriptor,
const char* name)
{
@@ -426,27 +380,6 @@ IConnectableLayer* INetwork::AddMinimumLayer(const char* name)
return pNetworkImpl->AddMinimumLayer(name);
}
-IConnectableLayer* INetwork::AddGreaterLayer(const char* name)
-{
- return pNetworkImpl->AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Greater), name);
-}
-
-IConnectableLayer* INetwork::AddEqualLayer(const char* name)
-{
- return pNetworkImpl->AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Equal), name);
-}
-
-IConnectableLayer* INetwork::AddRsqrtLayer(const char* name)
-{
- return pNetworkImpl->AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), name);
-}
-
-IConnectableLayer* INetwork::AddGatherLayer(const char* name)
-{
- GatherDescriptor gatherDescriptor{};
- return pNetworkImpl->AddGatherLayer(gatherDescriptor, name);
-}
-
IConnectableLayer* INetwork::AddGatherLayer(const GatherDescriptor& descriptor,
const char* name)
{
@@ -527,10 +460,12 @@ IConnectableLayer* INetwork::AddChannelShuffleLayer(const ChannelShuffleDescript
return pNetworkImpl->AddChannelShuffleLayer(descriptor, name);
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void INetwork::Accept(ILayerVisitor& visitor) const
{
return pNetworkImpl->Accept(visitor);
}
+ARMNN_NO_DEPRECATE_WARN_END
void INetwork::ExecuteStrategy(IStrategy& strategy) const
{
@@ -1774,23 +1709,6 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsFloatToHalf()));
Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsHalfToFloat()));
- // Run backend specific optimizations (deprecated)
- for (auto&& chosenBackend : backendSettings.m_SelectedBackends)
- {
- auto factoryFun = BackendRegistryInstance().GetFactory(chosenBackend);
- auto backendPtr = factoryFun();
- ARMNN_ASSERT(backendPtr.get() != nullptr);
-
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- auto backendSpecificOptimizations = backendPtr->GetOptimizations();
- ARMNN_NO_DEPRECATE_WARN_END
-
- if (!backendSpecificOptimizations.empty())
- {
- Optimizer::Pass(optNetObjPtr->pOptimizedNetworkImpl->GetGraph(), backendSpecificOptimizations);
- }
- }
-
return optNet;
}
bool NetworkImpl::GetShapeInferenceMethod()
@@ -1938,15 +1856,6 @@ IConnectableLayer* NetworkImpl::AddFullyConnectedLayer(const FullyConnectedDescr
return layer;
}
-IConnectableLayer* NetworkImpl::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char* name)
-{
- Optional<ConstTensor> optionalWeights(weights);
- return AddFullyConnectedLayer(fullyConnectedDescriptor, optionalWeights, biases, name);
-}
-
IConnectableLayer* NetworkImpl::AddConcatLayer(const ConcatDescriptor& concatDescriptor,
const char* name)
{
@@ -2060,25 +1969,6 @@ IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayer(
return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
}
-IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayer(
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const char* name)
-{
- Optional<ConstTensor> biases;
- return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
-}
-
-IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayer(
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name)
-{
- Optional<ConstTensor> optionalBiases(biases);
- return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, optionalBiases, name);
-}
-
IConnectableLayer* NetworkImpl::AddDetectionPostProcessLayer(const armnn::DetectionPostProcessDescriptor& descriptor,
const ConstTensor& anchors, const char* name)
{
@@ -2147,17 +2037,6 @@ IConnectableLayer* NetworkImpl::AddMinimumLayer(const char* name)
return m_Graph->AddLayer<MinimumLayer>(name);
}
-IConnectableLayer* NetworkImpl::AddMergerLayer(const MergerDescriptor& mergerDescriptor,
- const char* name)
-{
- return AddConcatLayer(mergerDescriptor, name);
-}
-
-IConnectableLayer* NetworkImpl::AddAbsLayer(const char * name)
-{
- return AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Abs), name);
-}
-
IConnectableLayer* NetworkImpl::AddAdditionLayer(const char* name)
{
return m_Graph->AddLayer<AdditionLayer>(name);
@@ -2201,20 +2080,6 @@ IConnectableLayer* NetworkImpl::AddReduceLayer(const ReduceDescriptor& reduceDes
return m_Graph->AddLayer<ReduceLayer>(reduceDescriptor, name);
}
-IConnectableLayer* NetworkImpl::AddResizeBilinearLayer(const ResizeBilinearDescriptor& descriptor,
- const char* name)
-{
- ResizeDescriptor resizeDescriptor;
- resizeDescriptor.m_Method = ResizeMethod::Bilinear;
- resizeDescriptor.m_DataLayout = descriptor.m_DataLayout;
- resizeDescriptor.m_TargetWidth = descriptor.m_TargetWidth;
- resizeDescriptor.m_TargetHeight = descriptor.m_TargetHeight;
- resizeDescriptor.m_AlignCorners = descriptor.m_AlignCorners;
- resizeDescriptor.m_HalfPixelCenters = descriptor.m_HalfPixelCenters;
-
- return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
-}
-
IConnectableLayer* NetworkImpl::AddResizeLayer(const ResizeDescriptor& resizeDescriptor, const char* name)
{
return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
@@ -2452,27 +2317,6 @@ IConnectableLayer* NetworkImpl::AddStridedSliceLayer(const StridedSliceDescripto
return m_Graph->AddLayer<StridedSliceLayer>(stridedSliceDescriptor, name);
}
-IConnectableLayer* NetworkImpl::AddGreaterLayer(const char* name)
-{
- return AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Greater), name);
-}
-
-IConnectableLayer* NetworkImpl::AddEqualLayer(const char* name)
-{
- return AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Equal), name);
-}
-
-IConnectableLayer* NetworkImpl::AddRsqrtLayer(const char * name)
-{
- return AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), name);
-}
-
-IConnectableLayer* NetworkImpl::AddGatherLayer(const char* name)
-{
- GatherDescriptor gatherDescriptor{};
- return AddGatherLayer(gatherDescriptor, name);
-}
-
IConnectableLayer* NetworkImpl::AddGatherLayer(const GatherDescriptor& gatherDescriptor,
const char* name)
{
@@ -2863,6 +2707,7 @@ IConnectableLayer* NetworkImpl::AddUnidirectionalSequenceLstmLayer(
return layer;
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void NetworkImpl::Accept(ILayerVisitor& visitor) const
{
for (auto layer : GetGraph())
@@ -2870,6 +2715,7 @@ void NetworkImpl::Accept(ILayerVisitor& visitor) const
layer->Accept(visitor);
};
}
+ARMNN_NO_DEPRECATE_WARN_END
void NetworkImpl::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 11759c71de..eb1d39d2f6 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -41,9 +41,6 @@ public:
IConnectableLayer* AddInputLayer(LayerBindingId id, const char* name = nullptr);
- ARMNN_DEPRECATED_MSG("Use AddElementwiseUnaryLayer instead")
- IConnectableLayer* AddAbsLayer(const char* name = nullptr);
-
IConnectableLayer* AddActivationLayer(const ActivationDescriptor& activationDescriptor,
const char* name = nullptr);
@@ -78,12 +75,12 @@ public:
const Optional<ConstTensor>& biases,
const char* name = nullptr);
- ARMNN_DEPRECATED_MSG("This AddConvolution2dLayer overload is deprecated")
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This AddConvolution2dLayer overload is deprecated", "22.08")
IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
const char* name = nullptr);
- ARMNN_DEPRECATED_MSG("This AddConvolution2dLayer overload is deprecated")
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This AddConvolution2dLayer overload is deprecated", "22.08")
IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
const ConstTensor& biases,
@@ -105,19 +102,6 @@ public:
const Optional<ConstTensor>& biases,
const char* name = nullptr);
- ARMNN_DEPRECATED_MSG("This AddDepthwiseConvolution2dLayer overload is deprecated")
- IConnectableLayer* AddDepthwiseConvolution2dLayer(
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const char* name = nullptr);
-
- ARMNN_DEPRECATED_MSG("This AddDepthwiseConvolution2dLayer overload is deprecated")
- IConnectableLayer* AddDepthwiseConvolution2dLayer(
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const ConstTensor& biases,
- const char* name = nullptr);
-
IConnectableLayer* AddDequantizeLayer(const char* name = nullptr);
IConnectableLayer* AddDetectionPostProcessLayer(
@@ -130,9 +114,6 @@ public:
IConnectableLayer* AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
const char* name = nullptr);
- ARMNN_DEPRECATED_MSG("Use AddComparisonLayer instead")
- IConnectableLayer* AddEqualLayer(const char* name = nullptr);
-
IConnectableLayer* AddMergeLayer(const char* name = nullptr);
IConnectableLayer* AddFillLayer(const FillDescriptor& fillDescriptor,
@@ -148,21 +129,9 @@ public:
const Optional<ConstTensor>& biases,
const char* name = nullptr);
- ARMNN_DEPRECATED_MSG("This AddFullyConnectedLayer overload is deprecated")
- IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char* name = nullptr);
-
- ARMNN_DEPRECATED_MSG("This AddGatherLayer overload is deprecated")
- IConnectableLayer* AddGatherLayer(const char* name = nullptr);
-
IConnectableLayer* AddGatherLayer(const GatherDescriptor& gatherDescriptor,
const char* name = nullptr);
- ARMNN_DEPRECATED_MSG("Use AddComparisonLayer instead")
- IConnectableLayer* AddGreaterLayer(const char* name = nullptr);
-
IConnectableLayer* AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor& desc,
const char* name = nullptr);
@@ -185,10 +154,6 @@ public:
IConnectableLayer* AddMinimumLayer(const char* name = nullptr);
- ARMNN_DEPRECATED_MSG("Use AddConcatLayer instead")
- IConnectableLayer* AddMergerLayer(const MergerDescriptor& mergerDescriptor,
- const char* name = nullptr);
-
IConnectableLayer* AddMultiplicationLayer(const char* name = nullptr);
IConnectableLayer* AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
@@ -220,19 +185,12 @@ public:
IConnectableLayer* AddReduceLayer(const ReduceDescriptor& reduceDescriptor,
const char* name = nullptr);
- ARMNN_DEPRECATED_MSG("Use AddResizeLayer instead")
- IConnectableLayer* AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDesc,
- const char* name = nullptr);
-
IConnectableLayer* AddResizeLayer(const ResizeDescriptor& resizeDescriptor,
const char* name = nullptr);
IConnectableLayer* AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
const char* name = nullptr);
- ARMNN_DEPRECATED_MSG("Use AddElementwiseUnaryLayer instead")
- IConnectableLayer* AddRsqrtLayer(const char* name = nullptr);
-
IConnectableLayer* AddShapeLayer(const char* name = nullptr);
IConnectableLayer* AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name = nullptr);
@@ -274,7 +232,9 @@ public:
const LstmInputParams& params,
const char* name = nullptr);
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const;
+ ARMNN_NO_DEPRECATE_WARN_END
void ExecuteStrategy(IStrategy& strategy) const;
diff --git a/src/armnn/SerializeLayerParameters.cpp b/src/armnn/SerializeLayerParameters.cpp
index da2c39d4b6..3fc93df727 100644
--- a/src/armnn/SerializeLayerParameters.cpp
+++ b/src/armnn/SerializeLayerParameters.cpp
@@ -378,16 +378,6 @@ void StringifyLayerParameters<ReshapeDescriptor>::Serialize(ParameterStringifyFu
fn("TargetShape",ss.str());
}
-void StringifyLayerParameters<ResizeBilinearDescriptor>::Serialize(ParameterStringifyFunction& fn,
- const ResizeBilinearDescriptor& desc)
-{
- fn("TargetWidth", std::to_string(desc.m_TargetWidth));
- fn("TargetHeight", std::to_string(desc.m_TargetHeight));
- fn("DataLayout", GetDataLayoutName(desc.m_DataLayout));
- fn("AlignCorners", std::to_string(desc.m_AlignCorners));
- fn("HalfPixelCenters", std::to_string(desc.m_HalfPixelCenters));
-}
-
void StringifyLayerParameters<ResizeDescriptor>::Serialize(ParameterStringifyFunction& fn,
const ResizeDescriptor& desc)
{
diff --git a/src/armnn/SerializeLayerParameters.hpp b/src/armnn/SerializeLayerParameters.hpp
index 8a3630ce9d..5c1e6f3759 100644
--- a/src/armnn/SerializeLayerParameters.hpp
+++ b/src/armnn/SerializeLayerParameters.hpp
@@ -139,11 +139,6 @@ template <> struct StringifyLayerParameters<ReshapeDescriptor>
static void Serialize(ParameterStringifyFunction& fn, const ReshapeDescriptor& desc);
};
-template <> struct StringifyLayerParameters<ResizeBilinearDescriptor>
-{
- static void Serialize(ParameterStringifyFunction& fn, const ResizeBilinearDescriptor& desc);
-};
-
template <> struct StringifyLayerParameters<ResizeDescriptor>
{
static void Serialize(ParameterStringifyFunction& fn, const ResizeDescriptor& desc);
diff --git a/src/armnn/SubgraphView.hpp b/src/armnn/SubgraphView.hpp
index cb9e415dd2..af6054283e 100644
--- a/src/armnn/SubgraphView.hpp
+++ b/src/armnn/SubgraphView.hpp
@@ -98,10 +98,4 @@ private:
/// The list of pointers to the layers of the parent graph.
Layers m_Layers;
};
-
-///
-/// Old SubGraph definition kept for backward compatibility only.
-///
-using SubGraph ARMNN_DEPRECATED_MSG("SubGraph is deprecated, use SubgraphView instead") = SubgraphView;
-
} // namespace armnn
diff --git a/src/armnn/layers/AbsLayer.cpp b/src/armnn/layers/AbsLayer.cpp
index 7aa4099641..e103b7fad3 100644
--- a/src/armnn/layers/AbsLayer.cpp
+++ b/src/armnn/layers/AbsLayer.cpp
@@ -46,9 +46,11 @@ void AbsLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "AbsLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void AbsLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitAbsLayer(this, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn \ No newline at end of file
diff --git a/src/armnn/layers/AbsLayer.hpp b/src/armnn/layers/AbsLayer.hpp
index 0e5ccb042a..9ab66624f6 100644
--- a/src/armnn/layers/AbsLayer.hpp
+++ b/src/armnn/layers/AbsLayer.hpp
@@ -28,7 +28,10 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create an AbsLayer.
diff --git a/src/armnn/layers/ActivationLayer.cpp b/src/armnn/layers/ActivationLayer.cpp
index 7bfa28ef73..3abb4c46da 100644
--- a/src/armnn/layers/ActivationLayer.cpp
+++ b/src/armnn/layers/ActivationLayer.cpp
@@ -45,9 +45,11 @@ void ActivationLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ActivationLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void ActivationLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitActivationLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ActivationLayer.hpp b/src/armnn/layers/ActivationLayer.hpp
index 5ffcc3e1f5..47b7f66280 100644
--- a/src/armnn/layers/ActivationLayer.hpp
+++ b/src/armnn/layers/ActivationLayer.hpp
@@ -26,7 +26,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
diff --git a/src/armnn/layers/AdditionLayer.cpp b/src/armnn/layers/AdditionLayer.cpp
index 8b1f2a8dff..b6db7062be 100644
--- a/src/armnn/layers/AdditionLayer.cpp
+++ b/src/armnn/layers/AdditionLayer.cpp
@@ -32,9 +32,11 @@ AdditionLayer* AdditionLayer::Clone(Graph& graph) const
return CloneBase<AdditionLayer>(graph, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void AdditionLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitAdditionLayer(this, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/AdditionLayer.hpp b/src/armnn/layers/AdditionLayer.hpp
index 4af576a130..71a8553078 100644
--- a/src/armnn/layers/AdditionLayer.hpp
+++ b/src/armnn/layers/AdditionLayer.hpp
@@ -23,7 +23,9 @@ public:
/// @param [in] graph The graph into which this layer is being cloned.
AdditionLayer* Clone(Graph& graph) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create an AdditionLayer.
diff --git a/src/armnn/layers/ArgMinMaxLayer.cpp b/src/armnn/layers/ArgMinMaxLayer.cpp
index 219f34682c..5e469a4d07 100644
--- a/src/armnn/layers/ArgMinMaxLayer.cpp
+++ b/src/armnn/layers/ArgMinMaxLayer.cpp
@@ -86,9 +86,11 @@ void ArgMinMaxLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ArgMinMaxLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void ArgMinMaxLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitArgMinMaxLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ArgMinMaxLayer.hpp b/src/armnn/layers/ArgMinMaxLayer.hpp
index 761d4a0a36..f2125361ce 100644
--- a/src/armnn/layers/ArgMinMaxLayer.hpp
+++ b/src/armnn/layers/ArgMinMaxLayer.hpp
@@ -34,7 +34,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a ArgMinMaxLayer.
diff --git a/src/armnn/layers/BatchNormalizationLayer.cpp b/src/armnn/layers/BatchNormalizationLayer.cpp
index e3ee643ac5..e52b986add 100644
--- a/src/armnn/layers/BatchNormalizationLayer.cpp
+++ b/src/armnn/layers/BatchNormalizationLayer.cpp
@@ -70,6 +70,7 @@ Layer::ConstantTensors BatchNormalizationLayer::GetConstantTensorsByRef()
return {m_Mean, m_Variance, m_Beta, m_Gamma};
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void BatchNormalizationLayer::Accept(ILayerVisitor& visitor) const
{
ManagedConstTensorHandle managedMean(m_Mean);
@@ -85,6 +86,7 @@ void BatchNormalizationLayer::Accept(ILayerVisitor& visitor) const
visitor.VisitBatchNormalizationLayer(
this, GetParameters(), meanTensor, varianceTensor, betaTensor, gammaTensor, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
void BatchNormalizationLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/BatchNormalizationLayer.hpp b/src/armnn/layers/BatchNormalizationLayer.hpp
index 2777633a34..10ca7eca25 100644
--- a/src/armnn/layers/BatchNormalizationLayer.hpp
+++ b/src/armnn/layers/BatchNormalizationLayer.hpp
@@ -39,7 +39,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.cpp b/src/armnn/layers/BatchToSpaceNdLayer.cpp
index 4b33b96229..0b6eab54f0 100644
--- a/src/armnn/layers/BatchToSpaceNdLayer.cpp
+++ b/src/armnn/layers/BatchToSpaceNdLayer.cpp
@@ -95,9 +95,11 @@ std::vector<TensorShape> BatchToSpaceNdLayer::InferOutputShapes(const std::vecto
return std::vector<TensorShape>({ outputShape });
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void BatchToSpaceNdLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitBatchToSpaceNdLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.hpp b/src/armnn/layers/BatchToSpaceNdLayer.hpp
index da7585b51e..bb6eb7129d 100644
--- a/src/armnn/layers/BatchToSpaceNdLayer.hpp
+++ b/src/armnn/layers/BatchToSpaceNdLayer.hpp
@@ -34,7 +34,9 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a BatchToSpaceNdLayer.
diff --git a/src/armnn/layers/CastLayer.cpp b/src/armnn/layers/CastLayer.cpp
index 16dd9a3744..485bbf0158 100644
--- a/src/armnn/layers/CastLayer.cpp
+++ b/src/armnn/layers/CastLayer.cpp
@@ -46,10 +46,12 @@ void CastLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "CastLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void CastLayer::Accept(ILayerVisitor& visitor) const
{
IgnoreUnused(visitor);
throw armnn::Exception("CastLayer VisitCastLayer is not implemented");
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/CastLayer.hpp b/src/armnn/layers/CastLayer.hpp
index 8a9ea43934..e0448131a2 100644
--- a/src/armnn/layers/CastLayer.hpp
+++ b/src/armnn/layers/CastLayer.hpp
@@ -28,7 +28,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a CastLayer.
diff --git a/src/armnn/layers/ChannelShuffleLayer.cpp b/src/armnn/layers/ChannelShuffleLayer.cpp
index a3b85f1ba7..884f3ab03d 100644
--- a/src/armnn/layers/ChannelShuffleLayer.cpp
+++ b/src/armnn/layers/ChannelShuffleLayer.cpp
@@ -46,9 +46,12 @@ void ChannelShuffleLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ChannelShuffleLayer");
}
+
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void ChannelShuffleLayer::Accept(ILayerVisitor& visitor) const
{
IgnoreUnused(visitor);
throw armnn::Exception("ChannelShuffleLayer: VisitChannelShuffleLayer is not implemented");
}
+ARMNN_NO_DEPRECATE_WARN_END
} \ No newline at end of file
diff --git a/src/armnn/layers/ChannelShuffleLayer.hpp b/src/armnn/layers/ChannelShuffleLayer.hpp
index 399b651f5e..903d161107 100644
--- a/src/armnn/layers/ChannelShuffleLayer.hpp
+++ b/src/armnn/layers/ChannelShuffleLayer.hpp
@@ -11,7 +11,9 @@ namespace armnn
class ChannelShuffleLayer : public LayerWithParameters<ChannelShuffleDescriptor>
{
public:
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
/// Creates a dynamically-allocated copy of this layer.
/// @param graph The graph into which this layer is being cloned
diff --git a/src/armnn/layers/ComparisonLayer.cpp b/src/armnn/layers/ComparisonLayer.cpp
index 399834d72d..c644cb17c7 100644
--- a/src/armnn/layers/ComparisonLayer.cpp
+++ b/src/armnn/layers/ComparisonLayer.cpp
@@ -74,9 +74,11 @@ void ComparisonLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ComparisonLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void ComparisonLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitComparisonLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ComparisonLayer.hpp b/src/armnn/layers/ComparisonLayer.hpp
index bcb0dc2fdd..07534afab1 100644
--- a/src/armnn/layers/ComparisonLayer.hpp
+++ b/src/armnn/layers/ComparisonLayer.hpp
@@ -35,7 +35,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a ComparisonLayer
diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp
index 238fdb66d9..892c18e62c 100644
--- a/src/armnn/layers/ConcatLayer.cpp
+++ b/src/armnn/layers/ConcatLayer.cpp
@@ -318,9 +318,11 @@ void ConcatLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConcatLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void ConcatLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitConcatLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ConcatLayer.hpp b/src/armnn/layers/ConcatLayer.hpp
index 4315d66436..fefedea608 100644
--- a/src/armnn/layers/ConcatLayer.hpp
+++ b/src/armnn/layers/ConcatLayer.hpp
@@ -44,7 +44,9 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a ConcatLayer.
diff --git a/src/armnn/layers/ConstantLayer.cpp b/src/armnn/layers/ConstantLayer.cpp
index feeb762263..e738e59bdb 100644
--- a/src/armnn/layers/ConstantLayer.cpp
+++ b/src/armnn/layers/ConstantLayer.cpp
@@ -62,12 +62,14 @@ void ConstantLayer::ValidateTensorShapesFromInputs()
outShape);
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void ConstantLayer::Accept(ILayerVisitor& visitor) const
{
ManagedConstTensorHandle managedLayerOutput(m_LayerOutput);
ConstTensor layerOutputTensor(managedLayerOutput.GetTensorInfo(), managedLayerOutput.Map());
visitor.VisitConstantLayer(this, layerOutputTensor, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
void ConstantLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/ConstantLayer.hpp b/src/armnn/layers/ConstantLayer.hpp
index ead8816684..a9a9d37f54 100644
--- a/src/armnn/layers/ConstantLayer.hpp
+++ b/src/armnn/layers/ConstantLayer.hpp
@@ -39,7 +39,9 @@ public:
/// Free up the constant source data stored by the layer.
void ReleaseConstantData() override {}
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
index 3577723a38..b7fa3a6b3f 100644
--- a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
@@ -47,6 +47,7 @@ void ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConvertBf16ToFp32Layer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void ConvertBf16ToFp32Layer::Accept(ILayerVisitor& visitor) const
{
// these conversion layers are only inserted by the
@@ -54,5 +55,6 @@ void ConvertBf16ToFp32Layer::Accept(ILayerVisitor& visitor) const
IgnoreUnused(visitor);
throw armnn::Exception("ConvertBf16ToFp32Layer should never appear in an input graph");
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.hpp b/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
index d9df0bdf38..d2c006655c 100644
--- a/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
+++ b/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
@@ -28,7 +28,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a ConvertBf16ToFp32Layer.
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
index 3b6f72c440..77e6f668ac 100644
--- a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
@@ -47,6 +47,7 @@ void ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConvertFp16ToFp32Layer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void ConvertFp16ToFp32Layer::Accept(ILayerVisitor& visitor) const
{
// these conversion layers are only inserted by the
@@ -54,5 +55,6 @@ void ConvertFp16ToFp32Layer::Accept(ILayerVisitor& visitor) const
IgnoreUnused(visitor);
throw armnn::Exception("ConvertFp16ToFp32Layer should never appear in an input graph");
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.hpp b/src/armnn/layers/ConvertFp16ToFp32Layer.hpp
index 4eadb9f11a..59faf6486d 100644
--- a/src/armnn/layers/ConvertFp16ToFp32Layer.hpp
+++ b/src/armnn/layers/ConvertFp16ToFp32Layer.hpp
@@ -28,7 +28,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a ConvertFp16ToFp32Layer.
diff --git a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
index f909769b9d..6a003dc922 100644
--- a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
@@ -48,6 +48,7 @@ void ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LayerName");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void ConvertFp32ToBf16Layer::Accept(ILayerVisitor& visitor) const
{
// these conversion layers are only inserted by the
@@ -55,5 +56,6 @@ void ConvertFp32ToBf16Layer::Accept(ILayerVisitor& visitor) const
IgnoreUnused(visitor);
throw armnn::Exception("ConvertFp32ToBf16Layer should never appear in an input graph");
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ConvertFp32ToBf16Layer.hpp b/src/armnn/layers/ConvertFp32ToBf16Layer.hpp
index 57fbe13e12..8e33cb2d6a 100644
--- a/src/armnn/layers/ConvertFp32ToBf16Layer.hpp
+++ b/src/armnn/layers/ConvertFp32ToBf16Layer.hpp
@@ -28,7 +28,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a ConvertFp32ToBf16Layer.
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
index 3e6f055a4a..8c96909215 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
@@ -47,6 +47,7 @@ void ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LayerName");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void ConvertFp32ToFp16Layer::Accept(ILayerVisitor& visitor) const
{
// These conversion layers are only inserted by the
@@ -54,5 +55,6 @@ void ConvertFp32ToFp16Layer::Accept(ILayerVisitor& visitor) const
IgnoreUnused(visitor);
throw armnn::Exception("ConvertFp32ToFp16Layer should never appear in an input graph");
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.hpp b/src/armnn/layers/ConvertFp32ToFp16Layer.hpp
index 5652a472a2..e331c7d59a 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.hpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.hpp
@@ -27,7 +27,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a ConvertFp32ToFp16Layer.
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp
index b7bf0462d8..ae29d833e8 100644
--- a/src/armnn/layers/Convolution2dLayer.cpp
+++ b/src/armnn/layers/Convolution2dLayer.cpp
@@ -143,6 +143,7 @@ Layer::ConstantTensors Convolution2dLayer::GetConstantTensorsByRef()
return {m_Weight, m_Bias};
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void Convolution2dLayer::Accept(ILayerVisitor& visitor) const
{
ManagedConstTensorHandle managedWeight(m_Weight);
@@ -158,6 +159,7 @@ void Convolution2dLayer::Accept(ILayerVisitor& visitor) const
visitor.VisitConvolution2dLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
void Convolution2dLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/Convolution2dLayer.hpp b/src/armnn/layers/Convolution2dLayer.hpp
index a33cda27cb..844747831c 100644
--- a/src/armnn/layers/Convolution2dLayer.hpp
+++ b/src/armnn/layers/Convolution2dLayer.hpp
@@ -42,7 +42,9 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/DebugLayer.cpp b/src/armnn/layers/DebugLayer.cpp
index ade09ed3d4..07d59be7a3 100644
--- a/src/armnn/layers/DebugLayer.cpp
+++ b/src/armnn/layers/DebugLayer.cpp
@@ -52,11 +52,13 @@ void DebugLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DebugLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void DebugLayer::Accept(ILayerVisitor& visitor) const
{
// by design debug layers are never in input graphs
IgnoreUnused(visitor);
throw armnn::Exception("DebugLayer should never appear in an input graph");
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/DebugLayer.hpp b/src/armnn/layers/DebugLayer.hpp
index e71e05a8d5..054f5e4d2b 100644
--- a/src/armnn/layers/DebugLayer.hpp
+++ b/src/armnn/layers/DebugLayer.hpp
@@ -28,7 +28,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a DebugLayer.
diff --git a/src/armnn/layers/DepthToSpaceLayer.cpp b/src/armnn/layers/DepthToSpaceLayer.cpp
index dfa575b7a3..ba06ad6c31 100644
--- a/src/armnn/layers/DepthToSpaceLayer.cpp
+++ b/src/armnn/layers/DepthToSpaceLayer.cpp
@@ -75,9 +75,11 @@ void DepthToSpaceLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DepthToSpaceLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void DepthToSpaceLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitDepthToSpaceLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/DepthToSpaceLayer.hpp b/src/armnn/layers/DepthToSpaceLayer.hpp
index 0730d4d3ea..d9f6752cbd 100644
--- a/src/armnn/layers/DepthToSpaceLayer.hpp
+++ b/src/armnn/layers/DepthToSpaceLayer.hpp
@@ -35,7 +35,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a DepthToSpaceLayer.
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
index ed52b39050..86c994745c 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
@@ -148,6 +148,7 @@ Layer::ConstantTensors DepthwiseConvolution2dLayer::GetConstantTensorsByRef()
return {m_Weight, m_Bias};
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void DepthwiseConvolution2dLayer::Accept(ILayerVisitor& visitor) const
{
ManagedConstTensorHandle managedWeight(m_Weight);
@@ -163,6 +164,7 @@ void DepthwiseConvolution2dLayer::Accept(ILayerVisitor& visitor) const
visitor.VisitDepthwiseConvolution2dLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
void DepthwiseConvolution2dLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
index 51f6ea9453..8f8f020a0f 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
@@ -41,7 +41,9 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/DequantizeLayer.cpp b/src/armnn/layers/DequantizeLayer.cpp
index cbe9ae17b5..f8a2e057ac 100644
--- a/src/armnn/layers/DequantizeLayer.cpp
+++ b/src/armnn/layers/DequantizeLayer.cpp
@@ -46,9 +46,11 @@ void DequantizeLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DequantizeLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void DequantizeLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitDequantizeLayer(this, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/DequantizeLayer.hpp b/src/armnn/layers/DequantizeLayer.hpp
index a5750ddaab..99bde85f72 100644
--- a/src/armnn/layers/DequantizeLayer.hpp
+++ b/src/armnn/layers/DequantizeLayer.hpp
@@ -28,7 +28,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a DequantizeLayer.
diff --git a/src/armnn/layers/DetectionPostProcessLayer.cpp b/src/armnn/layers/DetectionPostProcessLayer.cpp
index bd94d1d281..41c44d08a9 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.cpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.cpp
@@ -78,6 +78,7 @@ Layer::ConstantTensors DetectionPostProcessLayer::GetConstantTensorsByRef()
return { m_Anchors };
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void DetectionPostProcessLayer::Accept(ILayerVisitor& visitor) const
{
ManagedConstTensorHandle managedAnchors(m_Anchors);
@@ -85,6 +86,7 @@ void DetectionPostProcessLayer::Accept(ILayerVisitor& visitor) const
visitor.VisitDetectionPostProcessLayer(this, GetParameters(), anchorTensor, GetName());
m_Anchors->Unmap();
}
+ARMNN_NO_DEPRECATE_WARN_END
void DetectionPostProcessLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/DetectionPostProcessLayer.hpp b/src/armnn/layers/DetectionPostProcessLayer.hpp
index b409134c1c..1826645fc6 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.hpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.hpp
@@ -34,7 +34,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/DivisionLayer.cpp b/src/armnn/layers/DivisionLayer.cpp
index 5b032ce998..17b671a5f4 100644
--- a/src/armnn/layers/DivisionLayer.cpp
+++ b/src/armnn/layers/DivisionLayer.cpp
@@ -32,9 +32,11 @@ DivisionLayer* DivisionLayer::Clone(Graph& graph) const
return CloneBase<DivisionLayer>(graph, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void DivisionLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitDivisionLayer(this, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/DivisionLayer.hpp b/src/armnn/layers/DivisionLayer.hpp
index 4427a4c4cb..91bccfc184 100644
--- a/src/armnn/layers/DivisionLayer.hpp
+++ b/src/armnn/layers/DivisionLayer.hpp
@@ -24,7 +24,9 @@ public:
/// @param [in] graph The graph into which this layer is being cloned.
DivisionLayer* Clone(Graph& graph) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a DivisionLayer.
diff --git a/src/armnn/layers/ElementwiseUnaryLayer.cpp b/src/armnn/layers/ElementwiseUnaryLayer.cpp
index 8c94106818..6f07cf93f9 100644
--- a/src/armnn/layers/ElementwiseUnaryLayer.cpp
+++ b/src/armnn/layers/ElementwiseUnaryLayer.cpp
@@ -61,9 +61,11 @@ void ElementwiseUnaryLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, GetLayerTypeAsCString(GetType()));
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void ElementwiseUnaryLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitElementwiseUnaryLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ElementwiseUnaryLayer.hpp b/src/armnn/layers/ElementwiseUnaryLayer.hpp
index f6f8862da4..1261882e0b 100644
--- a/src/armnn/layers/ElementwiseUnaryLayer.hpp
+++ b/src/armnn/layers/ElementwiseUnaryLayer.hpp
@@ -34,7 +34,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a ElementwiseUnaryLayer
diff --git a/src/armnn/layers/FakeQuantizationLayer.cpp b/src/armnn/layers/FakeQuantizationLayer.cpp
index 102a6725a7..69f0166d0e 100644
--- a/src/armnn/layers/FakeQuantizationLayer.cpp
+++ b/src/armnn/layers/FakeQuantizationLayer.cpp
@@ -46,11 +46,13 @@ void FakeQuantizationLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "FakeQuantizationLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void FakeQuantizationLayer::Accept(ILayerVisitor& visitor) const
{
IgnoreUnused(visitor);
throw armnn::Exception("FakeQuantizationLayer should not appear in an input graph");
}
+ARMNN_NO_DEPRECATE_WARN_END
void FakeQuantizationLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/FakeQuantizationLayer.hpp b/src/armnn/layers/FakeQuantizationLayer.hpp
index 78e49e6474..c115c63f33 100644
--- a/src/armnn/layers/FakeQuantizationLayer.hpp
+++ b/src/armnn/layers/FakeQuantizationLayer.hpp
@@ -28,7 +28,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/FillLayer.cpp b/src/armnn/layers/FillLayer.cpp
index 41471c3412..45fe07244b 100644
--- a/src/armnn/layers/FillLayer.cpp
+++ b/src/armnn/layers/FillLayer.cpp
@@ -51,9 +51,11 @@ void FillLayer::ValidateTensorShapesFromInputs()
inferredShapes[0][0]);
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void FillLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitFillLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/FillLayer.hpp b/src/armnn/layers/FillLayer.hpp
index eeed141128..096d9ba7dc 100644
--- a/src/armnn/layers/FillLayer.hpp
+++ b/src/armnn/layers/FillLayer.hpp
@@ -27,7 +27,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a FillLayer.
diff --git a/src/armnn/layers/FloorLayer.cpp b/src/armnn/layers/FloorLayer.cpp
index e03bdb16ff..a975ee8d97 100644
--- a/src/armnn/layers/FloorLayer.cpp
+++ b/src/armnn/layers/FloorLayer.cpp
@@ -45,9 +45,11 @@ void FloorLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "FloorLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void FloorLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitFloorLayer(this, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/FloorLayer.hpp b/src/armnn/layers/FloorLayer.hpp
index 07cf151a8a..2b16cfab26 100644
--- a/src/armnn/layers/FloorLayer.hpp
+++ b/src/armnn/layers/FloorLayer.hpp
@@ -28,7 +28,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a FloorLayer.
diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp
index 259d4149c8..2c41d74923 100644
--- a/src/armnn/layers/FullyConnectedLayer.cpp
+++ b/src/armnn/layers/FullyConnectedLayer.cpp
@@ -80,10 +80,12 @@ Layer::ConstantTensors FullyConnectedLayer::GetConstantTensorsByRef()
return {m_Weight, m_Bias};
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void FullyConnectedLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitFullyConnectedLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
void FullyConnectedLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/FullyConnectedLayer.hpp b/src/armnn/layers/FullyConnectedLayer.hpp
index 5639bf27b4..e97282d73f 100644
--- a/src/armnn/layers/FullyConnectedLayer.hpp
+++ b/src/armnn/layers/FullyConnectedLayer.hpp
@@ -43,7 +43,9 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/GatherLayer.cpp b/src/armnn/layers/GatherLayer.cpp
index cdbdaabcdc..e8b67b8348 100644
--- a/src/armnn/layers/GatherLayer.cpp
+++ b/src/armnn/layers/GatherLayer.cpp
@@ -83,9 +83,11 @@ void GatherLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "GatherLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void GatherLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitGatherLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/GatherLayer.hpp b/src/armnn/layers/GatherLayer.hpp
index 3bc8c69bc4..8c294079c3 100644
--- a/src/armnn/layers/GatherLayer.hpp
+++ b/src/armnn/layers/GatherLayer.hpp
@@ -34,7 +34,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a GatherLayer.
diff --git a/src/armnn/layers/InputLayer.cpp b/src/armnn/layers/InputLayer.cpp
index 0f96611792..21246f146b 100644
--- a/src/armnn/layers/InputLayer.cpp
+++ b/src/armnn/layers/InputLayer.cpp
@@ -35,9 +35,11 @@ void InputLayer::ValidateTensorShapesFromInputs()
"InputLayer should already have the TensorInfo set.");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void InputLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitInputLayer(this, this->GetBindingId(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace
diff --git a/src/armnn/layers/InputLayer.hpp b/src/armnn/layers/InputLayer.hpp
index ff6b521bf0..2b73dcec35 100644
--- a/src/armnn/layers/InputLayer.hpp
+++ b/src/armnn/layers/InputLayer.hpp
@@ -28,7 +28,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create an InputLayer.
diff --git a/src/armnn/layers/InstanceNormalizationLayer.cpp b/src/armnn/layers/InstanceNormalizationLayer.cpp
index 87c6877df8..657b44220d 100644
--- a/src/armnn/layers/InstanceNormalizationLayer.cpp
+++ b/src/armnn/layers/InstanceNormalizationLayer.cpp
@@ -46,9 +46,11 @@ void InstanceNormalizationLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "InstanceNormalizationLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void InstanceNormalizationLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitInstanceNormalizationLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/InstanceNormalizationLayer.hpp b/src/armnn/layers/InstanceNormalizationLayer.hpp
index 799cf28f8c..addd61e4f8 100644
--- a/src/armnn/layers/InstanceNormalizationLayer.hpp
+++ b/src/armnn/layers/InstanceNormalizationLayer.hpp
@@ -28,7 +28,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a InstanceNormalizationLayer.
diff --git a/src/armnn/layers/L2NormalizationLayer.cpp b/src/armnn/layers/L2NormalizationLayer.cpp
index c96e708075..7bddbf1f18 100644
--- a/src/armnn/layers/L2NormalizationLayer.cpp
+++ b/src/armnn/layers/L2NormalizationLayer.cpp
@@ -46,9 +46,11 @@ void L2NormalizationLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "L2NormalizationLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void L2NormalizationLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitL2NormalizationLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/L2NormalizationLayer.hpp b/src/armnn/layers/L2NormalizationLayer.hpp
index 5d58077ba8..21072b20a0 100644
--- a/src/armnn/layers/L2NormalizationLayer.hpp
+++ b/src/armnn/layers/L2NormalizationLayer.hpp
@@ -28,7 +28,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a L2NormalizationLayer.
diff --git a/src/armnn/layers/LogSoftmaxLayer.cpp b/src/armnn/layers/LogSoftmaxLayer.cpp
index 24e79ce8ae..ea2518289f 100644
--- a/src/armnn/layers/LogSoftmaxLayer.cpp
+++ b/src/armnn/layers/LogSoftmaxLayer.cpp
@@ -45,9 +45,11 @@ void LogSoftmaxLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LogSoftmaxLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void LogSoftmaxLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitLogSoftmaxLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/LogSoftmaxLayer.hpp b/src/armnn/layers/LogSoftmaxLayer.hpp
index b21bece98d..9963f85f30 100644
--- a/src/armnn/layers/LogSoftmaxLayer.hpp
+++ b/src/armnn/layers/LogSoftmaxLayer.hpp
@@ -29,7 +29,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a LogSoftmaxLayer.
diff --git a/src/armnn/layers/LogicalBinaryLayer.cpp b/src/armnn/layers/LogicalBinaryLayer.cpp
index 0ae5ea5641..3940b85e7b 100644
--- a/src/armnn/layers/LogicalBinaryLayer.cpp
+++ b/src/armnn/layers/LogicalBinaryLayer.cpp
@@ -72,9 +72,11 @@ void LogicalBinaryLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LogicalBinaryLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void LogicalBinaryLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitLogicalBinaryLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/LogicalBinaryLayer.hpp b/src/armnn/layers/LogicalBinaryLayer.hpp
index c6b024b36b..caeaa0a1af 100644
--- a/src/armnn/layers/LogicalBinaryLayer.hpp
+++ b/src/armnn/layers/LogicalBinaryLayer.hpp
@@ -35,7 +35,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a LogicalBinaryLayer
diff --git a/src/armnn/layers/LstmLayer.cpp b/src/armnn/layers/LstmLayer.cpp
index 0fea668b97..a18fdb062a 100644
--- a/src/armnn/layers/LstmLayer.cpp
+++ b/src/armnn/layers/LstmLayer.cpp
@@ -300,6 +300,7 @@ Layer::ConstantTensors LstmLayer::GetConstantTensorsByRef()
m_LayerNormParameters.m_OutputLayerNormWeights};
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void LstmLayer::Accept(ILayerVisitor& visitor) const
{
LstmInputParams inputParams;
@@ -509,6 +510,7 @@ void LstmLayer::Accept(ILayerVisitor& visitor) const
visitor.VisitLstmLayer(this, GetParameters(), inputParams, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
void LstmLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/LstmLayer.hpp b/src/armnn/layers/LstmLayer.hpp
index dc6d12a1d8..fbcc03dd6f 100644
--- a/src/armnn/layers/LstmLayer.hpp
+++ b/src/armnn/layers/LstmLayer.hpp
@@ -44,7 +44,9 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/MapLayer.cpp b/src/armnn/layers/MapLayer.cpp
index 608a71eba6..6defdab076 100644
--- a/src/armnn/layers/MapLayer.cpp
+++ b/src/armnn/layers/MapLayer.cpp
@@ -41,10 +41,12 @@ void MapLayer::ValidateTensorShapesFromInputs()
ARMNN_ASSERT(GetNumOutputSlots() == 0);
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void MapLayer::Accept(ILayerVisitor& visitor) const
{
IgnoreUnused(visitor);
throw armnn::Exception("MapLayer should not appear in an input graph");
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/MapLayer.hpp b/src/armnn/layers/MapLayer.hpp
index 620caf73e9..d82c44a36f 100644
--- a/src/armnn/layers/MapLayer.hpp
+++ b/src/armnn/layers/MapLayer.hpp
@@ -28,7 +28,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a MapLayer.
diff --git a/src/armnn/layers/MaximumLayer.cpp b/src/armnn/layers/MaximumLayer.cpp
index d57e9e63ab..95faeea2f2 100644
--- a/src/armnn/layers/MaximumLayer.cpp
+++ b/src/armnn/layers/MaximumLayer.cpp
@@ -31,9 +31,11 @@ MaximumLayer* MaximumLayer::Clone(Graph& graph) const
return CloneBase<MaximumLayer>(graph, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void MaximumLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitMaximumLayer(this, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/MaximumLayer.hpp b/src/armnn/layers/MaximumLayer.hpp
index 743f79b373..f032b8867d 100644
--- a/src/armnn/layers/MaximumLayer.hpp
+++ b/src/armnn/layers/MaximumLayer.hpp
@@ -24,7 +24,9 @@ public:
/// @param [in] graph The graph into which this layer is being cloned.
MaximumLayer* Clone(Graph& graph) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a MaximumLayer.
diff --git a/src/armnn/layers/MeanLayer.cpp b/src/armnn/layers/MeanLayer.cpp
index 9d4265cdcf..b704e2a336 100644
--- a/src/armnn/layers/MeanLayer.cpp
+++ b/src/armnn/layers/MeanLayer.cpp
@@ -103,9 +103,11 @@ void MeanLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShape, m_ShapeInferenceMethod, "MeanLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void MeanLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitMeanLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/MeanLayer.hpp b/src/armnn/layers/MeanLayer.hpp
index 3a094bf6fe..94b0cbe1a3 100644
--- a/src/armnn/layers/MeanLayer.hpp
+++ b/src/armnn/layers/MeanLayer.hpp
@@ -29,7 +29,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a MeanLayer.
diff --git a/src/armnn/layers/MemCopyLayer.cpp b/src/armnn/layers/MemCopyLayer.cpp
index 40c1b98012..61fa462e94 100644
--- a/src/armnn/layers/MemCopyLayer.cpp
+++ b/src/armnn/layers/MemCopyLayer.cpp
@@ -49,11 +49,13 @@ void MemCopyLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MemCopyLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void MemCopyLayer::Accept(ILayerVisitor& visitor) const
{
IgnoreUnused(visitor);
throw armnn::Exception("MemCopyLayer should not appear in an input graph");
}
+ARMNN_NO_DEPRECATE_WARN_END
void MemCopyLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/MemCopyLayer.hpp b/src/armnn/layers/MemCopyLayer.hpp
index b913c529e5..3c6fd0d8d7 100644
--- a/src/armnn/layers/MemCopyLayer.hpp
+++ b/src/armnn/layers/MemCopyLayer.hpp
@@ -28,7 +28,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/MemImportLayer.cpp b/src/armnn/layers/MemImportLayer.cpp
index c96f92bc5e..689678e693 100644
--- a/src/armnn/layers/MemImportLayer.cpp
+++ b/src/armnn/layers/MemImportLayer.cpp
@@ -49,11 +49,13 @@ void MemImportLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MemImportLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void MemImportLayer::Accept(ILayerVisitor& visitor) const
{
IgnoreUnused(visitor);
throw armnn::Exception("MemImportLayer should not appear in an input graph");
}
+ARMNN_NO_DEPRECATE_WARN_END
void MemImportLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/MemImportLayer.hpp b/src/armnn/layers/MemImportLayer.hpp
index 47379701c7..778770132c 100644
--- a/src/armnn/layers/MemImportLayer.hpp
+++ b/src/armnn/layers/MemImportLayer.hpp
@@ -28,7 +28,9 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/MergeLayer.cpp b/src/armnn/layers/MergeLayer.cpp
index 74a31a87b8..2bd29f286d 100644
--- a/src/armnn/layers/MergeLayer.cpp
+++ b/src/armnn/layers/MergeLayer.cpp
@@ -58,9 +58,11 @@ std::vector<TensorShape> MergeLayer::InferOutputShapes(const std::vector<TensorS
return {inputShapes[0]};
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void MergeLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitMergeLayer(this, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/MergeLayer.hpp b/src/armnn/layers/MergeLayer.hpp
index 07f69004b5..d7cfcf3d1f 100644
--- a/src/armnn/layers/MergeLayer.hpp
+++ b/src/armnn/layers/MergeLayer.hpp
@@ -33,7 +33,10 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a MergeLayer.
diff --git a/src/armnn/layers/MinimumLayer.cpp b/src/armnn/layers/MinimumLayer.cpp
index f60815ed6b..38ab442fd5 100644
--- a/src/armnn/layers/MinimumLayer.cpp
+++ b/src/armnn/layers/MinimumLayer.cpp
@@ -32,9 +32,11 @@ MinimumLayer* MinimumLayer::Clone(Graph& graph) const
return CloneBase<MinimumLayer>(graph, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void MinimumLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitMinimumLayer(this, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/MinimumLayer.hpp b/src/armnn/layers/MinimumLayer.hpp
index 2db06292fd..634591e935 100644
--- a/src/armnn/layers/MinimumLayer.hpp
+++ b/src/armnn/layers/MinimumLayer.hpp
@@ -24,7 +24,10 @@ public:
/// @param [in] graph The graph into which this layer is being cloned.
MinimumLayer* Clone(Graph& graph) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a MinimumLayer.
diff --git a/src/armnn/layers/MultiplicationLayer.cpp b/src/armnn/layers/MultiplicationLayer.cpp
index 8fc13aca76..4ff188cc37 100644
--- a/src/armnn/layers/MultiplicationLayer.cpp
+++ b/src/armnn/layers/MultiplicationLayer.cpp
@@ -32,9 +32,11 @@ MultiplicationLayer* MultiplicationLayer::Clone(Graph& graph) const
return CloneBase<MultiplicationLayer>(graph, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void MultiplicationLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitMultiplicationLayer(this, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/MultiplicationLayer.hpp b/src/armnn/layers/MultiplicationLayer.hpp
index 692f40784c..8acf4f6d0d 100644
--- a/src/armnn/layers/MultiplicationLayer.hpp
+++ b/src/armnn/layers/MultiplicationLayer.hpp
@@ -24,7 +24,10 @@ public:
/// @param [in] graph The graph into which this layer is being cloned.
MultiplicationLayer* Clone(Graph& graph) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a MultiplicationLayer.
diff --git a/src/armnn/layers/NormalizationLayer.cpp b/src/armnn/layers/NormalizationLayer.cpp
index 4bf97edb72..bd38fa43b5 100644
--- a/src/armnn/layers/NormalizationLayer.cpp
+++ b/src/armnn/layers/NormalizationLayer.cpp
@@ -46,9 +46,11 @@ void NormalizationLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "NormalizationLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void NormalizationLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitNormalizationLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/NormalizationLayer.hpp b/src/armnn/layers/NormalizationLayer.hpp
index 00a4435527..e36e8863a8 100644
--- a/src/armnn/layers/NormalizationLayer.hpp
+++ b/src/armnn/layers/NormalizationLayer.hpp
@@ -28,7 +28,10 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a NormalizationLayer.
diff --git a/src/armnn/layers/OutputLayer.cpp b/src/armnn/layers/OutputLayer.cpp
index d14337fd11..579aede6b0 100644
--- a/src/armnn/layers/OutputLayer.cpp
+++ b/src/armnn/layers/OutputLayer.cpp
@@ -37,9 +37,11 @@ void OutputLayer::ValidateTensorShapesFromInputs()
"OutputLayer: Input slot must be connected.");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void OutputLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitOutputLayer(this, GetBindingId(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/OutputLayer.hpp b/src/armnn/layers/OutputLayer.hpp
index 408a28a6f3..d2bdf19ddd 100644
--- a/src/armnn/layers/OutputLayer.hpp
+++ b/src/armnn/layers/OutputLayer.hpp
@@ -40,7 +40,10 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create an OutputLayer.
diff --git a/src/armnn/layers/PadLayer.cpp b/src/armnn/layers/PadLayer.cpp
index 2c53f20703..78af9d3c47 100644
--- a/src/armnn/layers/PadLayer.cpp
+++ b/src/armnn/layers/PadLayer.cpp
@@ -71,9 +71,11 @@ void PadLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "PadLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void PadLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitPadLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/PadLayer.hpp b/src/armnn/layers/PadLayer.hpp
index 5664997597..9a31ae5d60 100644
--- a/src/armnn/layers/PadLayer.hpp
+++ b/src/armnn/layers/PadLayer.hpp
@@ -35,7 +35,10 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape> &inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a PadLayer.
diff --git a/src/armnn/layers/PermuteLayer.cpp b/src/armnn/layers/PermuteLayer.cpp
index 859e687cb3..1c563addf9 100644
--- a/src/armnn/layers/PermuteLayer.cpp
+++ b/src/armnn/layers/PermuteLayer.cpp
@@ -57,9 +57,11 @@ void PermuteLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "PermuteLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void PermuteLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitPermuteLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/PermuteLayer.hpp b/src/armnn/layers/PermuteLayer.hpp
index 67be2e1939..db256b361b 100644
--- a/src/armnn/layers/PermuteLayer.hpp
+++ b/src/armnn/layers/PermuteLayer.hpp
@@ -60,7 +60,10 @@ public:
GetPermutation().IsEqual(PolymorphicDowncast<const PermuteLayer*>(&other)->GetPermutation());
}
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a PermuteLayer.
diff --git a/src/armnn/layers/Pooling2dLayer.cpp b/src/armnn/layers/Pooling2dLayer.cpp
index 0deafaacdd..d22bce2022 100644
--- a/src/armnn/layers/Pooling2dLayer.cpp
+++ b/src/armnn/layers/Pooling2dLayer.cpp
@@ -117,9 +117,11 @@ void Pooling2dLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Pooling2dLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void Pooling2dLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitPooling2dLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/Pooling2dLayer.hpp b/src/armnn/layers/Pooling2dLayer.hpp
index 90c9a44fbd..677c10b661 100644
--- a/src/armnn/layers/Pooling2dLayer.hpp
+++ b/src/armnn/layers/Pooling2dLayer.hpp
@@ -34,7 +34,10 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a Pooling2dLayer.
diff --git a/src/armnn/layers/PreCompiledLayer.cpp b/src/armnn/layers/PreCompiledLayer.cpp
index 67c1db4011..14dffe5e80 100644
--- a/src/armnn/layers/PreCompiledLayer.cpp
+++ b/src/armnn/layers/PreCompiledLayer.cpp
@@ -49,11 +49,13 @@ void PreCompiledLayer::SetPreCompiledObject(PreCompiledObjectPtr preCompiledObje
m_PreCompiledObject = std::move(preCompiledObject);
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void PreCompiledLayer::Accept(ILayerVisitor& visitor) const
{
IgnoreUnused(visitor);
throw armnn::Exception("PreCompiledLayer should not appear in an input graph");
}
+ARMNN_NO_DEPRECATE_WARN_END
void PreCompiledLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/PreCompiledLayer.hpp b/src/armnn/layers/PreCompiledLayer.hpp
index 0db1472413..e2c5e802fb 100644
--- a/src/armnn/layers/PreCompiledLayer.hpp
+++ b/src/armnn/layers/PreCompiledLayer.hpp
@@ -33,7 +33,10 @@ public:
void SetPreCompiledObject(PreCompiledObjectPtr preCompiledObject);
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/PreluLayer.cpp b/src/armnn/layers/PreluLayer.cpp
index 18d81ae9b6..9fb9f07f8c 100644
--- a/src/armnn/layers/PreluLayer.cpp
+++ b/src/armnn/layers/PreluLayer.cpp
@@ -116,9 +116,11 @@ void PreluLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "PreluLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void PreluLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitPreluLayer(this, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/PreluLayer.hpp b/src/armnn/layers/PreluLayer.hpp
index 511be29d17..eecffbcd22 100644
--- a/src/armnn/layers/PreluLayer.hpp
+++ b/src/armnn/layers/PreluLayer.hpp
@@ -35,7 +35,10 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a PreluLayer.
diff --git a/src/armnn/layers/QLstmLayer.cpp b/src/armnn/layers/QLstmLayer.cpp
index 0294afdc0d..493e3fe189 100644
--- a/src/armnn/layers/QLstmLayer.cpp
+++ b/src/armnn/layers/QLstmLayer.cpp
@@ -302,6 +302,7 @@ Layer::ConstantTensors QLstmLayer::GetConstantTensorsByRef()
m_LayerNormParameters.m_OutputLayerNormWeights};
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void QLstmLayer::Accept(ILayerVisitor& visitor) const
{
LstmInputParams inputParams;
@@ -531,6 +532,7 @@ void QLstmLayer::Accept(ILayerVisitor& visitor) const
visitor.VisitQLstmLayer(this, GetParameters(), inputParams, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
void QLstmLayer::ExecuteStrategy(IStrategy& strategy) const
diff --git a/src/armnn/layers/QLstmLayer.hpp b/src/armnn/layers/QLstmLayer.hpp
index 38a0464da6..12774a935e 100644
--- a/src/armnn/layers/QLstmLayer.hpp
+++ b/src/armnn/layers/QLstmLayer.hpp
@@ -107,7 +107,10 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/QuantizeLayer.cpp b/src/armnn/layers/QuantizeLayer.cpp
index 6ce28c4153..e37d6f5300 100644
--- a/src/armnn/layers/QuantizeLayer.cpp
+++ b/src/armnn/layers/QuantizeLayer.cpp
@@ -45,9 +45,11 @@ void QuantizeLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "QuantizeLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void QuantizeLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitQuantizeLayer(this, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} //namespace armnn \ No newline at end of file
diff --git a/src/armnn/layers/QuantizeLayer.hpp b/src/armnn/layers/QuantizeLayer.hpp
index 2f331a493c..d8898ba1e9 100644
--- a/src/armnn/layers/QuantizeLayer.hpp
+++ b/src/armnn/layers/QuantizeLayer.hpp
@@ -23,7 +23,10 @@ public:
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
QuantizeLayer(const char* name);
diff --git a/src/armnn/layers/QuantizedLstmLayer.cpp b/src/armnn/layers/QuantizedLstmLayer.cpp
index be50f4863b..81642198fb 100644
--- a/src/armnn/layers/QuantizedLstmLayer.cpp
+++ b/src/armnn/layers/QuantizedLstmLayer.cpp
@@ -169,6 +169,7 @@ Layer::ConstantTensors QuantizedLstmLayer::GetConstantTensorsByRef()
};
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void QuantizedLstmLayer::Accept(ILayerVisitor& visitor) const
{
QuantizedLstmInputParams inputParams;
@@ -305,6 +306,7 @@ void QuantizedLstmLayer::Accept(ILayerVisitor& visitor) const
visitor.VisitQuantizedLstmLayer(this, inputParams, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
void QuantizedLstmLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/QuantizedLstmLayer.hpp b/src/armnn/layers/QuantizedLstmLayer.hpp
index 25cc7b7d8b..fe7d423145 100644
--- a/src/armnn/layers/QuantizedLstmLayer.hpp
+++ b/src/armnn/layers/QuantizedLstmLayer.hpp
@@ -69,7 +69,10 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/RankLayer.cpp b/src/armnn/layers/RankLayer.cpp
index 3b14ef0d93..a1e06efa11 100644
--- a/src/armnn/layers/RankLayer.cpp
+++ b/src/armnn/layers/RankLayer.cpp
@@ -41,10 +41,13 @@ void RankLayer::ValidateTensorShapesFromInputs()
VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
ValidateAndCopyShape(outputShape, inferredShape, m_ShapeInferenceMethod, "RankLayer");
}
+
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void RankLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitRankLayer(this, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
void RankLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/RankLayer.hpp b/src/armnn/layers/RankLayer.hpp
index fbd2824bb5..416e1b0f6e 100644
--- a/src/armnn/layers/RankLayer.hpp
+++ b/src/armnn/layers/RankLayer.hpp
@@ -22,7 +22,10 @@ class RankLayer : public Layer
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/ReduceLayer.cpp b/src/armnn/layers/ReduceLayer.cpp
index 31a2dfa479..07651fca67 100644
--- a/src/armnn/layers/ReduceLayer.cpp
+++ b/src/armnn/layers/ReduceLayer.cpp
@@ -102,9 +102,11 @@ void ReduceLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShape, m_ShapeInferenceMethod, "ReduceLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void ReduceLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitReduceLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ReduceLayer.hpp b/src/armnn/layers/ReduceLayer.hpp
index fd4f2073f1..a6ac44e69c 100644
--- a/src/armnn/layers/ReduceLayer.hpp
+++ b/src/armnn/layers/ReduceLayer.hpp
@@ -27,7 +27,10 @@ public:
/// will lead to a valid configuration of @ref ReduceLayer.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a ReduceLayer.
diff --git a/src/armnn/layers/ReshapeLayer.cpp b/src/armnn/layers/ReshapeLayer.cpp
index f303ff7c68..1b9e691bcf 100644
--- a/src/armnn/layers/ReshapeLayer.cpp
+++ b/src/armnn/layers/ReshapeLayer.cpp
@@ -53,9 +53,11 @@ void ReshapeLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ReshapeLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void ReshapeLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitReshapeLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ReshapeLayer.hpp b/src/armnn/layers/ReshapeLayer.hpp
index 78335e6a1a..d107b5cfc8 100644
--- a/src/armnn/layers/ReshapeLayer.hpp
+++ b/src/armnn/layers/ReshapeLayer.hpp
@@ -45,7 +45,10 @@ public:
m_Param.m_TargetShape == PolymorphicDowncast<const ReshapeLayer*>(&other)->m_Param.m_TargetShape;
}
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a ReshapeLayer.
diff --git a/src/armnn/layers/ResizeLayer.cpp b/src/armnn/layers/ResizeLayer.cpp
index 3a390d43cd..c190f494d1 100644
--- a/src/armnn/layers/ResizeLayer.cpp
+++ b/src/armnn/layers/ResizeLayer.cpp
@@ -75,9 +75,11 @@ void ResizeLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ResizeLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void ResizeLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitResizeLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/ResizeLayer.hpp b/src/armnn/layers/ResizeLayer.hpp
index 34625857f8..fab18c7716 100644
--- a/src/armnn/layers/ResizeLayer.hpp
+++ b/src/armnn/layers/ResizeLayer.hpp
@@ -34,7 +34,10 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a ResizeLayer.
diff --git a/src/armnn/layers/RsqrtLayer.cpp b/src/armnn/layers/RsqrtLayer.cpp
index 9c09701ab8..a0572da7c3 100644
--- a/src/armnn/layers/RsqrtLayer.cpp
+++ b/src/armnn/layers/RsqrtLayer.cpp
@@ -47,9 +47,11 @@ void RsqrtLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "RsqrtLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void RsqrtLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitRsqrtLayer(this, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn \ No newline at end of file
diff --git a/src/armnn/layers/RsqrtLayer.hpp b/src/armnn/layers/RsqrtLayer.hpp
index 4fcbf72120..a31aea6498 100644
--- a/src/armnn/layers/RsqrtLayer.hpp
+++ b/src/armnn/layers/RsqrtLayer.hpp
@@ -28,7 +28,10 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create an RsqrtLayer.
diff --git a/src/armnn/layers/ShapeLayer.cpp b/src/armnn/layers/ShapeLayer.cpp
index 4193fa9aab..6a55a2d296 100644
--- a/src/armnn/layers/ShapeLayer.cpp
+++ b/src/armnn/layers/ShapeLayer.cpp
@@ -59,11 +59,13 @@ std::vector<TensorShape> ShapeLayer::InferOutputShapes(const std::vector<TensorS
return std::vector<TensorShape>({ outputShape });
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void ShapeLayer::Accept(ILayerVisitor& visitor) const
{
IgnoreUnused(visitor);
throw armnn::Exception("ShapeLayer VisitShapeLayer is not implemented");
}
+ARMNN_NO_DEPRECATE_WARN_END
void ShapeLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/ShapeLayer.hpp b/src/armnn/layers/ShapeLayer.hpp
index fee285c2f0..35ef873792 100644
--- a/src/armnn/layers/ShapeLayer.hpp
+++ b/src/armnn/layers/ShapeLayer.hpp
@@ -34,7 +34,10 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/SliceLayer.cpp b/src/armnn/layers/SliceLayer.cpp
index b512ca4915..e7d8f1ed19 100644
--- a/src/armnn/layers/SliceLayer.cpp
+++ b/src/armnn/layers/SliceLayer.cpp
@@ -59,9 +59,11 @@ std::vector<TensorShape> SliceLayer::InferOutputShapes(const std::vector<TensorS
return std::vector<TensorShape>({ outputShape });
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void SliceLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitSliceLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/SliceLayer.hpp b/src/armnn/layers/SliceLayer.hpp
index 0505a056c5..dda66a1be6 100644
--- a/src/armnn/layers/SliceLayer.hpp
+++ b/src/armnn/layers/SliceLayer.hpp
@@ -34,7 +34,10 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a SliceLayer.
diff --git a/src/armnn/layers/SoftmaxLayer.cpp b/src/armnn/layers/SoftmaxLayer.cpp
index 9882da42b0..eab5b85e45 100644
--- a/src/armnn/layers/SoftmaxLayer.cpp
+++ b/src/armnn/layers/SoftmaxLayer.cpp
@@ -46,9 +46,11 @@ void SoftmaxLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SoftmaxLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void SoftmaxLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitSoftmaxLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/SoftmaxLayer.hpp b/src/armnn/layers/SoftmaxLayer.hpp
index cbdd7c58f9..035e7bcf2d 100644
--- a/src/armnn/layers/SoftmaxLayer.hpp
+++ b/src/armnn/layers/SoftmaxLayer.hpp
@@ -28,7 +28,10 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a SoftmaxLayer.
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.cpp b/src/armnn/layers/SpaceToBatchNdLayer.cpp
index b9e33314ef..3f58b3f6c9 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.cpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.cpp
@@ -83,9 +83,11 @@ void SpaceToBatchNdLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SpaceToBatchNdLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void SpaceToBatchNdLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitSpaceToBatchNdLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.hpp b/src/armnn/layers/SpaceToBatchNdLayer.hpp
index 28857d8aba..70972bd8b3 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.hpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.hpp
@@ -35,7 +35,10 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a SpaceToBatchNdLayer.
diff --git a/src/armnn/layers/SpaceToDepthLayer.cpp b/src/armnn/layers/SpaceToDepthLayer.cpp
index 90ba8fc8c3..1a3112c495 100644
--- a/src/armnn/layers/SpaceToDepthLayer.cpp
+++ b/src/armnn/layers/SpaceToDepthLayer.cpp
@@ -77,9 +77,11 @@ void SpaceToDepthLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SpaceToDepthLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void SpaceToDepthLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitSpaceToDepthLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/SpaceToDepthLayer.hpp b/src/armnn/layers/SpaceToDepthLayer.hpp
index a8bc1089a3..267ac3b089 100644
--- a/src/armnn/layers/SpaceToDepthLayer.hpp
+++ b/src/armnn/layers/SpaceToDepthLayer.hpp
@@ -35,7 +35,10 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a SpaceToDepthLayer.
diff --git a/src/armnn/layers/SplitterLayer.cpp b/src/armnn/layers/SplitterLayer.cpp
index 5e6622e13a..c1e191c1a8 100644
--- a/src/armnn/layers/SplitterLayer.cpp
+++ b/src/armnn/layers/SplitterLayer.cpp
@@ -241,9 +241,11 @@ void SplitterLayer::ValidateTensorShapesFromInputs()
}
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void SplitterLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitSplitterLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/SplitterLayer.hpp b/src/armnn/layers/SplitterLayer.hpp
index f90696b1ad..1fc37ef295 100644
--- a/src/armnn/layers/SplitterLayer.hpp
+++ b/src/armnn/layers/SplitterLayer.hpp
@@ -43,7 +43,10 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a SplitterLayer.
diff --git a/src/armnn/layers/StackLayer.cpp b/src/armnn/layers/StackLayer.cpp
index 11935a1acf..fe2d123244 100644
--- a/src/armnn/layers/StackLayer.cpp
+++ b/src/armnn/layers/StackLayer.cpp
@@ -95,9 +95,11 @@ void StackLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "StackLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void StackLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitStackLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn armnn
diff --git a/src/armnn/layers/StackLayer.hpp b/src/armnn/layers/StackLayer.hpp
index 3d05da0bf6..8d38907de7 100644
--- a/src/armnn/layers/StackLayer.hpp
+++ b/src/armnn/layers/StackLayer.hpp
@@ -34,7 +34,10 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a StackLayer.
diff --git a/src/armnn/layers/StandInLayer.cpp b/src/armnn/layers/StandInLayer.cpp
index 6281f3e51e..ccf152921a 100644
--- a/src/armnn/layers/StandInLayer.cpp
+++ b/src/armnn/layers/StandInLayer.cpp
@@ -41,8 +41,10 @@ void StandInLayer::ValidateTensorShapesFromInputs()
// so do nothing here.
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void StandInLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitStandInLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/StandInLayer.hpp b/src/armnn/layers/StandInLayer.hpp
index 2864753efa..bb500065eb 100644
--- a/src/armnn/layers/StandInLayer.hpp
+++ b/src/armnn/layers/StandInLayer.hpp
@@ -37,7 +37,10 @@ public:
/// Accepts a visitor object and calls VisitStandInLayer() method.
/// @param visitor The visitor on which to call VisitStandInLayer() method.
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a StandInLayer.
diff --git a/src/armnn/layers/StridedSliceLayer.cpp b/src/armnn/layers/StridedSliceLayer.cpp
index c8f36355ae..aa7012c9a5 100644
--- a/src/armnn/layers/StridedSliceLayer.cpp
+++ b/src/armnn/layers/StridedSliceLayer.cpp
@@ -111,9 +111,11 @@ void StridedSliceLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "StridedSliceLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void StridedSliceLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitStridedSliceLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/StridedSliceLayer.hpp b/src/armnn/layers/StridedSliceLayer.hpp
index 35ac3709da..7e17cb2e84 100644
--- a/src/armnn/layers/StridedSliceLayer.hpp
+++ b/src/armnn/layers/StridedSliceLayer.hpp
@@ -34,7 +34,10 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a StridedSliceLayer.
diff --git a/src/armnn/layers/SubtractionLayer.cpp b/src/armnn/layers/SubtractionLayer.cpp
index 34087bd466..bed708513e 100644
--- a/src/armnn/layers/SubtractionLayer.cpp
+++ b/src/armnn/layers/SubtractionLayer.cpp
@@ -32,9 +32,11 @@ SubtractionLayer* SubtractionLayer::Clone(Graph& graph) const
return CloneBase<SubtractionLayer>(graph, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void SubtractionLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitSubtractionLayer(this, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/SubtractionLayer.hpp b/src/armnn/layers/SubtractionLayer.hpp
index 527b50bcad..8c31479c8e 100644
--- a/src/armnn/layers/SubtractionLayer.hpp
+++ b/src/armnn/layers/SubtractionLayer.hpp
@@ -24,7 +24,10 @@ public:
/// @param [in] graph The graph into which this layer is being cloned.
SubtractionLayer* Clone(Graph& graph) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a SubtractionLayer.
diff --git a/src/armnn/layers/SwitchLayer.cpp b/src/armnn/layers/SwitchLayer.cpp
index 879263955f..258a7ffaec 100644
--- a/src/armnn/layers/SwitchLayer.cpp
+++ b/src/armnn/layers/SwitchLayer.cpp
@@ -52,9 +52,11 @@ void SwitchLayer::ValidateTensorShapesFromInputs()
GetOutputSlot(1).GetTensorInfo().GetShape(), inferredShapes[1], m_ShapeInferenceMethod, "SwitchLayer", 1);
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void SwitchLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitSwitchLayer(this, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/SwitchLayer.hpp b/src/armnn/layers/SwitchLayer.hpp
index 025f379c99..a36261b51a 100644
--- a/src/armnn/layers/SwitchLayer.hpp
+++ b/src/armnn/layers/SwitchLayer.hpp
@@ -28,7 +28,10 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a SwitchLayer.
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp
index c774dd0bbf..acdbebe802 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.cpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp
@@ -121,6 +121,7 @@ Layer::ConstantTensors TransposeConvolution2dLayer::GetConstantTensorsByRef()
return {m_Weight, m_Bias};
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void TransposeConvolution2dLayer::Accept(ILayerVisitor& visitor) const
{
ManagedConstTensorHandle managedWeight(m_Weight);
@@ -136,6 +137,7 @@ void TransposeConvolution2dLayer::Accept(ILayerVisitor& visitor) const
visitor.VisitTransposeConvolution2dLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
void TransposeConvolution2dLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.hpp b/src/armnn/layers/TransposeConvolution2dLayer.hpp
index 1b17dac3c6..b6db41c2b7 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.hpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.hpp
@@ -40,7 +40,10 @@ public:
/// @return A vector of the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/TransposeLayer.cpp b/src/armnn/layers/TransposeLayer.cpp
index 8951fe4637..ffd8693049 100644
--- a/src/armnn/layers/TransposeLayer.cpp
+++ b/src/armnn/layers/TransposeLayer.cpp
@@ -57,9 +57,11 @@ void TransposeLayer::ValidateTensorShapesFromInputs()
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "TransposeLayer");
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void TransposeLayer::Accept(ILayerVisitor& visitor) const
{
visitor.VisitTransposeLayer(this, GetParameters(), GetName());
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/TransposeLayer.hpp b/src/armnn/layers/TransposeLayer.hpp
index a4245242ed..8449db4d9d 100644
--- a/src/armnn/layers/TransposeLayer.hpp
+++ b/src/armnn/layers/TransposeLayer.hpp
@@ -58,7 +58,10 @@ public:
GetPermutation().IsEqual(PolymorphicDowncast<const TransposeLayer*>(&other)->GetPermutation());
}
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a TransposeLayer.
diff --git a/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp b/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp
index 45417069e4..a3671a0c42 100644
--- a/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp
+++ b/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp
@@ -307,11 +307,13 @@ Layer::ConstantTensors UnidirectionalSequenceLstmLayer::GetConstantTensorsByRef(
m_LayerNormParameters.m_OutputLayerNormWeights};
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void UnidirectionalSequenceLstmLayer::Accept(ILayerVisitor& visitor) const
{
IgnoreUnused(visitor);
throw armnn::Exception("UnidirectionalSequenceLstmLayer: VisitUnidirectionalSequenceLstmLayer is not implemented");
}
+ARMNN_NO_DEPRECATE_WARN_END
void UnidirectionalSequenceLstmLayer::ExecuteStrategy(IStrategy& strategy) const
{
diff --git a/src/armnn/layers/UnidirectionalSequenceLstmLayer.hpp b/src/armnn/layers/UnidirectionalSequenceLstmLayer.hpp
index fb59f01ab6..857d2776a9 100644
--- a/src/armnn/layers/UnidirectionalSequenceLstmLayer.hpp
+++ b/src/armnn/layers/UnidirectionalSequenceLstmLayer.hpp
@@ -44,7 +44,10 @@ public:
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
void ExecuteStrategy(IStrategy& strategy) const override;
diff --git a/src/armnn/layers/UnmapLayer.cpp b/src/armnn/layers/UnmapLayer.cpp
index 4a43f9ff21..fa5dd9ebf2 100644
--- a/src/armnn/layers/UnmapLayer.cpp
+++ b/src/armnn/layers/UnmapLayer.cpp
@@ -41,10 +41,12 @@ void UnmapLayer::ValidateTensorShapesFromInputs()
ARMNN_ASSERT(GetNumOutputSlots() == 0);
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
void UnmapLayer::Accept(ILayerVisitor& visitor) const
{
IgnoreUnused(visitor);
throw armnn::Exception("UnmapLayer should not appear in an input graph");
}
+ARMNN_NO_DEPRECATE_WARN_END
} // namespace armnn
diff --git a/src/armnn/layers/UnmapLayer.hpp b/src/armnn/layers/UnmapLayer.hpp
index 12d4342d62..3d1d11534e 100644
--- a/src/armnn/layers/UnmapLayer.hpp
+++ b/src/armnn/layers/UnmapLayer.hpp
@@ -28,7 +28,10 @@ public:
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
void ValidateTensorShapesFromInputs() override;
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
protected:
/// Constructor to create a UnmapLayer.
diff --git a/src/armnn/test/ConstTensorLayerVisitor.cpp b/src/armnn/test/ConstTensorLayerVisitor.cpp
index d3d8698972..e21e777409 100644
--- a/src/armnn/test/ConstTensorLayerVisitor.cpp
+++ b/src/armnn/test/ConstTensorLayerVisitor.cpp
@@ -58,73 +58,6 @@ void TestLstmLayerVisitor::CheckDescriptor(const LstmDescriptor& descriptor)
CHECK(m_Descriptor.m_ProjectionEnabled == descriptor.m_ProjectionEnabled);
}
-void TestLstmLayerVisitor::CheckConstTensorPtrs(const std::string& name,
- const ConstTensor* expected,
- const ConstTensor* actual)
-{
- if (expected == nullptr)
- {
- CHECK_MESSAGE(actual == nullptr, name + " actual should have been a nullptr");
- }
- else
- {
- CHECK_MESSAGE(actual != nullptr, name + " actual should have been set");
- if (actual != nullptr)
- {
- CheckConstTensors(*expected, *actual);
- }
- }
-}
-
-void TestLstmLayerVisitor::CheckInputParameters(const LstmInputParams& inputParams)
-{
- CheckConstTensorPtrs("ProjectionBias", m_InputParams.m_ProjectionBias, inputParams.m_ProjectionBias);
- CheckConstTensorPtrs("ProjectionWeights", m_InputParams.m_ProjectionWeights, inputParams.m_ProjectionWeights);
- CheckConstTensorPtrs("OutputGateBias", m_InputParams.m_OutputGateBias, inputParams.m_OutputGateBias);
- CheckConstTensorPtrs("InputToInputWeights",
- m_InputParams.m_InputToInputWeights, inputParams.m_InputToInputWeights);
- CheckConstTensorPtrs("InputToForgetWeights",
- m_InputParams.m_InputToForgetWeights, inputParams.m_InputToForgetWeights);
- CheckConstTensorPtrs("InputToCellWeights", m_InputParams.m_InputToCellWeights, inputParams.m_InputToCellWeights);
- CheckConstTensorPtrs(
- "InputToOutputWeights", m_InputParams.m_InputToOutputWeights, inputParams.m_InputToOutputWeights);
- CheckConstTensorPtrs(
- "RecurrentToInputWeights", m_InputParams.m_RecurrentToInputWeights, inputParams.m_RecurrentToInputWeights);
- CheckConstTensorPtrs(
- "RecurrentToForgetWeights", m_InputParams.m_RecurrentToForgetWeights, inputParams.m_RecurrentToForgetWeights);
- CheckConstTensorPtrs(
- "RecurrentToCellWeights", m_InputParams.m_RecurrentToCellWeights, inputParams.m_RecurrentToCellWeights);
- CheckConstTensorPtrs(
- "RecurrentToOutputWeights", m_InputParams.m_RecurrentToOutputWeights, inputParams.m_RecurrentToOutputWeights);
- CheckConstTensorPtrs(
- "CellToInputWeights", m_InputParams.m_CellToInputWeights, inputParams.m_CellToInputWeights);
- CheckConstTensorPtrs(
- "CellToForgetWeights", m_InputParams.m_CellToForgetWeights, inputParams.m_CellToForgetWeights);
- CheckConstTensorPtrs(
- "CellToOutputWeights", m_InputParams.m_CellToOutputWeights, inputParams.m_CellToOutputWeights);
- CheckConstTensorPtrs("InputGateBias", m_InputParams.m_InputGateBias, inputParams.m_InputGateBias);
- CheckConstTensorPtrs("ForgetGateBias", m_InputParams.m_ForgetGateBias, inputParams.m_ForgetGateBias);
- CheckConstTensorPtrs("CellBias", m_InputParams.m_CellBias, inputParams.m_CellBias);
-}
-
-void TestQLstmLayerVisitor::CheckConstTensorPtrs(const std::string& name,
- const ConstTensor* expected,
- const ConstTensor* actual)
-{
- if (expected == nullptr)
- {
- CHECK_MESSAGE(actual == nullptr, name + " actual should have been a nullptr");
- }
- else
- {
- CHECK_MESSAGE(actual != nullptr, name + " actual should have been set");
- if (actual != nullptr)
- {
- CheckConstTensors(*expected, *actual);
- }
- }
-}
-
void TestQLstmLayerVisitor::CheckDescriptor(const QLstmDescriptor& descriptor)
{
CHECK(m_Descriptor.m_CellClip == descriptor.m_CellClip);
@@ -134,95 +67,6 @@ void TestQLstmLayerVisitor::CheckDescriptor(const QLstmDescriptor& descriptor)
CHECK(m_Descriptor.m_ProjectionEnabled == descriptor.m_ProjectionEnabled);
}
-void TestQLstmLayerVisitor::CheckInputParameters(const LstmInputParams& inputParams)
-{
- CheckConstTensorPtrs("InputToInputWeights",
- m_InputParams.m_InputToInputWeights,
- inputParams.m_InputToInputWeights);
-
- CheckConstTensorPtrs("InputToForgetWeights",
- m_InputParams.m_InputToForgetWeights,
- inputParams.m_InputToForgetWeights);
-
- CheckConstTensorPtrs("InputToCellWeights",
- m_InputParams.m_InputToCellWeights,
- inputParams.m_InputToCellWeights);
-
- CheckConstTensorPtrs("InputToOutputWeights",
- m_InputParams.m_InputToOutputWeights,
- inputParams.m_InputToOutputWeights);
-
- CheckConstTensorPtrs("RecurrentToInputWeights",
- m_InputParams.m_RecurrentToInputWeights,
- inputParams.m_RecurrentToInputWeights);
-
- CheckConstTensorPtrs("RecurrentToForgetWeights",
- m_InputParams.m_RecurrentToForgetWeights,
- inputParams.m_RecurrentToForgetWeights);
-
- CheckConstTensorPtrs("RecurrentToCellWeights",
- m_InputParams.m_RecurrentToCellWeights,
- inputParams.m_RecurrentToCellWeights);
-
- CheckConstTensorPtrs("RecurrentToOutputWeights",
- m_InputParams.m_RecurrentToOutputWeights,
- inputParams.m_RecurrentToOutputWeights);
-
- CheckConstTensorPtrs("CellToInputWeights",
- m_InputParams.m_CellToInputWeights,
- inputParams.m_CellToInputWeights);
-
- CheckConstTensorPtrs("CellToForgetWeights",
- m_InputParams.m_CellToForgetWeights,
- inputParams.m_CellToForgetWeights);
-
- CheckConstTensorPtrs("CellToOutputWeights",
- m_InputParams.m_CellToOutputWeights,
- inputParams.m_CellToOutputWeights);
-
- CheckConstTensorPtrs("ProjectionWeights", m_InputParams.m_ProjectionWeights, inputParams.m_ProjectionWeights);
- CheckConstTensorPtrs("ProjectionBias", m_InputParams.m_ProjectionBias, inputParams.m_ProjectionBias);
-
- CheckConstTensorPtrs("InputGateBias", m_InputParams.m_InputGateBias, inputParams.m_InputGateBias);
- CheckConstTensorPtrs("ForgetGateBias", m_InputParams.m_ForgetGateBias, inputParams.m_ForgetGateBias);
- CheckConstTensorPtrs("CellBias", m_InputParams.m_CellBias, inputParams.m_CellBias);
- CheckConstTensorPtrs("OutputGateBias", m_InputParams.m_OutputGateBias, inputParams.m_OutputGateBias);
-
- CheckConstTensorPtrs("InputLayerNormWeights",
- m_InputParams.m_InputLayerNormWeights,
- inputParams.m_InputLayerNormWeights);
-
- CheckConstTensorPtrs("ForgetLayerNormWeights",
- m_InputParams.m_ForgetLayerNormWeights,
- inputParams.m_ForgetLayerNormWeights);
-
- CheckConstTensorPtrs("CellLayerNormWeights",
- m_InputParams.m_CellLayerNormWeights,
- inputParams.m_CellLayerNormWeights);
-
- CheckConstTensorPtrs("OutputLayerNormWeights",
- m_InputParams.m_OutputLayerNormWeights,
- inputParams.m_OutputLayerNormWeights);
-}
-
-void TestQuantizedLstmLayerVisitor::CheckConstTensorPtrs(const std::string& name,
- const ConstTensor* expected,
- const ConstTensor* actual)
-{
- if (expected == nullptr)
- {
- CHECK_MESSAGE(actual == nullptr, name + " actual should have been a nullptr");
- }
- else
- {
- CHECK_MESSAGE(actual != nullptr, name + " actual should have been set");
- if (actual != nullptr)
- {
- CheckConstTensors(*expected, *actual);
- }
- }
-}
-
void TestQuantizedLstmLayerVisitor::CheckInputParameters(const QuantizedLstmInputParams& inputParams)
{
CheckConstTensorPtrs("InputToInputWeights",
@@ -285,7 +129,7 @@ TEST_CASE("CheckConvolution2dLayer")
NetworkImpl net;
IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, EmptyOptional());
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckNamedConvolution2dLayer")
@@ -309,7 +153,7 @@ TEST_CASE("CheckNamedConvolution2dLayer")
NetworkImpl net;
IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, EmptyOptional(), layerName);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckConvolution2dLayerWithBiases")
@@ -338,7 +182,7 @@ TEST_CASE("CheckConvolution2dLayerWithBiases")
NetworkImpl net;
IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, optionalBiases);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckNamedConvolution2dLayerWithBiases")
@@ -368,7 +212,7 @@ TEST_CASE("CheckNamedConvolution2dLayerWithBiases")
NetworkImpl net;
IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, optionalBiases, layerName);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckDepthwiseConvolution2dLayer")
@@ -391,7 +235,7 @@ TEST_CASE("CheckDepthwiseConvolution2dLayer")
NetworkImpl net;
IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, EmptyOptional());
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckNamedDepthwiseConvolution2dLayer")
@@ -418,7 +262,7 @@ TEST_CASE("CheckNamedDepthwiseConvolution2dLayer")
weights,
EmptyOptional(),
layerName);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckDepthwiseConvolution2dLayerWithBiases")
@@ -447,7 +291,7 @@ TEST_CASE("CheckDepthwiseConvolution2dLayerWithBiases")
NetworkImpl net;
IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, optionalBiases);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckNamedDepthwiseConvolution2dLayerWithBiases")
@@ -477,7 +321,7 @@ TEST_CASE("CheckNamedDepthwiseConvolution2dLayerWithBiases")
NetworkImpl net;
IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, optionalBiases, layerName);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckFullyConnectedLayer")
@@ -500,8 +344,8 @@ TEST_CASE("CheckFullyConnectedLayer")
IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor);
weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
- weightsLayer->Accept(weightsVisitor);
- layer->Accept(visitor);
+ weightsLayer->ExecuteStrategy(weightsVisitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckNamedFullyConnectedLayer")
@@ -525,8 +369,8 @@ TEST_CASE("CheckNamedFullyConnectedLayer")
IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, layerName);
weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
- weightsLayer->Accept(weightsVisitor);
- layer->Accept(visitor);
+ weightsLayer->ExecuteStrategy(weightsVisitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckFullyConnectedLayerWithBiases")
@@ -556,9 +400,9 @@ TEST_CASE("CheckFullyConnectedLayerWithBiases")
weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
biasesLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2));
- weightsLayer->Accept(weightsVisitor);
- biasesLayer->Accept(biasesVisitor);
- layer->Accept(visitor);
+ weightsLayer->ExecuteStrategy(weightsVisitor);
+ biasesLayer->ExecuteStrategy(biasesVisitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckNamedFullyConnectedLayerWithBiases")
@@ -589,9 +433,9 @@ TEST_CASE("CheckNamedFullyConnectedLayerWithBiases")
weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
biasesLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2));
- weightsLayer->Accept(weightsVisitor);
- biasesLayer->Accept(biasesVisitor);
- layer->Accept(visitor);
+ weightsLayer->ExecuteStrategy(weightsVisitor);
+ biasesLayer->ExecuteStrategy(biasesVisitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckBatchNormalizationLayer")
@@ -621,7 +465,7 @@ TEST_CASE("CheckBatchNormalizationLayer")
NetworkImpl net;
IConnectableLayer* const layer = net.AddBatchNormalizationLayer(descriptor, mean, variance, beta, gamma);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckNamedBatchNormalizationLayer")
@@ -653,7 +497,7 @@ TEST_CASE("CheckNamedBatchNormalizationLayer")
IConnectableLayer* const layer = net.AddBatchNormalizationLayer(
descriptor, mean, variance, beta, gamma, layerName);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckConstLayer")
@@ -667,7 +511,7 @@ TEST_CASE("CheckConstLayer")
NetworkImpl net;
IConnectableLayer* const layer = net.AddConstantLayer(input);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckNamedConstLayer")
@@ -682,7 +526,7 @@ TEST_CASE("CheckNamedConstLayer")
NetworkImpl net;
IConnectableLayer* const layer = net.AddConstantLayer(input, layerName);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckLstmLayerBasic")
@@ -754,7 +598,7 @@ TEST_CASE("CheckLstmLayerBasic")
NetworkImpl net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckNamedLstmLayerBasic")
@@ -827,7 +671,7 @@ TEST_CASE("CheckNamedLstmLayerBasic")
NetworkImpl net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckLstmLayerCifgDisabled")
@@ -918,7 +762,7 @@ TEST_CASE("CheckLstmLayerCifgDisabled")
NetworkImpl net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckNamedLstmLayerCifgDisabled")
@@ -1010,7 +854,7 @@ TEST_CASE("CheckNamedLstmLayerCifgDisabled")
NetworkImpl net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
// TODO add one with peephole
@@ -1097,7 +941,7 @@ TEST_CASE("CheckLstmLayerPeephole")
NetworkImpl net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckLstmLayerPeepholeCifgDisabled")
@@ -1211,7 +1055,7 @@ TEST_CASE("CheckLstmLayerPeepholeCifgDisabled")
NetworkImpl net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckNamedLstmLayerPeephole")
@@ -1298,7 +1142,7 @@ TEST_CASE("CheckNamedLstmLayerPeephole")
NetworkImpl net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
// TODO add one with projection
@@ -1385,7 +1229,7 @@ TEST_CASE("CheckLstmLayerProjection")
NetworkImpl net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckNamedLstmLayerProjection")
@@ -1472,7 +1316,7 @@ TEST_CASE("CheckNamedLstmLayerProjection")
NetworkImpl net;
IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckQLstmLayerBasic")
@@ -1544,7 +1388,7 @@ TEST_CASE("CheckQLstmLayerBasic")
NetworkImpl net;
IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckNamedQLstmLayerBasic")
@@ -1617,7 +1461,7 @@ TEST_CASE("CheckNamedQLstmLayerBasic")
NetworkImpl net;
IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params, layerName);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckQLstmLayerCifgDisabled")
@@ -1712,7 +1556,7 @@ TEST_CASE("CheckQLstmLayerCifgDisabled")
NetworkImpl net;
IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckQLstmLayerCifgDisabledPeepholeEnabled")
@@ -1829,7 +1673,7 @@ TEST_CASE("CheckQLstmLayerCifgDisabledPeepholeEnabled")
NetworkImpl net;
IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckQLstmLayerCifgEnabledPeepholeEnabled")
@@ -1919,7 +1763,7 @@ TEST_CASE("CheckQLstmLayerCifgEnabledPeepholeEnabled")
NetworkImpl net;
IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckQLstmLayerProjectionEnabled")
@@ -2009,7 +1853,7 @@ TEST_CASE("CheckQLstmLayerProjectionEnabled")
NetworkImpl net;
IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckQLstmLayerCifgDisabledLayerNormEnabled")
@@ -2132,7 +1976,7 @@ TEST_CASE("CheckQLstmLayerCifgDisabledLayerNormEnabled")
NetworkImpl net;
IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
@@ -2222,7 +2066,7 @@ TEST_CASE("CheckQuantizedLstmLayer")
NetworkImpl net;
IConnectableLayer* const layer = net.AddQuantizedLstmLayer(params);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckNamedQuantizedLstmLayer")
@@ -2312,7 +2156,7 @@ TEST_CASE("CheckNamedQuantizedLstmLayer")
NetworkImpl net;
IConnectableLayer* const layer = net.AddQuantizedLstmLayer(params, layerName);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
}
diff --git a/src/armnn/test/ConstTensorLayerVisitor.hpp b/src/armnn/test/ConstTensorLayerVisitor.hpp
index 35e2e872f7..5538852b60 100644
--- a/src/armnn/test/ConstTensorLayerVisitor.hpp
+++ b/src/armnn/test/ConstTensorLayerVisitor.hpp
@@ -5,9 +5,14 @@
#pragma once
#include "TestLayerVisitor.hpp"
+#include "LayersFwd.hpp"
#include <armnn/Descriptors.hpp>
#include <armnn/LstmParams.hpp>
#include <armnn/QuantizedLstmParams.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
+#include <backendsCommon/TensorHandle.hpp>
+
+#include <doctest/doctest.h>
namespace armnn
{
@@ -27,17 +32,33 @@ public:
virtual ~TestConvolution2dLayerVisitor() {}
- void VisitConvolution2dLayer(const IConnectableLayer* layer,
- const Convolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char* name = nullptr) override
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+ const armnn::BaseDescriptor& descriptor,
+ const std::vector<armnn::ConstTensor>& constants,
+ const char* name,
+ const armnn::LayerBindingId id = 0) override
{
- CheckLayerPointer(layer);
- CheckLayerName(name);
- CheckDescriptor(convolution2dDescriptor);
- CheckConstTensors(m_Weights, weights);
- CheckOptionalConstTensors(m_Biases, biases);
+ armnn::IgnoreUnused(descriptor, constants, id);
+ switch (layer->GetType())
+ {
+ case armnn::LayerType::Convolution2d:
+ {
+ CheckLayerPointer(layer);
+ CheckLayerName(name);
+ CheckDescriptor(static_cast<const armnn::Convolution2dDescriptor&>(descriptor));
+ CheckConstTensors(m_Weights, constants[0]);
+ if (m_Biases.has_value())
+ {
+ CHECK(constants.size() == 2);
+ CheckConstTensors(m_Biases.value(), constants[1]);
+ }
+ break;
+ }
+ default:
+ {
+ m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+ }
+ }
}
protected:
@@ -64,17 +85,33 @@ public:
virtual ~TestDepthwiseConvolution2dLayerVisitor() {}
- void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char* name = nullptr) override
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+ const armnn::BaseDescriptor& descriptor,
+ const std::vector<armnn::ConstTensor>& constants,
+ const char* name,
+ const armnn::LayerBindingId id = 0) override
{
- CheckLayerPointer(layer);
- CheckLayerName(name);
- CheckDescriptor(convolution2dDescriptor);
- CheckConstTensors(m_Weights, weights);
- CheckOptionalConstTensors(m_Biases, biases);
+ armnn::IgnoreUnused(descriptor, constants, id);
+ switch (layer->GetType())
+ {
+ case armnn::LayerType::DepthwiseConvolution2d:
+ {
+ CheckLayerPointer(layer);
+ CheckLayerName(name);
+ CheckDescriptor(static_cast<const armnn::DepthwiseConvolution2dDescriptor&>(descriptor));
+ CheckConstTensors(m_Weights, constants[0]);
+ if (m_Biases.has_value())
+ {
+ CHECK(constants.size() == 2);
+ CheckConstTensors(m_Biases.value(), constants[1]);
+ }
+ break;
+ }
+ default:
+ {
+ m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+ }
+ }
}
protected:
@@ -97,13 +134,27 @@ public:
virtual ~TestFullyConnectedLayerVistor() {}
- void VisitFullyConnectedLayer(const IConnectableLayer* layer,
- const FullyConnectedDescriptor& fullyConnectedDescriptor,
- const char* name = nullptr) override
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+ const armnn::BaseDescriptor& descriptor,
+ const std::vector<armnn::ConstTensor>& constants,
+ const char* name,
+ const armnn::LayerBindingId id = 0) override
{
- CheckLayerPointer(layer);
- CheckLayerName(name);
- CheckDescriptor(fullyConnectedDescriptor);
+ armnn::IgnoreUnused(descriptor, constants, id);
+ switch (layer->GetType())
+ {
+ case armnn::LayerType::FullyConnected:
+ {
+ CheckLayerPointer(layer);
+ CheckLayerName(name);
+ CheckDescriptor(static_cast<const armnn::FullyConnectedDescriptor&>(descriptor));
+ break;
+ }
+ default:
+ {
+ m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+ }
+ }
}
protected:
@@ -129,21 +180,31 @@ public:
, m_Gamma(gamma)
{}
- void VisitBatchNormalizationLayer(const IConnectableLayer* layer,
- const BatchNormalizationDescriptor& descriptor,
- const ConstTensor& mean,
- const ConstTensor& variance,
- const ConstTensor& beta,
- const ConstTensor& gamma,
- const char* name = nullptr) override
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+ const armnn::BaseDescriptor& descriptor,
+ const std::vector<armnn::ConstTensor>& constants,
+ const char* name,
+ const armnn::LayerBindingId id = 0) override
{
- CheckLayerPointer(layer);
- CheckLayerName(name);
- CheckDescriptor(descriptor);
- CheckConstTensors(m_Mean, mean);
- CheckConstTensors(m_Variance, variance);
- CheckConstTensors(m_Beta, beta);
- CheckConstTensors(m_Gamma, gamma);
+ armnn::IgnoreUnused(descriptor, constants, id);
+ switch (layer->GetType())
+ {
+ case armnn::LayerType::BatchNormalization:
+ {
+ CheckLayerPointer(layer);
+ CheckLayerName(name);
+ CheckDescriptor(static_cast<const armnn::BatchNormalizationDescriptor&>(descriptor));
+ CheckConstTensors(m_Mean, constants[0]);
+ CheckConstTensors(m_Variance, constants[1]);
+ CheckConstTensors(m_Beta, constants[2]);
+ CheckConstTensors(m_Gamma, constants[3]);
+ break;
+ }
+ default:
+ {
+ m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+ }
+ }
}
protected:
@@ -166,81 +227,201 @@ public:
, m_Input(input)
{}
- void VisitConstantLayer(const IConnectableLayer* layer,
- const ConstTensor& input,
- const char* name = nullptr)
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+ const armnn::BaseDescriptor& descriptor,
+ const std::vector<armnn::ConstTensor>& constants,
+ const char* name,
+ const armnn::LayerBindingId id = 0) override
{
- CheckLayerPointer(layer);
- CheckLayerName(name);
- CheckConstTensors(m_Input, input);
+ armnn::IgnoreUnused(descriptor, constants, id);
+ switch (layer->GetType())
+ {
+ case armnn::LayerType::Constant:
+ {
+ CheckLayerPointer(layer);
+ CheckLayerName(name);
+ CheckConstTensors(m_Input, constants[0]);
+ break;
+ }
+ default:
+ {
+ m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+ }
+ }
}
private:
ConstTensor m_Input;
};
-class TestLstmLayerVisitor : public TestLayerVisitor
+// Used to supply utility functions to the actual lstm test visitors
+class LstmVisitor : public TestLayerVisitor
+{
+public:
+ explicit LstmVisitor(const LstmInputParams& params,
+ const char* name = nullptr)
+ : TestLayerVisitor(name)
+ , m_InputParams(params) {}
+
+protected:
+ template<typename LayerType>
+ void CheckInputParameters(const LayerType* layer, const LstmInputParams& inputParams);
+
+ LstmInputParams m_InputParams;
+};
+
+template<typename LayerType>
+void LstmVisitor::CheckInputParameters(const LayerType* layer, const LstmInputParams& inputParams)
+{
+ CheckConstTensorPtrs("OutputGateBias",
+ inputParams.m_OutputGateBias,
+ layer->m_BasicParameters.m_OutputGateBias);
+ CheckConstTensorPtrs("InputToForgetWeights",
+ inputParams.m_InputToForgetWeights,
+ layer->m_BasicParameters.m_InputToForgetWeights);
+ CheckConstTensorPtrs("InputToCellWeights",
+ inputParams.m_InputToCellWeights,
+ layer->m_BasicParameters.m_InputToCellWeights);
+ CheckConstTensorPtrs("InputToOutputWeights",
+ inputParams.m_InputToOutputWeights,
+ layer->m_BasicParameters.m_InputToOutputWeights);
+ CheckConstTensorPtrs("RecurrentToForgetWeights",
+ inputParams.m_RecurrentToForgetWeights,
+ layer->m_BasicParameters.m_RecurrentToForgetWeights);
+ CheckConstTensorPtrs("RecurrentToCellWeights",
+ inputParams.m_RecurrentToCellWeights,
+ layer->m_BasicParameters.m_RecurrentToCellWeights);
+ CheckConstTensorPtrs("RecurrentToOutputWeights",
+ inputParams.m_RecurrentToOutputWeights,
+ layer->m_BasicParameters.m_RecurrentToOutputWeights);
+ CheckConstTensorPtrs("ForgetGateBias",
+ inputParams.m_ForgetGateBias,
+ layer->m_BasicParameters.m_ForgetGateBias);
+ CheckConstTensorPtrs("CellBias",
+ inputParams.m_CellBias,
+ layer->m_BasicParameters.m_CellBias);
+
+ CheckConstTensorPtrs("InputToInputWeights",
+ inputParams.m_InputToInputWeights,
+ layer->m_CifgParameters.m_InputToInputWeights);
+ CheckConstTensorPtrs("RecurrentToInputWeights",
+ inputParams.m_RecurrentToInputWeights,
+ layer->m_CifgParameters.m_RecurrentToInputWeights);
+ CheckConstTensorPtrs("InputGateBias",
+ inputParams.m_InputGateBias,
+ layer->m_CifgParameters.m_InputGateBias);
+
+ CheckConstTensorPtrs("ProjectionBias",
+ inputParams.m_ProjectionBias,
+ layer->m_ProjectionParameters.m_ProjectionBias);
+ CheckConstTensorPtrs("ProjectionWeights",
+ inputParams.m_ProjectionWeights,
+ layer->m_ProjectionParameters.m_ProjectionWeights);
+
+ CheckConstTensorPtrs("CellToInputWeights",
+ inputParams.m_CellToInputWeights,
+ layer->m_PeepholeParameters.m_CellToInputWeights);
+ CheckConstTensorPtrs("CellToForgetWeights",
+ inputParams.m_CellToForgetWeights,
+ layer->m_PeepholeParameters.m_CellToForgetWeights);
+ CheckConstTensorPtrs("CellToOutputWeights",
+ inputParams.m_CellToOutputWeights,
+ layer->m_PeepholeParameters.m_CellToOutputWeights);
+
+ CheckConstTensorPtrs("InputLayerNormWeights",
+ inputParams.m_InputLayerNormWeights,
+ layer->m_LayerNormParameters.m_InputLayerNormWeights);
+ CheckConstTensorPtrs("ForgetLayerNormWeights",
+ inputParams.m_ForgetLayerNormWeights,
+ layer->m_LayerNormParameters.m_ForgetLayerNormWeights);
+ CheckConstTensorPtrs("CellLayerNormWeights",
+ inputParams.m_CellLayerNormWeights,
+ layer->m_LayerNormParameters.m_CellLayerNormWeights);
+ CheckConstTensorPtrs("OutputLayerNormWeights",
+ inputParams.m_OutputLayerNormWeights,
+ layer->m_LayerNormParameters.m_OutputLayerNormWeights);
+}
+
+class TestLstmLayerVisitor : public LstmVisitor
{
public:
explicit TestLstmLayerVisitor(const LstmDescriptor& descriptor,
const LstmInputParams& params,
const char* name = nullptr)
- : TestLayerVisitor(name)
+ : LstmVisitor(params, name)
, m_Descriptor(descriptor)
- , m_InputParams(params)
{}
- void VisitLstmLayer(const IConnectableLayer* layer,
- const LstmDescriptor& descriptor,
- const LstmInputParams& params,
- const char* name = nullptr)
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+ const armnn::BaseDescriptor& descriptor,
+ const std::vector<armnn::ConstTensor>& constants,
+ const char* name,
+ const armnn::LayerBindingId id = 0) override
{
- CheckLayerPointer(layer);
- CheckLayerName(name);
- CheckDescriptor(descriptor);
- CheckInputParameters(params);
+ armnn::IgnoreUnused(descriptor, constants, id);
+ switch (layer->GetType())
+ {
+ case armnn::LayerType::Lstm:
+ {
+ CheckLayerPointer(layer);
+ CheckLayerName(name);
+ CheckDescriptor(static_cast<const armnn::LstmDescriptor&>(descriptor));
+ CheckInputParameters<const LstmLayer>(PolymorphicDowncast<const LstmLayer*>(layer), m_InputParams);
+ break;
+ }
+ default:
+ {
+ m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+ }
+ }
}
protected:
void CheckDescriptor(const LstmDescriptor& descriptor);
- void CheckInputParameters(const LstmInputParams& inputParams);
- void CheckConstTensorPtrs(const std::string& name, const ConstTensor* expected, const ConstTensor* actual);
private:
LstmDescriptor m_Descriptor;
- LstmInputParams m_InputParams;
};
-class TestQLstmLayerVisitor : public TestLayerVisitor
+class TestQLstmLayerVisitor : public LstmVisitor
{
public:
explicit TestQLstmLayerVisitor(const QLstmDescriptor& descriptor,
const LstmInputParams& params,
const char* name = nullptr)
- : TestLayerVisitor(name)
+ : LstmVisitor(params, name)
, m_Descriptor(descriptor)
- , m_InputParams(params)
{}
- void VisitQLstmLayer(const IConnectableLayer* layer,
- const QLstmDescriptor& descriptor,
- const LstmInputParams& params,
- const char* name = nullptr)
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+ const armnn::BaseDescriptor& descriptor,
+ const std::vector<armnn::ConstTensor>& constants,
+ const char* name,
+ const armnn::LayerBindingId id = 0) override
{
- CheckLayerPointer(layer);
- CheckLayerName(name);
- CheckDescriptor(descriptor);
- CheckInputParameters(params);
+ armnn::IgnoreUnused(descriptor, constants, id);
+ switch (layer->GetType())
+ {
+ case armnn::LayerType::QLstm:
+ {
+ CheckLayerPointer(layer);
+ CheckLayerName(name);
+ CheckDescriptor(static_cast<const armnn::QLstmDescriptor&>(descriptor));
+ CheckInputParameters<const QLstmLayer>(PolymorphicDowncast<const QLstmLayer*>(layer), m_InputParams);
+ break;
+ }
+ default:
+ {
+ m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+ }
+ }
}
protected:
void CheckDescriptor(const QLstmDescriptor& descriptor);
- void CheckInputParameters(const LstmInputParams& inputParams);
- void CheckConstTensorPtrs(const std::string& name, const ConstTensor* expected, const ConstTensor* actual);
private:
QLstmDescriptor m_Descriptor;
- LstmInputParams m_InputParams;
};
@@ -253,18 +434,31 @@ public:
, m_InputParams(params)
{}
- void VisitQuantizedLstmLayer(const IConnectableLayer* layer,
- const QuantizedLstmInputParams& params,
- const char* name = nullptr)
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+ const armnn::BaseDescriptor& descriptor,
+ const std::vector<armnn::ConstTensor>& constants,
+ const char* name,
+ const armnn::LayerBindingId id = 0) override
{
- CheckLayerPointer(layer);
- CheckLayerName(name);
- CheckInputParameters(params);
+ armnn::IgnoreUnused(descriptor, constants, id);
+ switch (layer->GetType())
+ {
+ case armnn::LayerType::QuantizedLstm:
+ {
+ CheckLayerPointer(layer);
+ CheckLayerName(name);
+ CheckInputParameters(m_InputParams);
+ break;
+ }
+ default:
+ {
+ m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+ }
+ }
}
protected:
- void CheckInputParameters(const QuantizedLstmInputParams& inputParams);
- void CheckConstTensorPtrs(const std::string& name, const ConstTensor* expected, const ConstTensor* actual);
+ void CheckInputParameters(const QuantizedLstmInputParams& params);
private:
QuantizedLstmInputParams m_InputParams;
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index 9acb60df4a..25dab596fd 100644
--- a/src/armnn/test/NetworkTests.cpp
+++ b/src/armnn/test/NetworkTests.cpp
@@ -398,26 +398,44 @@ TEST_CASE("NetworkModification_SplitterMultiplication")
TEST_CASE("Network_AddQuantize")
{
- struct Test : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
+ struct Test : public armnn::IStrategy
{
- void VisitQuantizeLayer(const armnn::IConnectableLayer* layer, const char* name) override
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+ const armnn::BaseDescriptor& descriptor,
+ const std::vector<armnn::ConstTensor>& constants,
+ const char* name,
+ const armnn::LayerBindingId id = 0) override
{
- m_Visited = true;
-
- CHECK(layer);
-
- std::string expectedName = std::string("quantize");
- CHECK(std::string(layer->GetName()) == expectedName);
- CHECK(std::string(name) == expectedName);
-
- CHECK(layer->GetNumInputSlots() == 1);
- CHECK(layer->GetNumOutputSlots() == 1);
-
- const armnn::TensorInfo& infoIn = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
- CHECK((infoIn.GetDataType() == armnn::DataType::Float32));
-
- const armnn::TensorInfo& infoOut = layer->GetOutputSlot(0).GetTensorInfo();
- CHECK((infoOut.GetDataType() == armnn::DataType::QAsymmU8));
+ armnn::IgnoreUnused(descriptor, constants, id);
+ switch (layer->GetType())
+ {
+ case armnn::LayerType::Input: break;
+ case armnn::LayerType::Output: break;
+ case armnn::LayerType::Quantize:
+ {
+ m_Visited = true;
+
+ CHECK(layer);
+
+ std::string expectedName = std::string("quantize");
+ CHECK(std::string(layer->GetName()) == expectedName);
+ CHECK(std::string(name) == expectedName);
+
+ CHECK(layer->GetNumInputSlots() == 1);
+ CHECK(layer->GetNumOutputSlots() == 1);
+
+ const armnn::TensorInfo& infoIn = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
+ CHECK((infoIn.GetDataType() == armnn::DataType::Float32));
+
+ const armnn::TensorInfo& infoOut = layer->GetOutputSlot(0).GetTensorInfo();
+ CHECK((infoOut.GetDataType() == armnn::DataType::QAsymmU8));
+ break;
+ }
+ default:
+ {
+ // nothing
+ }
+ }
}
bool m_Visited = false;
@@ -440,7 +458,7 @@ TEST_CASE("Network_AddQuantize")
quantize->GetOutputSlot(0).SetTensorInfo(infoOut);
Test testQuantize;
- graph->Accept(testQuantize);
+ graph->ExecuteStrategy(testQuantize);
CHECK(testQuantize.m_Visited == true);
@@ -448,29 +466,47 @@ TEST_CASE("Network_AddQuantize")
TEST_CASE("Network_AddMerge")
{
- struct Test : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
+ struct Test : public armnn::IStrategy
{
- void VisitMergeLayer(const armnn::IConnectableLayer* layer, const char* name) override
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+ const armnn::BaseDescriptor& descriptor,
+ const std::vector<armnn::ConstTensor>& constants,
+ const char* name,
+ const armnn::LayerBindingId id = 0) override
{
- m_Visited = true;
-
- CHECK(layer);
-
- std::string expectedName = std::string("merge");
- CHECK(std::string(layer->GetName()) == expectedName);
- CHECK(std::string(name) == expectedName);
-
- CHECK(layer->GetNumInputSlots() == 2);
- CHECK(layer->GetNumOutputSlots() == 1);
-
- const armnn::TensorInfo& infoIn0 = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
- CHECK((infoIn0.GetDataType() == armnn::DataType::Float32));
-
- const armnn::TensorInfo& infoIn1 = layer->GetInputSlot(1).GetConnection()->GetTensorInfo();
- CHECK((infoIn1.GetDataType() == armnn::DataType::Float32));
-
- const armnn::TensorInfo& infoOut = layer->GetOutputSlot(0).GetTensorInfo();
- CHECK((infoOut.GetDataType() == armnn::DataType::Float32));
+ armnn::IgnoreUnused(descriptor, constants, id);
+ switch (layer->GetType())
+ {
+ case armnn::LayerType::Input: break;
+ case armnn::LayerType::Output: break;
+ case armnn::LayerType::Merge:
+ {
+ m_Visited = true;
+
+ CHECK(layer);
+
+ std::string expectedName = std::string("merge");
+ CHECK(std::string(layer->GetName()) == expectedName);
+ CHECK(std::string(name) == expectedName);
+
+ CHECK(layer->GetNumInputSlots() == 2);
+ CHECK(layer->GetNumOutputSlots() == 1);
+
+ const armnn::TensorInfo& infoIn0 = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
+ CHECK((infoIn0.GetDataType() == armnn::DataType::Float32));
+
+ const armnn::TensorInfo& infoIn1 = layer->GetInputSlot(1).GetConnection()->GetTensorInfo();
+ CHECK((infoIn1.GetDataType() == armnn::DataType::Float32));
+
+ const armnn::TensorInfo& infoOut = layer->GetOutputSlot(0).GetTensorInfo();
+ CHECK((infoOut.GetDataType() == armnn::DataType::Float32));
+ break;
+ }
+ default:
+ {
+ // nothing
+ }
+ }
}
bool m_Visited = false;
@@ -493,7 +529,7 @@ TEST_CASE("Network_AddMerge")
merge->GetOutputSlot(0).SetTensorInfo(info);
Test testMerge;
- network->Accept(testMerge);
+ network->ExecuteStrategy(testMerge);
CHECK(testMerge.m_Visited == true);
}
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index 66da3ad1ff..8416a8dd0d 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -13,13 +13,12 @@
#include <armnn/BackendHelper.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/INetwork.hpp>
-#include <armnn/LayerVisitorBase.hpp>
+#include <armnn/StrategyBase.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <armnnUtils/FloatingPointConverter.hpp>
+#include <armnn/backends/IBackendInternal.hpp>
-#include <backendsCommon/IBackendInternal.hpp>
#include <backendsCommon/LayerSupportBase.hpp>
#include <backendsCommon/TensorHandle.hpp>
@@ -201,10 +200,6 @@ public:
return nullptr;
}
- IBackendInternal::Optimizations GetOptimizations() const override
- {
- return {};
- }
IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override
{
return std::make_shared<MockLayerSupport>();
@@ -265,10 +260,6 @@ public:
return nullptr;
}
- IBackendInternal::Optimizations GetOptimizations() const override
- {
- return {};
- }
IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override
{
return std::make_shared<MockLayerSupport>();
@@ -707,30 +698,42 @@ TEST_CASE("BackendCapabilityTest")
TEST_CASE("BackendHintTest")
{
- class TestBackendAssignment : public LayerVisitorBase<VisitorNoThrowPolicy>
+ class TestBackendAssignment : public StrategyBase<NoThrowStrategy>
{
public:
- void VisitInputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name = nullptr) override
- {
- IgnoreUnused(id, name);
- auto inputLayer = PolymorphicDowncast<const InputLayer*>(layer);
- CHECK((inputLayer->GetBackendId() == "MockBackend"));
- }
-
- void VisitOutputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name = nullptr) override
- {
- IgnoreUnused(id, name);
- auto outputLayer = PolymorphicDowncast<const OutputLayer*>(layer);
- CHECK((outputLayer->GetBackendId() == "MockBackend"));
- }
- void VisitActivationLayer(const IConnectableLayer* layer,
- const ActivationDescriptor& activationDescriptor,
- const char* name = nullptr) override
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+ const armnn::BaseDescriptor& descriptor,
+ const std::vector<armnn::ConstTensor>& constants,
+ const char* name,
+ const armnn::LayerBindingId id = 0) override
{
- IgnoreUnused(activationDescriptor, name);
- auto activation = PolymorphicDowncast<const ActivationLayer*>(layer);
- CHECK((activation->GetBackendId() == "CustomBackend"));
+ armnn::IgnoreUnused(descriptor, constants, id, name);
+ switch (layer->GetType())
+ {
+ case armnn::LayerType::Input:
+ {
+ auto inputLayer = PolymorphicDowncast<const InputLayer*>(layer);
+ CHECK((inputLayer->GetBackendId() == "MockBackend"));
+ break;
+ }
+ case armnn::LayerType::Output:
+ {
+ auto outputLayer = PolymorphicDowncast<const OutputLayer*>(layer);
+ CHECK((outputLayer->GetBackendId() == "MockBackend"));
+ break;
+ }
+ case armnn::LayerType::Activation:
+ {
+ auto activation = PolymorphicDowncast<const ActivationLayer*>(layer);
+ CHECK((activation->GetBackendId() == "CustomBackend"));
+ break;
+ }
+ default:
+ {
+ m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+ }
+ }
}
};
@@ -802,7 +805,7 @@ TEST_CASE("BackendHintTest")
TestBackendAssignment visitor;
for (auto it = firstLayer; it != lastLayer; ++it)
{
- (*it)->Accept(visitor);
+ (*it)->ExecuteStrategy(visitor);
}
// Clean up the registry for the next test.
backendRegistry.Deregister("MockBackend");
diff --git a/src/armnn/test/TestInputOutputLayerVisitor.cpp b/src/armnn/test/TestInputOutputLayerVisitor.cpp
index 8462290f81..3b18e07694 100644
--- a/src/armnn/test/TestInputOutputLayerVisitor.cpp
+++ b/src/armnn/test/TestInputOutputLayerVisitor.cpp
@@ -19,7 +19,7 @@ TEST_CASE("CheckInputLayerVisitorBindingIdAndName")
NetworkImpl net;
IConnectableLayer *const layer = net.AddInputLayer(1, layerName);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckInputLayerVisitorBindingIdAndNameNull")
@@ -28,7 +28,7 @@ TEST_CASE("CheckInputLayerVisitorBindingIdAndNameNull")
NetworkImpl net;
IConnectableLayer *const layer = net.AddInputLayer(1);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckOutputLayerVisitorBindingIdAndName")
@@ -38,7 +38,7 @@ TEST_CASE("CheckOutputLayerVisitorBindingIdAndName")
NetworkImpl net;
IConnectableLayer *const layer = net.AddOutputLayer(1, layerName);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
TEST_CASE("CheckOutputLayerVisitorBindingIdAndNameNull")
@@ -47,7 +47,7 @@ TEST_CASE("CheckOutputLayerVisitorBindingIdAndNameNull")
NetworkImpl net;
IConnectableLayer *const layer = net.AddOutputLayer(1);
- layer->Accept(visitor);
+ layer->ExecuteStrategy(visitor);
}
}
diff --git a/src/armnn/test/TestInputOutputLayerVisitor.hpp b/src/armnn/test/TestInputOutputLayerVisitor.hpp
index b89089530e..e812f2f97d 100644
--- a/src/armnn/test/TestInputOutputLayerVisitor.hpp
+++ b/src/armnn/test/TestInputOutputLayerVisitor.hpp
@@ -27,14 +27,28 @@ public:
, visitorId(id)
{};
- void VisitInputLayer(const IConnectableLayer* layer,
- LayerBindingId id,
- const char* name = nullptr) override
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+ const armnn::BaseDescriptor& descriptor,
+ const std::vector<armnn::ConstTensor>& constants,
+ const char* name,
+ const armnn::LayerBindingId id = 0) override
{
- CheckLayerPointer(layer);
- CheckLayerBindingId(visitorId, id);
- CheckLayerName(name);
- };
+ armnn::IgnoreUnused(descriptor, constants, id);
+ switch (layer->GetType())
+ {
+ case armnn::LayerType::Input:
+ {
+ CheckLayerPointer(layer);
+ CheckLayerBindingId(visitorId, id);
+ CheckLayerName(name);
+ break;
+ }
+ default:
+ {
+ m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+ }
+ }
+ }
};
class TestOutputLayerVisitor : public TestLayerVisitor
@@ -48,14 +62,28 @@ public:
, visitorId(id)
{};
- void VisitOutputLayer(const IConnectableLayer* layer,
- LayerBindingId id,
- const char* name = nullptr) override
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+ const armnn::BaseDescriptor& descriptor,
+ const std::vector<armnn::ConstTensor>& constants,
+ const char* name,
+ const armnn::LayerBindingId id = 0) override
{
- CheckLayerPointer(layer);
- CheckLayerBindingId(visitorId, id);
- CheckLayerName(name);
- };
+ armnn::IgnoreUnused(descriptor, constants, id);
+ switch (layer->GetType())
+ {
+ case armnn::LayerType::Output:
+ {
+ CheckLayerPointer(layer);
+ CheckLayerBindingId(visitorId, id);
+ CheckLayerName(name);
+ break;
+ }
+ default:
+ {
+ m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+ }
+ }
+ }
};
} //namespace armnn
diff --git a/src/armnn/test/TestLayerVisitor.cpp b/src/armnn/test/TestLayerVisitor.cpp
index ec405119d1..d5f705f0da 100644
--- a/src/armnn/test/TestLayerVisitor.cpp
+++ b/src/armnn/test/TestLayerVisitor.cpp
@@ -49,6 +49,62 @@ void TestLayerVisitor::CheckConstTensors(const ConstTensor& expected, const Cons
}
}
+void TestLayerVisitor::CheckConstTensors(const ConstTensor& expected, const ConstTensorHandle& actual)
+{
+ auto& actualInfo = actual.GetTensorInfo();
+ CHECK(expected.GetInfo() == actualInfo);
+ CHECK(expected.GetNumDimensions() == actualInfo.GetNumDimensions());
+ CHECK(expected.GetNumElements() == actualInfo.GetNumElements());
+ CHECK(expected.GetNumBytes() == actualInfo.GetNumBytes());
+ if (expected.GetNumBytes() == actualInfo.GetNumBytes())
+ {
+ //check data is the same byte by byte
+ const unsigned char* expectedPtr = static_cast<const unsigned char*>(expected.GetMemoryArea());
+ const unsigned char* actualPtr = static_cast<const unsigned char*>(actual.Map(true));
+ for (unsigned int i = 0; i < expected.GetNumBytes(); i++)
+ {
+ CHECK(*(expectedPtr + i) == *(actualPtr + i));
+ }
+ actual.Unmap();
+ }
+}
+
+void TestLayerVisitor::CheckConstTensorPtrs(const std::string& name,
+ const ConstTensor* expected,
+ const std::shared_ptr<ConstTensorHandle> actual)
+{
+ if (expected == nullptr)
+ {
+ CHECK_MESSAGE(actual == nullptr, name + " actual should have been a nullptr");
+ }
+ else
+ {
+ CHECK_MESSAGE(actual != nullptr, name + " actual should have been set");
+ if (actual != nullptr)
+ {
+ CheckConstTensors(*expected, *actual);
+ }
+ }
+}
+
+void TestLayerVisitor::CheckConstTensorPtrs(const std::string& name,
+ const ConstTensor* expected,
+ const ConstTensor* actual)
+{
+ if (expected == nullptr)
+ {
+ CHECK_MESSAGE(actual == nullptr, name + " actual should have been a nullptr");
+ }
+ else
+ {
+ CHECK_MESSAGE(actual != nullptr, name + " actual should have been set");
+ if (actual != nullptr)
+ {
+ CheckConstTensors(*expected, *actual);
+ }
+ }
+}
+
void TestLayerVisitor::CheckOptionalConstTensors(const Optional<ConstTensor>& expected,
const Optional<ConstTensor>& actual)
{
diff --git a/src/armnn/test/TestLayerVisitor.hpp b/src/armnn/test/TestLayerVisitor.hpp
index e43227f520..eaf1667800 100644
--- a/src/armnn/test/TestLayerVisitor.hpp
+++ b/src/armnn/test/TestLayerVisitor.hpp
@@ -4,13 +4,14 @@
//
#pragma once
-#include <armnn/LayerVisitorBase.hpp>
+#include <armnn/StrategyBase.hpp>
#include <armnn/Descriptors.hpp>
+#include <backendsCommon/TensorHandle.hpp>
namespace armnn
{
-// Abstract base class with do nothing implementations for all layer visit methods
-class TestLayerVisitor : public LayerVisitorBase<VisitorNoThrowPolicy>
+// Abstract base class with do nothing implementations for all layers
+class TestLayerVisitor : public StrategyBase<NoThrowStrategy>
{
protected:
virtual ~TestLayerVisitor() {}
@@ -19,7 +20,17 @@ protected:
void CheckLayerPointer(const IConnectableLayer* layer);
- void CheckConstTensors(const ConstTensor& expected, const ConstTensor& actual);
+ void CheckConstTensors(const ConstTensor& expected,
+ const ConstTensor& actual);
+ void CheckConstTensors(const ConstTensor& expected,
+ const ConstTensorHandle& actual);
+
+ void CheckConstTensorPtrs(const std::string& name,
+ const ConstTensor* expected,
+ const ConstTensor* actual);
+ void CheckConstTensorPtrs(const std::string& name,
+ const ConstTensor* expected,
+ const std::shared_ptr<ConstTensorHandle> actual);
void CheckOptionalConstTensors(const Optional<ConstTensor>& expected, const Optional<ConstTensor>& actual);
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
index 39c00f4604..cfdaaf529b 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
@@ -20,7 +20,7 @@ TEST_CASE(#testName) \
Test##name##LayerVisitor visitor(descriptor, layerName); \
armnn::NetworkImpl net; \
armnn::IConnectableLayer *const layer = net.Add##name##Layer(descriptor, layerName); \
- layer->Accept(visitor); \
+ layer->ExecuteStrategy(visitor); \
}
#define TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(name, testName) \
@@ -30,7 +30,7 @@ TEST_CASE(#testName) \
Test##name##LayerVisitor visitor(descriptor); \
armnn::NetworkImpl net; \
armnn::IConnectableLayer *const layer = net.Add##name##Layer(descriptor); \
- layer->Accept(visitor); \
+ layer->ExecuteStrategy(visitor); \
}
template<typename Descriptor> Descriptor GetDescriptor();
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
index a3c1420388..b1f9512655 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
@@ -29,15 +29,31 @@ public: \
: armnn::TestLayerVisitor(layerName) \
, m_Descriptor(descriptor) {}; \
\
- void Visit##name##Layer(const armnn::IConnectableLayer* layer, \
- const Descriptor& descriptor, \
- const char* layerName = nullptr) override \
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer, \
+ const armnn::BaseDescriptor& descriptor, \
+ const std::vector<armnn::ConstTensor>& constants, \
+ const char* layerName, \
+ const armnn::LayerBindingId id = 0) override \
{ \
- CheckLayerPointer(layer); \
- CheckDescriptor(descriptor); \
- CheckLayerName(layerName); \
+ armnn::IgnoreUnused(descriptor, constants, id); \
+ switch (layer->GetType()) \
+ { \
+ case armnn::LayerType::Input: break; \
+ case armnn::LayerType::Output: break; \
+            case armnn::LayerType::name: \
+ { \
+ CheckLayerPointer(layer); \
+ CheckDescriptor(static_cast<const Descriptor&>(descriptor)); \
+ CheckLayerName(layerName); \
+ break; \
+ } \
+ default: \
+ { \
+ m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType())); \
+ } \
+ } \
} \
-};
+}; \
} // anonymous namespace
diff --git a/src/armnn/test/TestNameOnlyLayerVisitor.cpp b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
index 00d65f8e76..497c36b079 100644
--- a/src/armnn/test/TestNameOnlyLayerVisitor.cpp
+++ b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
@@ -18,7 +18,7 @@ TEST_CASE(#testName) \
Test##name##LayerVisitor visitor("name##Layer"); \
armnn::NetworkImpl net; \
armnn::IConnectableLayer *const layer = net.Add##name##Layer("name##Layer"); \
- layer->Accept(visitor); \
+ layer->ExecuteStrategy(visitor); \
}
#define TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(name, testName) \
@@ -27,7 +27,7 @@ TEST_CASE(#testName) \
Test##name##LayerVisitor visitor; \
armnn::NetworkImpl net; \
armnn::IConnectableLayer *const layer = net.Add##name##Layer(); \
- layer->Accept(visitor); \
+ layer->ExecuteStrategy(visitor); \
}
} // anonymous namespace
diff --git a/src/armnn/test/TestNameOnlyLayerVisitor.hpp b/src/armnn/test/TestNameOnlyLayerVisitor.hpp
index 519cbbacc6..c0db857b71 100644
--- a/src/armnn/test/TestNameOnlyLayerVisitor.hpp
+++ b/src/armnn/test/TestNameOnlyLayerVisitor.hpp
@@ -15,12 +15,28 @@ class Test##name##LayerVisitor : public armnn::TestLayerVisitor \
public: \
explicit Test##name##LayerVisitor(const char* layerName = nullptr) : armnn::TestLayerVisitor(layerName) {}; \
\
- void Visit##name##Layer(const armnn::IConnectableLayer* layer, \
- const char* layerName = nullptr) override \
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer, \
+ const armnn::BaseDescriptor& descriptor, \
+ const std::vector<armnn::ConstTensor>& constants, \
+ const char* layerName, \
+ const armnn::LayerBindingId id = 0) override \
{ \
- CheckLayerPointer(layer); \
- CheckLayerName(layerName); \
+ armnn::IgnoreUnused(descriptor, constants, id); \
+ switch (layer->GetType()) \
+ { \
+ case armnn::LayerType::name: \
+ { \
+ CheckLayerPointer(layer); \
+ CheckLayerName(layerName); \
+ break; \
+ } \
+ default: \
+ { \
+ m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType())); \
+ } \
+ } \
} \
+ \
};
} // anonymous namespace
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index eaeab780e4..6b73946af2 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -2412,6 +2412,9 @@ void IDeserializer::DeserializerImpl::ParseResize(GraphPtr graph, unsigned int l
RegisterOutputSlots(graph, layerIndex, layer);
}
+
+/// @note The ResizeBilinear operation was deprecated and removed in favor of the Resize operation.
+/// This function is kept for backwards compatibility.
void IDeserializer::DeserializerImpl::ParseResizeBilinear(GraphPtr graph, unsigned int layerIndex)
{
CHECK_LAYERS(graph, 0, layerIndex);
diff --git a/src/armnnDeserializer/test/DeserializeComparison.cpp b/src/armnnDeserializer/test/DeserializeComparison.cpp
index a941f123d6..3dda34c0fd 100644
--- a/src/armnnDeserializer/test/DeserializeComparison.cpp
+++ b/src/armnnDeserializer/test/DeserializeComparison.cpp
@@ -241,15 +241,6 @@ DECLARE_SIMPLE_COMPARISON_TEST_CASE(LessOrEqual, Float32)
DECLARE_SIMPLE_COMPARISON_TEST_CASE(NotEqual, Float32)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-DECLARE_SIMPLE_COMPARISON_TEST_CASE(Equal, QuantisedAsymm8)
-DECLARE_SIMPLE_COMPARISON_TEST_CASE(Greater, QuantisedAsymm8)
-DECLARE_SIMPLE_COMPARISON_TEST_CASE(GreaterOrEqual, QuantisedAsymm8)
-DECLARE_SIMPLE_COMPARISON_TEST_CASE(Less, QuantisedAsymm8)
-DECLARE_SIMPLE_COMPARISON_TEST_CASE(LessOrEqual, QuantisedAsymm8)
-DECLARE_SIMPLE_COMPARISON_TEST_CASE(NotEqual, QuantisedAsymm8)
-ARMNN_NO_DEPRECATE_WARN_END
-
DECLARE_SIMPLE_COMPARISON_TEST_CASE(Equal, QAsymmU8)
DECLARE_SIMPLE_COMPARISON_TEST_CASE(Greater, QAsymmU8)
DECLARE_SIMPLE_COMPARISON_TEST_CASE(GreaterOrEqual, QAsymmU8)
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index efaf9f81cd..7e1b74e10d 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -155,15 +155,6 @@ void SerializerStrategy::SerializeOutputLayer(const armnn::IConnectableLayer* la
CreateAnyLayer(flatBufferOutputLayer.o, serializer::Layer::Layer_OutputLayer);
}
-void SerializerStrategy::SerializeAbsLayer(const armnn::IConnectableLayer* layer, const char* name)
-{
- IgnoreUnused(name);
- auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Abs);
- auto flatBufferAbsLayer = serializer::CreateAbsLayer(m_flatBufferBuilder, flatBufferBaseLayer);
-
- CreateAnyLayer(flatBufferAbsLayer.o, serializer::Layer::Layer_AbsLayer);
-}
-
// Build FlatBuffer for Activation Layer
void SerializerStrategy::SerializeActivationLayer(const armnn::IConnectableLayer* layer,
const armnn::ActivationDescriptor& descriptor,
@@ -570,16 +561,6 @@ void SerializerStrategy::SerializeElementwiseUnaryLayer(const armnn::IConnectabl
CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_ElementwiseUnaryLayer);
}
-void SerializerStrategy::SerializeEqualLayer(const armnn::IConnectableLayer* layer, const char* name)
-{
- IgnoreUnused(name);
-
- auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Equal);
- auto fbEqualLayer = serializer::CreateEqualLayer(m_flatBufferBuilder, fbBaseLayer);
-
- CreateAnyLayer(fbEqualLayer.o, serializer::Layer::Layer_EqualLayer);
-}
-
void SerializerStrategy::SerializeFillLayer(const armnn::IConnectableLayer* layer,
const armnn::FillDescriptor& fillDescriptor,
const char* name)
@@ -619,17 +600,6 @@ void SerializerStrategy::SerializeGatherLayer(const armnn::IConnectableLayer* la
CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_GatherLayer);
}
-
-void SerializerStrategy::SerializeGreaterLayer(const armnn::IConnectableLayer* layer, const char* name)
-{
- IgnoreUnused(name);
-
- auto fbGreaterBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Greater);
- auto fbGreaterLayer = serializer::CreateGreaterLayer(m_flatBufferBuilder, fbGreaterBaseLayer);
-
- CreateAnyLayer(fbGreaterLayer.o, serializer::Layer::Layer_GreaterLayer);
-}
-
void SerializerStrategy::SerializeInstanceNormalizationLayer(
const armnn::IConnectableLayer* layer,
const armnn::InstanceNormalizationDescriptor& instanceNormalizationDescriptor,
@@ -874,13 +844,6 @@ void SerializerStrategy::SerializeMergeLayer(const armnn::IConnectableLayer* lay
CreateAnyLayer(fbMergeLayer.o, serializer::Layer::Layer_MergeLayer);
}
-void SerializerStrategy::SerializeMergerLayer(const armnn::IConnectableLayer* layer,
- const armnn::MergerDescriptor& mergerDescriptor,
- const char* name)
-{
- SerializeConcatLayer(layer, mergerDescriptor, name);
-}
-
void SerializerStrategy::SerializeConcatLayer(const armnn::IConnectableLayer* layer,
const armnn::ConcatDescriptor& concatDescriptor,
const char* name)
@@ -1034,29 +997,6 @@ void SerializerStrategy::SerializeReshapeLayer(const armnn::IConnectableLayer* l
CreateAnyLayer(flatBufferReshapeLayer.o, serializer::Layer::Layer_ReshapeLayer);
}
-void SerializerStrategy::SerializeResizeBilinearLayer(const armnn::IConnectableLayer* layer,
- const armnn::ResizeBilinearDescriptor& resizeDescriptor,
- const char* name)
-{
- IgnoreUnused(name);
-
- auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ResizeBilinear);
-
- auto flatBufferDescriptor =
- CreateResizeBilinearDescriptor(m_flatBufferBuilder,
- resizeDescriptor.m_TargetWidth,
- resizeDescriptor.m_TargetHeight,
- GetFlatBufferDataLayout(resizeDescriptor.m_DataLayout),
- resizeDescriptor.m_AlignCorners,
- resizeDescriptor.m_HalfPixelCenters);
-
- auto flatBufferLayer = serializer::CreateResizeBilinearLayer(m_flatBufferBuilder,
- flatBufferBaseLayer,
- flatBufferDescriptor);
-
- CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ResizeBilinearLayer);
-}
-
void SerializerStrategy::SerializeResizeLayer(const armnn::IConnectableLayer* layer,
const armnn::ResizeDescriptor& resizeDescriptor,
const char* name)
@@ -1081,16 +1021,6 @@ void SerializerStrategy::SerializeResizeLayer(const armnn::IConnectableLayer* la
CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ResizeLayer);
}
-void SerializerStrategy::SerializeRsqrtLayer(const armnn::IConnectableLayer* layer, const char* name)
-{
- IgnoreUnused(name);
-
- auto fbRsqrtBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Rsqrt);
- auto fbRsqrtLayer = serializer::CreateRsqrtLayer(m_flatBufferBuilder, fbRsqrtBaseLayer);
-
- CreateAnyLayer(fbRsqrtLayer.o, serializer::Layer::Layer_RsqrtLayer);
-}
-
void SerializerStrategy::SerializeSliceLayer(const armnn::IConnectableLayer* layer,
const armnn::SliceDescriptor& sliceDescriptor,
const char* name)
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index 1161095c33..2f827ac059 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -104,10 +104,6 @@ private:
uint32_t m_layerId;
private:
- ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead")
- void SerializeAbsLayer(const armnn::IConnectableLayer* layer,
- const char* name = nullptr);
-
void SerializeActivationLayer(const armnn::IConnectableLayer* layer,
const armnn::ActivationDescriptor& descriptor,
const char* name = nullptr);
@@ -181,9 +177,6 @@ private:
const armnn::ElementwiseUnaryDescriptor& descriptor,
const char* name = nullptr);
- ARMNN_DEPRECATED_MSG("Use VisitComparisonLayer instead")
- void SerializeEqualLayer(const armnn::IConnectableLayer* layer, const char* name);
-
void SerializeFillLayer(const armnn::IConnectableLayer* layer,
const armnn::FillDescriptor& fillDescriptor,
const char* name = nullptr);
@@ -199,9 +192,6 @@ private:
const armnn::GatherDescriptor& gatherDescriptor,
const char* name = nullptr);
- ARMNN_DEPRECATED_MSG("Use VisitComparisonLayer instead")
- void SerializeGreaterLayer(const armnn::IConnectableLayer* layer, const char* name = nullptr);
-
void SerializeInputLayer(const armnn::IConnectableLayer* layer,
armnn::LayerBindingId id,
const char* name = nullptr);
@@ -240,11 +230,6 @@ private:
void SerializeMergeLayer(const armnn::IConnectableLayer* layer,
const char* name = nullptr);
- ARMNN_DEPRECATED_MSG("Use VisitConcatLayer instead")
- void SerializeMergerLayer(const armnn::IConnectableLayer* layer,
- const armnn::MergerDescriptor& mergerDescriptor,
- const char* name = nullptr);
-
void SerializeMultiplicationLayer(const armnn::IConnectableLayer* layer,
const char* name = nullptr);
@@ -294,15 +279,6 @@ private:
const armnn::ResizeDescriptor& resizeDescriptor,
const char* name = nullptr);
- ARMNN_DEPRECATED_MSG("Use VisitResizeLayer instead")
- void SerializeResizeBilinearLayer(const armnn::IConnectableLayer* layer,
- const armnn::ResizeBilinearDescriptor& resizeDescriptor,
- const char* name = nullptr);
-
- ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead")
- void SerializeRsqrtLayer(const armnn::IConnectableLayer* layer,
- const char* name = nullptr);
-
void SerializeSliceLayer(const armnn::IConnectableLayer* layer,
const armnn::SliceDescriptor& sliceDescriptor,
const char* name = nullptr);
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 2f8fd73717..f2c9852607 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -748,7 +748,7 @@ TEST_CASE("SerializeDivision")
deserializedNetwork->ExecuteStrategy(verifier);
}
-TEST_CASE("SerializeDeserializeEqual")
+TEST_CASE("SerializeDeserializeComparisonEqual")
{
const std::string layerName("EqualLayer");
const armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({2, 1, 2, 4}, armnn::DataType::Float32);
@@ -758,9 +758,8 @@ TEST_CASE("SerializeDeserializeEqual")
armnn::INetworkPtr network = armnn::INetwork::Create();
armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(0);
armnn::IConnectableLayer* const inputLayer2 = network->AddInputLayer(1);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- armnn::IConnectableLayer* const equalLayer = network->AddEqualLayer(layerName.c_str());
- ARMNN_NO_DEPRECATE_WARN_END
+ armnn::ComparisonDescriptor equalDescriptor(armnn::ComparisonOperation::Equal);
+ armnn::IConnectableLayer* const equalLayer = network->AddComparisonLayer(equalDescriptor, layerName.c_str());
armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
inputLayer1->GetOutputSlot(0).Connect(equalLayer->GetInputSlot(0));
@@ -1111,10 +1110,7 @@ TEST_CASE("SerializeGather")
}
-// NOTE: Until the deprecated AddGreaterLayer disappears this test checks that calling
-// AddGreaterLayer places a ComparisonLayer into the serialized format and that
-// when this deserialises we have a ComparisonLayer
-TEST_CASE("SerializeGreaterDeprecated")
+TEST_CASE("SerializeComparisonGreater")
{
const std::string layerName("greater");
@@ -1126,9 +1122,8 @@ TEST_CASE("SerializeGreaterDeprecated")
armnn::INetworkPtr network = armnn::INetwork::Create();
armnn::IConnectableLayer* const inputLayer0 = network->AddInputLayer(0);
armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(1);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- armnn::IConnectableLayer* const equalLayer = network->AddGreaterLayer(layerName.c_str());
- ARMNN_NO_DEPRECATE_WARN_END
+ armnn::ComparisonDescriptor greaterDescriptor(armnn::ComparisonOperation::Greater);
+ armnn::IConnectableLayer* const equalLayer = network->AddComparisonLayer(greaterDescriptor, layerName.c_str());
armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
inputLayer0->GetOutputSlot(0).Connect(equalLayer->GetInputSlot(0));
@@ -1444,44 +1439,6 @@ public:
}
};
-// NOTE: Until the deprecated AddMergerLayer disappears this test checks that calling
-// AddMergerLayer places a ConcatLayer into the serialized format and that
-// when this deserialises we have a ConcatLayer
-TEST_CASE("SerializeMerger")
-{
- const std::string layerName("merger");
- const armnn::TensorInfo inputInfo = armnn::TensorInfo({2, 3, 2, 2}, armnn::DataType::Float32);
- const armnn::TensorInfo outputInfo = armnn::TensorInfo({4, 3, 2, 2}, armnn::DataType::Float32);
-
- const std::vector<armnn::TensorShape> shapes({inputInfo.GetShape(), inputInfo.GetShape()});
-
- armnn::OriginsDescriptor descriptor =
- armnn::CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), 0);
-
- armnn::INetworkPtr network = armnn::INetwork::Create();
- armnn::IConnectableLayer* const inputLayerOne = network->AddInputLayer(0);
- armnn::IConnectableLayer* const inputLayerTwo = network->AddInputLayer(1);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- armnn::IConnectableLayer* const mergerLayer = network->AddMergerLayer(descriptor, layerName.c_str());
- ARMNN_NO_DEPRECATE_WARN_END
- armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
-
- inputLayerOne->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(0));
- inputLayerTwo->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(1));
- mergerLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
-
- inputLayerOne->GetOutputSlot(0).SetTensorInfo(inputInfo);
- inputLayerTwo->GetOutputSlot(0).SetTensorInfo(inputInfo);
- mergerLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
-
- std::string mergerLayerNetwork = SerializeNetwork(*network);
- armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(mergerLayerNetwork);
- CHECK(deserializedNetwork);
-
- MergerLayerVerifier verifier(layerName, {inputInfo, inputInfo}, {outputInfo}, descriptor);
- deserializedNetwork->ExecuteStrategy(verifier);
-}
-
TEST_CASE("EnsureMergerLayerBackwardCompatibility")
{
// The hex data below is a flat buffer containing a simple network with two inputs
@@ -1979,14 +1936,14 @@ TEST_CASE("SerializeResize")
deserializedNetwork->ExecuteStrategy(verifier);
}
-class ResizeBilinearLayerVerifier : public LayerVerifierBaseWithDescriptor<armnn::ResizeBilinearDescriptor>
+class ResizeBilinearLayerVerifier : public LayerVerifierBaseWithDescriptor<armnn::ResizeDescriptor>
{
public:
ResizeBilinearLayerVerifier(const std::string& layerName,
const std::vector<armnn::TensorInfo>& inputInfos,
const std::vector<armnn::TensorInfo>& outputInfos,
- const armnn::ResizeBilinearDescriptor& descriptor)
- : LayerVerifierBaseWithDescriptor<armnn::ResizeBilinearDescriptor>(
+ const armnn::ResizeDescriptor& descriptor)
+ : LayerVerifierBaseWithDescriptor<armnn::ResizeDescriptor>(
layerName, inputInfos, outputInfos, descriptor) {}
void ExecuteStrategy(const armnn::IConnectableLayer* layer,
@@ -2022,16 +1979,14 @@ public:
}
};
-// NOTE: Until the deprecated AddResizeBilinearLayer disappears this test checks that
-// calling AddResizeBilinearLayer places a ResizeLayer into the serialized format
-// and that when this deserialises we have a ResizeLayer
TEST_CASE("SerializeResizeBilinear")
{
const std::string layerName("resizeBilinear");
const armnn::TensorInfo inputInfo = armnn::TensorInfo({1, 3, 5, 5}, armnn::DataType::Float32);
const armnn::TensorInfo outputInfo = armnn::TensorInfo({1, 3, 2, 4}, armnn::DataType::Float32);
- armnn::ResizeBilinearDescriptor desc;
+ armnn::ResizeDescriptor desc;
+ desc.m_Method = armnn::ResizeMethod::Bilinear;
desc.m_TargetWidth = 4u;
desc.m_TargetHeight = 2u;
desc.m_AlignCorners = true;
@@ -2039,9 +1994,7 @@ TEST_CASE("SerializeResizeBilinear")
armnn::INetworkPtr network = armnn::INetwork::Create();
armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- armnn::IConnectableLayer* const resizeLayer = network->AddResizeBilinearLayer(desc, layerName.c_str());
- ARMNN_NO_DEPRECATE_WARN_END
+ armnn::IConnectableLayer* const resizeLayer = network->AddResizeLayer(desc, layerName.c_str());
armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
inputLayer->GetOutputSlot(0).Connect(resizeLayer->GetInputSlot(0));
@@ -2060,7 +2013,7 @@ TEST_CASE("SerializeResizeBilinear")
TEST_CASE("EnsureResizeBilinearBackwardCompatibility")
{
// The hex data below is a flat buffer containing a simple network with an input,
- // a ResizeBilinearLayer (now deprecated) and an output
+ // a ResizeBilinearLayer (now deprecated and removed) and an output
//
// This test verifies that we can still deserialize this old-style model by replacing
// the ResizeBilinearLayer with an equivalent ResizeLayer
@@ -2105,7 +2058,7 @@ TEST_CASE("EnsureResizeBilinearBackwardCompatibility")
const armnn::TensorInfo inputInfo = armnn::TensorInfo({1, 3, 5, 5}, armnn::DataType::Float32);
const armnn::TensorInfo outputInfo = armnn::TensorInfo({1, 3, 2, 4}, armnn::DataType::Float32);
- armnn::ResizeBilinearDescriptor descriptor;
+ armnn::ResizeDescriptor descriptor;
descriptor.m_TargetWidth = 4u;
descriptor.m_TargetHeight = 2u;
diff --git a/src/armnnTfLiteParser/test/Unsupported.cpp b/src/armnnTfLiteParser/test/Unsupported.cpp
index b405e1958c..8426246414 100644
--- a/src/armnnTfLiteParser/test/Unsupported.cpp
+++ b/src/armnnTfLiteParser/test/Unsupported.cpp
@@ -5,7 +5,7 @@
#include "ParserFlatbuffersFixture.hpp"
-#include <armnn/LayerVisitorBase.hpp>
+#include <armnn/StrategyBase.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
@@ -19,45 +19,55 @@ TEST_SUITE("TensorflowLiteParser_Unsupported")
{
using namespace armnn;
-class StandInLayerVerifier : public LayerVisitorBase<VisitorThrowingPolicy>
+class StandInLayerVerifier : public StrategyBase<NoThrowStrategy>
{
public:
StandInLayerVerifier(const std::vector<TensorInfo>& inputInfos,
const std::vector<TensorInfo>& outputInfos)
- : LayerVisitorBase<VisitorThrowingPolicy>()
- , m_InputInfos(inputInfos)
+ : m_InputInfos(inputInfos)
, m_OutputInfos(outputInfos) {}
- void VisitInputLayer(const IConnectableLayer*, LayerBindingId, const char*) override {}
-
- void VisitOutputLayer(const IConnectableLayer*, LayerBindingId, const char*) override {}
-
- void VisitStandInLayer(const IConnectableLayer* layer,
- const StandInDescriptor& descriptor,
- const char*) override
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+ const armnn::BaseDescriptor& descriptor,
+ const std::vector<armnn::ConstTensor>& constants,
+ const char* name,
+ const armnn::LayerBindingId id = 0) override
{
- unsigned int numInputs = armnn::numeric_cast<unsigned int>(m_InputInfos.size());
- CHECK(descriptor.m_NumInputs == numInputs);
- CHECK(layer->GetNumInputSlots() == numInputs);
-
- unsigned int numOutputs = armnn::numeric_cast<unsigned int>(m_OutputInfos.size());
- CHECK(descriptor.m_NumOutputs == numOutputs);
- CHECK(layer->GetNumOutputSlots() == numOutputs);
-
- const StandInLayer* standInLayer = PolymorphicDowncast<const StandInLayer*>(layer);
- for (unsigned int i = 0u; i < numInputs; ++i)
- {
- const OutputSlot* connectedSlot = standInLayer->GetInputSlot(i).GetConnectedOutputSlot();
- CHECK(connectedSlot != nullptr);
-
- const TensorInfo& inputInfo = connectedSlot->GetTensorInfo();
- CHECK(inputInfo == m_InputInfos[i]);
- }
-
- for (unsigned int i = 0u; i < numOutputs; ++i)
+ armnn::IgnoreUnused(descriptor, constants, id);
+ switch (layer->GetType())
{
- const TensorInfo& outputInfo = layer->GetOutputSlot(i).GetTensorInfo();
- CHECK(outputInfo == m_OutputInfos[i]);
+ case armnn::LayerType::StandIn:
+ {
+ auto standInDescriptor = static_cast<const armnn::StandInDescriptor&>(descriptor);
+ unsigned int numInputs = armnn::numeric_cast<unsigned int>(m_InputInfos.size());
+ CHECK(standInDescriptor.m_NumInputs == numInputs);
+ CHECK(layer->GetNumInputSlots() == numInputs);
+
+ unsigned int numOutputs = armnn::numeric_cast<unsigned int>(m_OutputInfos.size());
+ CHECK(standInDescriptor.m_NumOutputs == numOutputs);
+ CHECK(layer->GetNumOutputSlots() == numOutputs);
+
+ const StandInLayer* standInLayer = PolymorphicDowncast<const StandInLayer*>(layer);
+ for (unsigned int i = 0u; i < numInputs; ++i)
+ {
+ const OutputSlot* connectedSlot = standInLayer->GetInputSlot(i).GetConnectedOutputSlot();
+ CHECK(connectedSlot != nullptr);
+
+ const TensorInfo& inputInfo = connectedSlot->GetTensorInfo();
+ CHECK(inputInfo == m_InputInfos[i]);
+ }
+
+ for (unsigned int i = 0u; i < numOutputs; ++i)
+ {
+ const TensorInfo& outputInfo = layer->GetOutputSlot(i).GetTensorInfo();
+ CHECK(outputInfo == m_OutputInfos[i]);
+ }
+ break;
+ }
+ default:
+ {
+ m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+ }
}
}
@@ -164,7 +174,7 @@ public:
void RunTest()
{
INetworkPtr network = m_Parser->CreateNetworkFromBinary(m_GraphBinary);
- network->Accept(m_StandInLayerVerifier);
+ network->ExecuteStrategy(m_StandInLayerVerifier);
}
private:
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.cpp b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
index 98b5adafbc..06309319f3 100644
--- a/src/backends/aclCommon/ArmComputeTensorUtils.cpp
+++ b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
@@ -37,10 +37,6 @@ arm_compute::DataType GetArmComputeDataType(armnn::DataType dataType, bool multi
{
return multiScales ? arm_compute::DataType::QSYMM8_PER_CHANNEL : arm_compute::DataType::QSYMM8;
}
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- case armnn::DataType::QuantizedSymm8PerAxis:
- return arm_compute::DataType::QSYMM8_PER_CHANNEL;
- ARMNN_NO_DEPRECATE_WARN_END
case armnn::DataType::Signed32:
return arm_compute::DataType::S32;
default:
diff --git a/src/backends/backendsCommon/CMakeLists.txt b/src/backends/backendsCommon/CMakeLists.txt
index 129cdbe9f1..c894f986c9 100644
--- a/src/backends/backendsCommon/CMakeLists.txt
+++ b/src/backends/backendsCommon/CMakeLists.txt
@@ -12,7 +12,6 @@ list(APPEND armnnBackendsCommon_sources
DynamicBackendUtils.hpp
IBackendContext.hpp
IBackendInternal.cpp
- IBackendInternal.hpp
IMemoryManager.hpp
ITensorHandle.hpp
ITensorHandleFactory.cpp
diff --git a/src/backends/backendsCommon/IBackendInternal.cpp b/src/backends/backendsCommon/IBackendInternal.cpp
index 31706eb1e7..ec1313df0c 100644
--- a/src/backends/backendsCommon/IBackendInternal.cpp
+++ b/src/backends/backendsCommon/IBackendInternal.cpp
@@ -9,26 +9,6 @@
namespace armnn
{
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-IBackendInternal::ISubGraphConverterPtr IBackendInternal::CreateSubGraphConverter(
- const std::shared_ptr<SubGraph>& /*subGrapg*/) const
-{
- return ISubGraphConverterPtr{};
-}
-
-IBackendInternal::Optimizations IBackendInternal::GetOptimizations() const
-{
- return Optimizations{};
-}
-
-IBackendInternal::SubGraphUniquePtr IBackendInternal::OptimizeSubGraph(const SubGraph& /*subGraph*/,
- bool& optimizationAttempted) const
-{
- optimizationAttempted = false;
- return nullptr;
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
IMemoryManagerUniquePtr IBackendInternal::CreateMemoryManager() const
{
return IMemoryManagerUniquePtr();
@@ -120,29 +100,12 @@ IBackendInternal::ILayerSupportSharedPtr IBackendInternal::GetLayerSupport(const
return GetLayerSupport();
}
-// Default implementation of OptimizeSubgraphView for backward compatibility with the old API.
+// Default implementation of OptimizeSubgraphView. Returns an untouched subgraph.
// Override this method with a custom optimization implementation.
OptimizationViews IBackendInternal::OptimizeSubgraphView(const SubgraphView& subgraph) const
{
- bool optimizationAttempted = false;
-
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- SubGraphUniquePtr optSubgraph = OptimizeSubGraph(subgraph, optimizationAttempted);
- ARMNN_NO_DEPRECATE_WARN_END
-
OptimizationViews result;
- if (!optimizationAttempted)
- {
- result.AddUntouchedSubgraph(SubgraphView(subgraph));
- }
- else if (optSubgraph)
- {
- result.AddSubstitution({subgraph, SubgraphView(*optSubgraph.get())});
- }
- else
- {
- result.AddFailedSubgraph(SubgraphView(subgraph));
- }
+ result.AddUntouchedSubgraph(SubgraphView(subgraph));
return result;
}
diff --git a/src/backends/backendsCommon/IBackendInternal.hpp b/src/backends/backendsCommon/IBackendInternal.hpp
deleted file mode 100644
index 61ccc4f057..0000000000
--- a/src/backends/backendsCommon/IBackendInternal.hpp
+++ /dev/null
@@ -1,9 +0,0 @@
-//
-// Copyright © 2019 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-// This file is depricated and will be removed soon.
-// Please use the new header in armnn/backends instead.
-// This will use the new armnn/backends header.
-#include <armnn/backends/IBackendInternal.hpp>
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 2c3f827622..ca1acc376b 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -37,13 +37,6 @@ bool DefaultLayerSupport(const char* func,
namespace armnn
{
-bool LayerSupportBase::IsAbsSupported(const TensorInfo&, // input
- const TensorInfo&, // output
- Optional<std::string &> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
bool LayerSupportBase::IsActivationSupported(const TensorInfo&, // input
const TensorInfo&, //output
const ActivationDescriptor&, // descriptor
@@ -238,31 +231,11 @@ bool LayerSupportBase::IsDivisionSupported(const TensorInfo&, // input0
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsElementwiseUnarySupported(const TensorInfo& input,
- const TensorInfo& output,
- const ElementwiseUnaryDescriptor& descriptor,
+bool LayerSupportBase::IsElementwiseUnarySupported(const TensorInfo&, // input
+ const TensorInfo&, // output
+ const ElementwiseUnaryDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
- if (descriptor.m_Operation == UnaryOperation::Abs)
- {
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- return IsAbsSupported(input, output, reasonIfUnsupported);
- ARMNN_NO_DEPRECATE_WARN_END
- }
- else if (descriptor.m_Operation == UnaryOperation::Rsqrt)
- {
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- return IsRsqrtSupported(input, output, reasonIfUnsupported);
- ARMNN_NO_DEPRECATE_WARN_END
- }
- return false;
-}
-
-bool LayerSupportBase::IsEqualSupported(const armnn::TensorInfo&, // input0
- const armnn::TensorInfo&, // input1
- const armnn::TensorInfo&, // output
- armnn::Optional<std::string &> reasonIfUnsupported) const
-{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
@@ -301,28 +274,12 @@ bool LayerSupportBase::IsFullyConnectedSupported(const TensorInfo&, // input
bool LayerSupportBase::IsGatherSupported(const armnn::TensorInfo&, // input0
const armnn::TensorInfo&, // input1
const armnn::TensorInfo&, // output
- armnn::Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool LayerSupportBase::IsGatherSupported(const armnn::TensorInfo&, // input0
- const armnn::TensorInfo&, // input1
- const armnn::TensorInfo&, // output
const GatherDescriptor&, // descriptor
armnn::Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsGreaterSupported(const TensorInfo&, // input0
- const TensorInfo&, // input1
- const TensorInfo&, // output
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
bool LayerSupportBase::IsInputSupported(const TensorInfo&, // input
Optional<std::string&> reasonIfUnsupported) const
{
@@ -422,14 +379,6 @@ bool LayerSupportBase::IsMergeSupported(const TensorInfo&, // input0
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
- const TensorInfo& output,
- const OriginsDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
-}
-
bool LayerSupportBase::IsMinimumSupported(const TensorInfo&, // input0
const TensorInfo&, // input1
const TensorInfo&, // output
@@ -553,13 +502,6 @@ bool LayerSupportBase::IsReshapeSupported(const TensorInfo&, // input
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsResizeBilinearSupported(const TensorInfo&, // input
- const TensorInfo&, // output
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
bool LayerSupportBase::IsResizeSupported(const TensorInfo&, // input
const TensorInfo&, // output
const ResizeDescriptor&, // descriptor
@@ -568,13 +510,6 @@ bool LayerSupportBase::IsResizeSupported(const TensorInfo&, // input
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsRsqrtSupported(const TensorInfo&, // input
- const TensorInfo&, // output
- Optional<std::string &> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
bool LayerSupportBase::IsShapeSupported(const TensorInfo&, // input
const TensorInfo&, // output
Optional<std::string&> reasonIfUnsupported) const
@@ -615,13 +550,6 @@ bool LayerSupportBase::IsSpaceToDepthSupported(const TensorInfo&, // input
}
bool LayerSupportBase::IsSplitterSupported(const TensorInfo&, // input
- const ViewsDescriptor&, // descriptor
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool LayerSupportBase::IsSplitterSupported(const TensorInfo&, // input
const std::vector<std::reference_wrapper<TensorInfo>>&, // outputs
const ViewsDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index 240b1dab73..fc2906f497 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -13,11 +13,6 @@ namespace armnn
class LayerSupportBase : public ILayerSupport
{
public:
- ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
- bool IsAbsSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsActivationSupported(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
@@ -149,12 +144,6 @@ public:
const ElementwiseUnaryDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
- ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
- bool IsEqualSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsFakeQuantizationSupported(const TensorInfo& input,
const FakeQuantizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
@@ -175,24 +164,12 @@ public:
const FullyConnectedDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
- ARMNN_DEPRECATED_MSG("Use IsGatherSupported with descriptor instead")
- bool IsGatherSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsGatherSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
const GatherDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
- ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
- bool IsGreaterSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsInputSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
@@ -257,12 +234,6 @@ public:
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
- ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
- bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
- const TensorInfo& output,
- const OriginsDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsMinimumSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
@@ -346,16 +317,6 @@ public:
const ResizeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
- ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
- bool IsResizeBilinearSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
- ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
- bool IsRsqrtSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsShapeSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
@@ -380,11 +341,6 @@ public:
const SpaceToDepthDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
- ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
- bool IsSplitterSupported(const TensorInfo& input,
- const ViewsDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsSplitterSupported(const TensorInfo& input,
const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
const ViewsDescriptor& descriptor,
diff --git a/src/backends/backendsCommon/TensorHandle.hpp b/src/backends/backendsCommon/TensorHandle.hpp
index 4e9d87d6eb..b898bd11a5 100644
--- a/src/backends/backendsCommon/TensorHandle.hpp
+++ b/src/backends/backendsCommon/TensorHandle.hpp
@@ -242,16 +242,17 @@ private:
std::shared_ptr<ConstTensorHandle> m_TensorHandle;
};
-using ConstCpuTensorHandle ARMNN_DEPRECATED_MSG("ConstCpuTensorHandle is deprecated, "
- "use ConstTensorHandle instead") = ConstTensorHandle;
-using CpuTensorHandle ARMNN_DEPRECATED_MSG("CpuTensorHandle is deprecated, "
- "use TensorHandle instead") = TensorHandle;
-using ScopedCpuTensorHandle ARMNN_DEPRECATED_MSG("ScopedCpuTensorHandle is deprecated, "
- "use ScopedTensorHandle instead") = ScopedTensorHandle;
-using PassthroughCpuTensorHandle ARMNN_DEPRECATED_MSG("PassthroughCpuTensorHandle is deprecated, use "
- "PassthroughTensorHandle instead") = PassthroughTensorHandle;
-using ConstPassthroughCpuTensorHandle ARMNN_DEPRECATED_MSG("ConstPassthroughCpuTensorHandle is "
+using ConstCpuTensorHandle ARMNN_DEPRECATED_MSG_REMOVAL_DATE("ConstCpuTensorHandle is deprecated, "
+ "use ConstTensorHandle instead", "22.05") = ConstTensorHandle;
+using CpuTensorHandle ARMNN_DEPRECATED_MSG_REMOVAL_DATE("CpuTensorHandle is deprecated, "
+ "use TensorHandle instead", "22.05") = TensorHandle;
+using ScopedCpuTensorHandle ARMNN_DEPRECATED_MSG_REMOVAL_DATE("ScopedCpuTensorHandle is deprecated, "
+ "use ScopedTensorHandle instead", "22.05") = ScopedTensorHandle;
+using PassthroughCpuTensorHandle ARMNN_DEPRECATED_MSG_REMOVAL_DATE("PassthroughCpuTensorHandle is deprecated, use "
+ "PassthroughTensorHandle instead",
+ "22.05") = PassthroughTensorHandle;
+using ConstPassthroughCpuTensorHandle ARMNN_DEPRECATED_MSG_REMOVAL_DATE("ConstPassthroughCpuTensorHandle is "
"deprecated, use ConstPassthroughTensorHandle "
- "instead") = ConstPassthroughTensorHandle;
+ "instead", "22.05") = ConstPassthroughTensorHandle;
} // namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index fe22133104..27b59ea3a6 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -157,15 +157,12 @@ void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
void ValidPerAxisQuantizedDataType(const TensorInfo& tensor, const std::string& descName, const std::string& tensorName)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- if (tensor.GetDataType() != DataType::QSymmS8 &&
- tensor.GetDataType() != DataType::QuantizedSymm8PerAxis)
+ if (tensor.GetDataType() != DataType::QSymmS8)
{
throw InvalidArgumentException(descName +
": Expected data type which supports per-axis quantization scheme but got " +
GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
}
- ARMNN_NO_DEPRECATE_WARN_END
}
//---------------------------------------------------------------
@@ -362,15 +359,12 @@ void ValidateWeightDataType(const TensorInfo& inputInfo,
const DataType inputType = inputInfo.GetDataType();
if (IsQuantized8BitType(inputType))
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
const std::vector<DataType> validTypes =
{
DataType::QAsymmS8,
DataType::QAsymmU8,
- DataType::QSymmS8,
- DataType::QuantizedSymm8PerAxis // deprecated
+ DataType::QSymmS8
};
- ARMNN_NO_DEPRECATE_WARN_END
ValidateDataTypes(weightInfo, validTypes, descName);
}
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 896081ecfd..29d39d14a9 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -345,10 +345,15 @@ struct RankQueueDescriptor : QueueDescriptor
void Validate(const WorkloadInfo& workloadInfo) const;
};
-struct ResizeBilinearQueueDescriptor : QueueDescriptorWithParameters<ResizeBilinearDescriptor>
+ARMNN_NO_DEPRECATE_WARN_BEGIN
+struct
+ARMNN_DEPRECATED_MSG_REMOVAL_DATE("ResizeBilinearQueueDescriptor is deprecated use ResizeQueueDescriptor instead",
+ "22.08")
+ResizeBilinearQueueDescriptor : QueueDescriptorWithParameters<ResizeBilinearDescriptor>
{
void Validate(const WorkloadInfo& workloadInfo) const;
};
+ARMNN_NO_DEPRECATE_WARN_END
struct ResizeQueueDescriptor : QueueDescriptorWithParameters<ResizeDescriptor>
{
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 666f83de71..3b7f3a0f1f 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -1499,13 +1499,6 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
modelOptions);
}
-// Default Implementations
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateAbs(const AbsQueueDescriptor& /*descriptor*/,
- const WorkloadInfo& /*info*/) const
-{
- return std::unique_ptr<IWorkload>();
-}
-
std::unique_ptr<IWorkload> IWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& /*descriptor*/,
const WorkloadInfo& /*info*/) const
{
@@ -1644,12 +1637,6 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateElementwiseUnary(const Elemen
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateEqual(const EqualQueueDescriptor& /*descriptor*/,
- const WorkloadInfo& /*Info*/) const
-{
- return std::unique_ptr<IWorkload>();
-}
-
std::unique_ptr<IWorkload> IWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& /*desc*/,
const WorkloadInfo& /*info*/) const
{
@@ -1680,12 +1667,6 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateGather(const GatherQueueDescr
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& /*descriptor*/,
- const WorkloadInfo& /*info*/) const
-{
- return std::unique_ptr<IWorkload>();
-}
-
std::unique_ptr<IWorkload> IWorkloadFactory::CreateInstanceNormalization(
const InstanceNormalizationQueueDescriptor& /*descriptor*/,
const WorkloadInfo& /*info*/) const
@@ -1753,12 +1734,6 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerge(const MergeQueueDescrip
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerger(const MergerQueueDescriptor& /*descriptor*/,
- const WorkloadInfo& /*info*/) const
-{
- return std::unique_ptr<IWorkload>();
-}
-
std::unique_ptr<IWorkload> IWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& /*descriptor*/,
const WorkloadInfo& /*info*/) const
{
@@ -1848,24 +1823,12 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateReshape(const ReshapeQueueDes
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& /*descriptor*/,
- const WorkloadInfo& /*info*/) const
-{
- return std::unique_ptr<IWorkload>();
-}
-
std::unique_ptr<IWorkload> IWorkloadFactory::CreateResize(const ResizeQueueDescriptor& /*descriptor*/,
const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& /*descriptor*/,
- const WorkloadInfo& /*info*/) const
-{
- return std::unique_ptr<IWorkload>();
-}
-
std::unique_ptr<IWorkload> IWorkloadFactory::CreateShape(const ShapeQueueDescriptor& /*descriptor*/,
const WorkloadInfo& /*info*/) const
{
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index c16fcb882b..df4bcd6144 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -68,10 +68,6 @@ public:
DataLayout dataLayout,
const bool IsMemoryManaged = true) const = 0;
- ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead")
- virtual std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor,
- const WorkloadInfo& info) const;
-
virtual std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
@@ -141,10 +137,6 @@ public:
virtual std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
const WorkloadInfo& Info) const;
- ARMNN_DEPRECATED_MSG("Use CreateComparison instead")
- virtual std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const;
-
virtual std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
@@ -160,10 +152,6 @@ public:
virtual std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
- ARMNN_DEPRECATED_MSG("Use CreateComparison instead")
- virtual std::unique_ptr<IWorkload> CreateGreater(const GreaterQueueDescriptor& descriptor,
- const WorkloadInfo& info) const;
-
virtual std::unique_ptr<IWorkload> CreateInstanceNormalization(
const InstanceNormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
@@ -198,10 +186,6 @@ public:
virtual std::unique_ptr<IWorkload> CreateMerge(const MergeQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
- ARMNN_DEPRECATED_MSG("Use CreateConcat instead")
- virtual std::unique_ptr<IWorkload> CreateMerger(const MergerQueueDescriptor& descriptor,
- const WorkloadInfo& info) const;
-
virtual std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
@@ -250,14 +234,6 @@ public:
virtual std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
- ARMNN_DEPRECATED_MSG("Use CreateResize instead")
- virtual std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
- const WorkloadInfo& info) const;
-
- ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead")
- virtual std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
- const WorkloadInfo& info) const;
-
virtual std::unique_ptr<IWorkload> CreateShape(const ShapeQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
diff --git a/src/backends/backendsCommon/WorkloadFactoryBase.hpp b/src/backends/backendsCommon/WorkloadFactoryBase.hpp
index 295202324e..ef507a64f8 100644
--- a/src/backends/backendsCommon/WorkloadFactoryBase.hpp
+++ b/src/backends/backendsCommon/WorkloadFactoryBase.hpp
@@ -34,10 +34,6 @@ public:
const bool /*IsMemoryManaged*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& /*descriptor*/,
- const WorkloadInfo& /*info*/) const override
- { return nullptr; }
-
std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& /*descriptor*/,
const WorkloadInfo& /*info*/) const override
{ return nullptr; }
@@ -111,19 +107,17 @@ public:
{
if (descriptor.m_Parameters.m_Operation == UnaryOperation::Abs)
{
- AbsQueueDescriptor absDescriptor;
- return CreateAbs(absDescriptor, info);
+ { return nullptr; }
}
else if (descriptor.m_Parameters.m_Operation == UnaryOperation::Rsqrt)
{
- RsqrtQueueDescriptor rsqrtDescriptor;
- return CreateRsqrt(rsqrtDescriptor, info);
+ { return nullptr; }
}
else if (descriptor.m_Parameters.m_Operation == UnaryOperation::LogicalNot)
{
return CreateLogicalUnary(descriptor, info);
}
- return nullptr;
+ { return nullptr; }
}
std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& /*descriptor*/,
@@ -234,10 +228,6 @@ public:
const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& /*descriptor*/,
- const WorkloadInfo& /*info*/) const override
- { return nullptr; }
-
std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& /*descriptor*/,
const WorkloadInfo& /*info*/) const override
{ return nullptr; }
diff --git a/src/backends/backendsCommon/WorkloadUtils.cpp b/src/backends/backendsCommon/WorkloadUtils.cpp
index bd7f09b28a..fe681936f1 100644
--- a/src/backends/backendsCommon/WorkloadUtils.cpp
+++ b/src/backends/backendsCommon/WorkloadUtils.cpp
@@ -265,13 +265,9 @@ armnn::ConstTensor ConvertWeightTensorFromArmnnToAcl(const ConstTensorHandle* we
case DataType::QAsymmU8:
weightPermuted = ReorderWeightChannelsForAcl<uint8_t>(weightPermuted, dataLayout, permuteBuffer);
break;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- case DataType::QuantizedSymm8PerAxis:
- ARMNN_FALLTHROUGH;
case DataType::QSymmS8:
weightPermuted = ReorderWeightChannelsForAcl<int8_t>(weightPermuted, dataLayout, permuteBuffer);
break;
- ARMNN_NO_DEPRECATE_WARN_END
default:
break;
}
diff --git a/src/backends/backendsCommon/test/MockBackend.cpp b/src/backends/backendsCommon/test/MockBackend.cpp
index e706fc8157..df1a5c19aa 100644
--- a/src/backends/backendsCommon/test/MockBackend.cpp
+++ b/src/backends/backendsCommon/test/MockBackend.cpp
@@ -117,11 +117,6 @@ IBackendInternal::IMemoryManagerUniquePtr MockBackend::CreateMemoryManager() con
return IMemoryManagerUniquePtr{};
}
-IBackendInternal::Optimizations MockBackend::GetOptimizations() const
-{
- return Optimizations{};
-}
-
IBackendInternal::ILayerSupportSharedPtr MockBackend::GetLayerSupport() const
{
static ILayerSupportSharedPtr layerSupport{new MockLayerSupport};
diff --git a/src/backends/backendsCommon/test/MockBackend.hpp b/src/backends/backendsCommon/test/MockBackend.hpp
index d90ad798da..c0624525dc 100644
--- a/src/backends/backendsCommon/test/MockBackend.hpp
+++ b/src/backends/backendsCommon/test/MockBackend.hpp
@@ -162,7 +162,6 @@ public:
CreateBackendProfilingContext(const IRuntime::CreationOptions& creationOptions,
IBackendProfilingPtr& backendProfiling) override;
- IBackendInternal::Optimizations GetOptimizations() const override;
IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override;
OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const override;
diff --git a/src/backends/backendsCommon/test/mockBackend/MockImportBackend.cpp b/src/backends/backendsCommon/test/mockBackend/MockImportBackend.cpp
index ebe94348fc..ea6ece7b32 100644
--- a/src/backends/backendsCommon/test/mockBackend/MockImportBackend.cpp
+++ b/src/backends/backendsCommon/test/mockBackend/MockImportBackend.cpp
@@ -79,11 +79,6 @@ IBackendInternal::IMemoryManagerUniquePtr MockImportBackend::CreateMemoryManager
return std::make_unique<RefMemoryManager>();
}
-IBackendInternal::Optimizations MockImportBackend::GetOptimizations() const
-{
- return Optimizations{};
-}
-
IBackendInternal::ILayerSupportSharedPtr MockImportBackend::GetLayerSupport() const
{
static ILayerSupportSharedPtr layerSupport{new MockImportLayerSupport};
diff --git a/src/backends/backendsCommon/test/mockBackend/MockImportBackend.hpp b/src/backends/backendsCommon/test/mockBackend/MockImportBackend.hpp
index ecc661f43b..c07a97c29e 100644
--- a/src/backends/backendsCommon/test/mockBackend/MockImportBackend.hpp
+++ b/src/backends/backendsCommon/test/mockBackend/MockImportBackend.hpp
@@ -40,7 +40,6 @@ public:
IBackendInternal::IBackendProfilingContextPtr CreateBackendProfilingContext(
const IRuntime::CreationOptions& creationOptions, IBackendProfilingPtr& backendProfiling) override;
- IBackendInternal::Optimizations GetOptimizations() const override;
IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override;
OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const override;
diff --git a/src/backends/cl/ClBackend.cpp b/src/backends/cl/ClBackend.cpp
index b85232e75c..dd58e002be 100644
--- a/src/backends/cl/ClBackend.cpp
+++ b/src/backends/cl/ClBackend.cpp
@@ -192,11 +192,6 @@ IBackendInternal::IBackendProfilingContextPtr ClBackend::CreateBackendProfilingC
return IBackendProfilingContextPtr{};
}
-IBackendInternal::Optimizations ClBackend::GetOptimizations() const
-{
- return Optimizations{};
-}
-
IBackendInternal::IBackendSpecificModelContextPtr ClBackend::CreateBackendSpecificModelContext(
const ModelOptions& modelOptions) const
{
diff --git a/src/backends/cl/ClBackend.hpp b/src/backends/cl/ClBackend.hpp
index 0a069b930b..80e4b97ff4 100644
--- a/src/backends/cl/ClBackend.hpp
+++ b/src/backends/cl/ClBackend.hpp
@@ -78,7 +78,6 @@ public:
IBackendInternal::IBackendProfilingContextPtr CreateBackendProfilingContext(
const IRuntime::CreationOptions&, IBackendProfilingPtr& backendProfiling) override;
- IBackendInternal::Optimizations GetOptimizations() const override;
IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override;
IBackendInternal::ILayerSupportSharedPtr GetLayerSupport(const ModelOptions& modelOptions) const override;
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 087302157f..9a50f4aabd 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -176,14 +176,6 @@ ClLayerSupport::ClLayerSupport()
{
}
-bool ClLayerSupport::IsAbsSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- ElementwiseUnaryDescriptor descriptor(UnaryOperation::Abs);
- return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
-}
-
bool ClLayerSupport::IsActivationSupported(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
@@ -563,15 +555,6 @@ bool ClLayerSupport::IsGatherSupported(const TensorInfo& input0,
descriptor);
}
-bool ClLayerSupport::IsGreaterSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- ComparisonDescriptor descriptor(ComparisonOperation::Greater);
- return IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported);
-}
-
bool ClLayerSupport::IsInputSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported) const
{
@@ -690,14 +673,6 @@ bool ClLayerSupport::IsMeanSupported(const TensorInfo& input,
descriptor);
}
-bool ClLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
- const TensorInfo& output,
- const MergerDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
-}
-
bool ClLayerSupport::IsMinimumSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
@@ -864,29 +839,6 @@ bool ClLayerSupport::IsResizeSupported(const TensorInfo& input,
FORWARD_WORKLOAD_VALIDATE_FUNC(ClResizeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
-bool ClLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- ResizeDescriptor descriptor;
- descriptor.m_Method = ResizeMethod::Bilinear;
- descriptor.m_DataLayout = DataLayout::NCHW;
-
- const TensorShape& outputShape = output.GetShape();
- descriptor.m_TargetHeight = outputShape[2];
- descriptor.m_TargetWidth = outputShape[3];
-
- return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
-}
-
-bool ClLayerSupport::IsRsqrtSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt);
- return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
-}
-
bool ClLayerSupport::IsSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const SliceDescriptor& descriptor,
@@ -928,17 +880,6 @@ bool ClLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
}
bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
- const ViewsDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- IgnoreUnused(descriptor);
- return IsSupportedForDataTypeCl(reasonIfUnsupported,
- input.GetDataType(),
- &TrueFunc<>,
- &TrueFunc<>);
-}
-
-bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index 43ae428163..e7a6748f0a 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -18,11 +18,6 @@ public:
ClLayerSupport();
~ClLayerSupport() {}
- ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
- bool IsAbsSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsActivationSupported(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
@@ -145,12 +140,6 @@ public:
const GatherDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const override;
- ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
- bool IsGreaterSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& ouput,
- Optional<std::string&> reasonIfUnsupported) const override;
-
bool IsInputSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
@@ -196,12 +185,6 @@ public:
const MeanDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
- ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
- bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
- const TensorInfo& output,
- const MergerDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsMinimumSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
@@ -277,16 +260,6 @@ public:
const ResizeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
- ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
- bool IsResizeBilinearSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
- ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
- bool IsRsqrtSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const SliceDescriptor& descriptor,
@@ -307,11 +280,6 @@ public:
const SpaceToDepthDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
- ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
- bool IsSplitterSupported(const TensorInfo& input,
- const ViewsDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsSplitterSupported(const TensorInfo& input,
const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
const ViewsDescriptor& descriptor,
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 530cb690d9..3400799f45 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -194,17 +194,6 @@ std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateSubTensorHandle(ITensorH
PolymorphicDowncast<IClTensorHandle*>(&parent), shape, coords);
}
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
-{
- IgnoreUnused(descriptor);
-
- ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
- elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Abs);
-
- return CreateElementwiseUnary(elementwiseUnaryDescriptor, info);
-}
-
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
@@ -376,17 +365,6 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateElementwiseUnary(const Eleme
}
}
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
-{
- IgnoreUnused(descriptor);
-
- ComparisonQueueDescriptor comparisonDescriptor;
- comparisonDescriptor.m_Parameters = ComparisonDescriptor(ComparisonOperation::Equal);
-
- return CreateComparison(comparisonDescriptor, info);
-}
-
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateFill(const FillQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
@@ -414,17 +392,6 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateGather(const GatherQueueDesc
return MakeWorkload<ClGatherWorkload>(descriptor, info, m_CLCompileContext);
}
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
-{
- IgnoreUnused(descriptor);
-
- ComparisonQueueDescriptor comparisonDescriptor;
- comparisonDescriptor.m_Parameters = ComparisonDescriptor(ComparisonOperation::Greater);
-
- return CreateComparison(comparisonDescriptor, info);
-}
-
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
@@ -507,12 +474,6 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMemImport(const MemImportQue
return std::make_unique<ImportMemGenericWorkload>(descriptor, info);
}
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMerger(const MergerQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
-{
- return CreateConcat(descriptor, info);
-}
-
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
@@ -609,32 +570,6 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateResize(const ResizeQueueDesc
return MakeWorkload<ClResizeWorkload>(descriptor, info, m_CLCompileContext);
}
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
-{
- ResizeQueueDescriptor resizeDescriptor;
- resizeDescriptor.m_Inputs = descriptor.m_Inputs;
- resizeDescriptor.m_Outputs = descriptor.m_Outputs;
-
- resizeDescriptor.m_Parameters.m_Method = ResizeMethod::Bilinear;
- resizeDescriptor.m_Parameters.m_DataLayout = descriptor.m_Parameters.m_DataLayout;
- resizeDescriptor.m_Parameters.m_TargetHeight = descriptor.m_Parameters.m_TargetHeight;
- resizeDescriptor.m_Parameters.m_TargetWidth = descriptor.m_Parameters.m_TargetWidth;
-
- return CreateResize(resizeDescriptor, info);
-}
-
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
-{
- IgnoreUnused(descriptor);
-
- ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
- elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt);
-
- return CreateElementwiseUnary(elementwiseUnaryDescriptor, info);
-}
-
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp
index 7f01ee0918..3ca33c891e 100644
--- a/src/backends/cl/ClWorkloadFactory.hpp
+++ b/src/backends/cl/ClWorkloadFactory.hpp
@@ -55,10 +55,6 @@ public:
DataLayout dataLayout,
const bool IsMemoryManaged = true) const override;
- ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead")
- std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override;
-
std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
@@ -119,10 +115,6 @@ public:
std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
- ARMNN_DEPRECATED_MSG("Use CreateComparison instead")
- std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override;
-
std::unique_ptr<IWorkload> CreateFill(const FillQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
@@ -135,10 +127,6 @@ public:
std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
- ARMNN_DEPRECATED_MSG("Use CreateComparison instead")
- std::unique_ptr<IWorkload> CreateGreater(const GreaterQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override;
-
std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
@@ -169,10 +157,6 @@ public:
std::unique_ptr<IWorkload> CreateMemImport(const MemImportQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
- ARMNN_DEPRECATED_MSG("Use CreateConcat instead")
- std::unique_ptr<IWorkload> CreateMerger(const MergerQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override;
-
std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
@@ -221,14 +205,6 @@ public:
std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
- ARMNN_DEPRECATED_MSG("Use CreateResize instead")
- std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override;
-
- ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead")
- std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override;
-
std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
diff --git a/src/backends/cl/workloads/ClWorkloadUtils.hpp b/src/backends/cl/workloads/ClWorkloadUtils.hpp
index 41b97c1e16..7d378fc656 100644
--- a/src/backends/cl/workloads/ClWorkloadUtils.hpp
+++ b/src/backends/cl/workloads/ClWorkloadUtils.hpp
@@ -129,9 +129,6 @@ inline void InitializeArmComputeClTensorData(arm_compute::CLTensor& clTensor,
case DataType::QAsymmU8:
CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<uint8_t>());
break;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- case DataType::QuantizedSymm8PerAxis:
- ARMNN_FALLTHROUGH;
case DataType::QAsymmS8:
case DataType::QSymmS8:
CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<int8_t>());
@@ -139,7 +136,6 @@ inline void InitializeArmComputeClTensorData(arm_compute::CLTensor& clTensor,
case DataType::QSymmS16:
CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<int16_t>());
break;
- ARMNN_NO_DEPRECATE_WARN_END
case DataType::Signed32:
CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<int32_t>());
break;
diff --git a/src/backends/neon/NeonBackend.cpp b/src/backends/neon/NeonBackend.cpp
index 17876753fb..2c3abfd70d 100644
--- a/src/backends/neon/NeonBackend.cpp
+++ b/src/backends/neon/NeonBackend.cpp
@@ -103,11 +103,6 @@ IBackendInternal::IBackendProfilingContextPtr NeonBackend::CreateBackendProfilin
return IBackendProfilingContextPtr{};
}
-IBackendInternal::Optimizations NeonBackend::GetOptimizations() const
-{
- return Optimizations{};
-}
-
IBackendInternal::IBackendSpecificModelContextPtr NeonBackend::CreateBackendSpecificModelContext(
const ModelOptions& modelOptions) const
{
diff --git a/src/backends/neon/NeonBackend.hpp b/src/backends/neon/NeonBackend.hpp
index 20da73aaf8..d28ac3bfcd 100644
--- a/src/backends/neon/NeonBackend.hpp
+++ b/src/backends/neon/NeonBackend.hpp
@@ -48,7 +48,6 @@ public:
IBackendInternal::IBackendContextPtr CreateBackendContext(const IRuntime::CreationOptions&) const override;
IBackendInternal::IBackendProfilingContextPtr CreateBackendProfilingContext(
const IRuntime::CreationOptions&, IBackendProfilingPtr& backendProfiling) override;
- IBackendInternal::Optimizations GetOptimizations() const override;
IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override;
IBackendInternal::ILayerSupportSharedPtr GetLayerSupport(const ModelOptions& modelOptions) const override;
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index ec64f902da..d742229bbe 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -145,14 +145,6 @@ NeonLayerSupport::NeonLayerSupport()
{
}
-bool NeonLayerSupport::IsAbsSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- ElementwiseUnaryDescriptor descriptor(UnaryOperation::Abs);
- return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
-}
-
bool NeonLayerSupport::IsActivationSupported(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
@@ -537,15 +529,6 @@ bool NeonLayerSupport::IsGatherSupported(const TensorInfo& input0,
descriptor);
}
-bool NeonLayerSupport::IsGreaterSupported(const armnn::TensorInfo& input0,
- const armnn::TensorInfo& input1,
- const armnn::TensorInfo& output,
- armnn::Optional<std::string&> reasonIfUnsupported) const
-{
- ComparisonDescriptor descriptor(ComparisonOperation::Greater);
- return IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported);
-}
-
bool NeonLayerSupport::IsInputSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported) const
{
@@ -653,14 +636,6 @@ bool NeonLayerSupport::IsMeanSupported(const TensorInfo& input,
descriptor);
}
-bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
- const TensorInfo& output,
- const MergerDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
-}
-
bool NeonLayerSupport::IsMinimumSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
@@ -852,29 +827,6 @@ bool NeonLayerSupport::IsResizeSupported(const TensorInfo& input,
descriptor);
}
-bool NeonLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- ResizeDescriptor descriptor;
- descriptor.m_Method = ResizeMethod::Bilinear;
- descriptor.m_DataLayout = DataLayout::NCHW;
-
- const TensorShape& outputShape = output.GetShape();
- descriptor.m_TargetHeight = outputShape[2];
- descriptor.m_TargetWidth = outputShape[3];
-
- return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
-}
-
-bool NeonLayerSupport::IsRsqrtSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt);
- return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
-}
-
bool NeonLayerSupport::IsSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const SliceDescriptor& descriptor,
@@ -920,17 +872,6 @@ bool NeonLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
}
bool NeonLayerSupport::IsSplitterSupported(const TensorInfo& input,
- const ViewsDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- armnn::IgnoreUnused(descriptor);
- return IsSupportedForDataTypeNeon(reasonIfUnsupported,
- input.GetDataType(),
- &TrueFunc<>,
- &TrueFunc<>);
-}
-
-bool NeonLayerSupport::IsSplitterSupported(const TensorInfo& input,
const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index fc1e1f6125..155d96acdc 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -19,11 +19,6 @@ public:
~NeonLayerSupport() {}
- ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
- bool IsAbsSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsActivationSupported(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
@@ -150,12 +145,6 @@ public:
const GatherDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const override;
- ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
- bool IsGreaterSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsInputSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
@@ -201,12 +190,6 @@ public:
const MeanDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
- ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
- bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
- const TensorInfo& output,
- const MergerDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsMinimumSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
@@ -287,16 +270,6 @@ public:
const ResizeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
- ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
- bool IsResizeBilinearSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
- ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
- bool IsRsqrtSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const SliceDescriptor& descriptor,
@@ -317,11 +290,6 @@ public:
const SpaceToDepthDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
- ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
- bool IsSplitterSupported(const TensorInfo& input,
- const ViewsDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsSplitterSupported(const TensorInfo& input,
const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
const ViewsDescriptor& descriptor,
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 9ec7583b18..605b03d6b7 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -131,17 +131,6 @@ std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateTensorHandle(const Ten
return tensorHandle;
}
-std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
-{
- IgnoreUnused(descriptor);
-
- ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
- elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Abs);
-
- return CreateElementwiseUnary(elementwiseUnaryDescriptor, info);
-}
-
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
@@ -323,17 +312,6 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateElementwiseUnary(
}
}
-std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
-{
- IgnoreUnused(descriptor);
-
- ComparisonQueueDescriptor comparisonDescriptor;
- comparisonDescriptor.m_Parameters = ComparisonDescriptor(ComparisonOperation::Equal);
-
- return CreateComparison(comparisonDescriptor, info);
-}
-
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFill(const FillQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
@@ -358,17 +336,6 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateGather(const armnn::Gather
return std::make_unique<NeonGatherWorkload>(descriptor, info);
}
-std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
-{
- IgnoreUnused(descriptor);
-
- ComparisonQueueDescriptor comparisonDescriptor;
- comparisonDescriptor.m_Parameters = ComparisonDescriptor(ComparisonOperation::Greater);
-
- return CreateComparison(comparisonDescriptor, info);
-}
-
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
@@ -449,12 +416,6 @@ std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMemImport(const Mem
return std::make_unique<ImportMemGenericWorkload>(descriptor, info);
}
-std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMerger(const MergerQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
-{
- return CreateConcat(descriptor, info);
-}
-
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
@@ -552,32 +513,6 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateResize(const ResizeQueueDe
return std::make_unique<NeonResizeWorkload>(descriptor, info);
}
-std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateResizeBilinear(
- const ResizeBilinearQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
-{
- ResizeQueueDescriptor resizeDescriptor;
- resizeDescriptor.m_Inputs = descriptor.m_Inputs;
- resizeDescriptor.m_Outputs = descriptor.m_Outputs;
-
- resizeDescriptor.m_Parameters.m_DataLayout = descriptor.m_Parameters.m_DataLayout;
- resizeDescriptor.m_Parameters.m_TargetWidth = descriptor.m_Parameters.m_TargetWidth;
- resizeDescriptor.m_Parameters.m_TargetHeight = descriptor.m_Parameters.m_TargetHeight;
-
- return CreateResize(resizeDescriptor, info);
-}
-
-std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor &descriptor,
- const WorkloadInfo &info) const
-{
- IgnoreUnused(descriptor);
-
- ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
- elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt);
-
- return CreateElementwiseUnary(elementwiseUnaryDescriptor, info);
-}
-
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index 41fc506aaa..bd84c057f5 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -52,10 +52,6 @@ public:
DataLayout dataLayout,
const bool IsMemoryManaged = true) const override;
- ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead")
- std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override;
-
std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
@@ -122,10 +118,6 @@ public:
std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
const WorkloadInfo& Info) const override;
- ARMNN_DEPRECATED_MSG("Use CreateComparison instead")
- std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override;
-
std::unique_ptr<IWorkload> CreateFill(const FillQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
@@ -138,10 +130,6 @@ public:
std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
- ARMNN_DEPRECATED_MSG("Use CreateComparison instead")
- std::unique_ptr<IWorkload> CreateGreater(const GreaterQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override;
-
std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
@@ -172,10 +160,6 @@ public:
std::unique_ptr<IWorkload> CreateMemImport(const MemImportQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
- ARMNN_DEPRECATED_MSG("Use CreateConcat instead")
- std::unique_ptr<IWorkload> CreateMerger(const MergerQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override;
-
std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
@@ -224,14 +208,6 @@ public:
std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
- ARMNN_DEPRECATED_MSG("Use CreateResize instead")
- std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override;
-
- ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead")
- std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override;
-
std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
diff --git a/src/backends/neon/workloads/NeonWorkloadUtils.hpp b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
index 1199f30863..f51493d383 100644
--- a/src/backends/neon/workloads/NeonWorkloadUtils.hpp
+++ b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
@@ -73,14 +73,10 @@ inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
case DataType::QAsymmU8:
CopyArmComputeTensorData(tensor, handle->GetConstTensor<uint8_t>());
break;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- case DataType::QuantizedSymm8PerAxis:
- ARMNN_FALLTHROUGH;
case DataType::QSymmS8:
case DataType::QAsymmS8:
CopyArmComputeTensorData(tensor, handle->GetConstTensor<int8_t>());
break;
- ARMNN_NO_DEPRECATE_WARN_END
case DataType::Signed32:
CopyArmComputeTensorData(tensor, handle->GetConstTensor<int32_t>());
break;
diff --git a/src/backends/reference/RefBackend.cpp b/src/backends/reference/RefBackend.cpp
index c9f164e0c9..a3060f0798 100644
--- a/src/backends/reference/RefBackend.cpp
+++ b/src/backends/reference/RefBackend.cpp
@@ -58,27 +58,12 @@ IBackendInternal::IMemoryManagerUniquePtr RefBackend::CreateMemoryManager() cons
return std::make_unique<RefMemoryManager>();
}
-IBackendInternal::Optimizations RefBackend::GetOptimizations() const
-{
- return Optimizations{};
-}
-
IBackendInternal::ILayerSupportSharedPtr RefBackend::GetLayerSupport() const
{
static ILayerSupportSharedPtr layerSupport{new RefLayerSupport};
return layerSupport;
}
-bool RefBackend::HasCapability(BackendCapability capabilityClass) const
-{
- auto search = oldCpuRefCapabilities.find(capabilityClass);
- if (search != oldCpuRefCapabilities.end())
- {
- return true;
- }
- return false;
-}
-
OptimizationViews RefBackend::OptimizeSubgraphView(const SubgraphView& subgraph) const
{
OptimizationViews optimizationViews;
diff --git a/src/backends/reference/RefBackend.hpp b/src/backends/reference/RefBackend.hpp
index 28c1591179..c04bf43db3 100644
--- a/src/backends/reference/RefBackend.hpp
+++ b/src/backends/reference/RefBackend.hpp
@@ -47,7 +47,6 @@ public:
IBackendInternal::IBackendProfilingContextPtr CreateBackendProfilingContext(
const IRuntime::CreationOptions& creationOptions, IBackendProfilingPtr& backendProfiling) override;
- IBackendInternal::Optimizations GetOptimizations() const override;
IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override;
OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const override;
@@ -60,8 +59,6 @@ public:
{
return cpuRefCapabilities;
};
-
- bool HasCapability(BackendCapability capabilityClass) const override;
};
} // namespace armnn
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index c0ede678bf..b80aa9992f 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -58,15 +58,6 @@ std::string CreateIncorrectDimensionsErrorMsg(unsigned int expected,
} // anonymous namespace
-bool RefLayerSupport::IsAbsSupported(const TensorInfo& input, const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return IsElementwiseUnarySupported(input,
- output,
- ElementwiseUnaryDescriptor(UnaryOperation::Abs),
- reasonIfUnsupported);
-}
-
bool RefLayerSupport::IsActivationSupported(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
@@ -565,15 +556,12 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
const DataType inputType = input.GetDataType();
if (IsQuantized8BitType(inputType))
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::array<DataType, 4> supportedWeightTypes =
+ std::array<DataType, 3> supportedWeightTypes =
{
DataType::QAsymmS8,
DataType::QAsymmU8,
- DataType::QSymmS8,
- DataType::QuantizedSymm8PerAxis // deprecated
+ DataType::QSymmS8
};
- ARMNN_NO_DEPRECATE_WARN_END
supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
"Reference Convolution2d: weights type not supported for quantized input.");
@@ -769,15 +757,12 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
const DataType inputType = input.GetDataType();
if (IsQuantized8BitType(inputType))
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::array<DataType, 4> supportedWeightTypes =
+ std::array<DataType, 3> supportedWeightTypes =
{
DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS8,
- DataType::QuantizedSymm8PerAxis // deprecated
};
- ARMNN_NO_DEPRECATE_WARN_END
supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
"Reference DepthwiseConvolution2d: weights type not supported for "
@@ -977,18 +962,6 @@ bool RefLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
return supported;
}
-bool RefLayerSupport::IsEqualSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return IsComparisonSupported(input0,
- input1,
- output,
- ComparisonDescriptor(ComparisonOperation::Equal),
- reasonIfUnsupported);
-}
-
bool RefLayerSupport::IsFakeQuantizationSupported(const TensorInfo& input,
const FakeQuantizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
@@ -1173,18 +1146,6 @@ bool RefLayerSupport::IsGatherSupported(const armnn::TensorInfo& input0,
return supported;
}
-bool RefLayerSupport::IsGreaterSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return IsComparisonSupported(input0,
- input1,
- output,
- ComparisonDescriptor(ComparisonOperation::Greater),
- reasonIfUnsupported);
-}
-
bool RefLayerSupport::IsInputSupported(const TensorInfo& /*input*/,
Optional<std::string&> /*reasonIfUnsupported*/) const
{
@@ -1523,14 +1484,6 @@ bool RefLayerSupport::IsMeanSupported(const TensorInfo& input,
return supported;
}
-bool RefLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
- const TensorInfo& output,
- const MergerDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
-}
-
bool RefLayerSupport::IsMemCopySupported(const TensorInfo &input,
const TensorInfo &output,
Optional<std::string &> reasonIfUnsupported) const
@@ -1897,33 +1850,6 @@ bool RefLayerSupport::IsReshapeSupported(const TensorInfo& input,
"Reference reshape: input type not supported.");
}
-bool RefLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- bool supported = true;
- std::array<DataType,6> supportedTypes =
- {
- DataType::BFloat16,
- DataType::Float32,
- DataType::Float16,
- DataType::QAsymmS8,
- DataType::QAsymmU8,
- DataType::QSymmS16
- };
-
- supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
- "Reference ResizeBilinear: input type not supported");
-
- supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
- "Reference ResizeBilinear: output type not supported");
-
- supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
- "Reference ResizeBilinear: input and output types not matching");
-
- return supported;
-}
-
bool RefLayerSupport::IsResizeSupported(const TensorInfo& input,
const TensorInfo& output,
const ResizeDescriptor& descriptor,
@@ -1953,16 +1879,6 @@ bool RefLayerSupport::IsResizeSupported(const TensorInfo& input,
return supported;
}
-bool RefLayerSupport::IsRsqrtSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return IsElementwiseUnarySupported(input,
- output,
- ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt),
- reasonIfUnsupported);
-}
-
bool RefLayerSupport::IsShapeSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
@@ -2101,28 +2017,6 @@ bool RefLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
}
bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
- const ViewsDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- IgnoreUnused(descriptor);
- bool supported = true;
- std::array<DataType,6> supportedTypes =
- {
- DataType::BFloat16,
- DataType::Float32,
- DataType::Float16,
- DataType::QAsymmS8,
- DataType::QAsymmU8,
- DataType::QSymmS16
- };
-
- supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
- "Reference splitter: input type not supported");
-
- return supported;
-}
-
-bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
@@ -2322,15 +2216,12 @@ bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
const DataType inputType = input.GetDataType();
if (IsQuantized8BitType(inputType))
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::array<DataType, 4> supportedWeightTypes =
+ std::array<DataType, 3> supportedWeightTypes =
{
DataType::QAsymmS8,
DataType::QAsymmU8,
- DataType::QSymmS8,
- DataType::QuantizedSymm8PerAxis //Deprecated
+ DataType::QSymmS8
};
- ARMNN_NO_DEPRECATE_WARN_END
supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
"Reference TransposeConvolution2d: weights type not supported for "
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 627418e3e1..53d7907204 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -12,11 +12,6 @@ namespace armnn
class RefLayerSupport : public LayerSupportBase
{
public:
- ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
- bool IsAbsSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsActivationSupported(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
@@ -147,12 +142,6 @@ public:
const ElementwiseUnaryDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
- ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
- bool IsEqualSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsFakeQuantizationSupported(const TensorInfo& input,
const FakeQuantizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
@@ -179,12 +168,6 @@ public:
const GatherDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
- ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
- bool IsGreaterSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsInputSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
@@ -230,12 +213,6 @@ public:
const MeanDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
- ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
- bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
- const TensorInfo& output,
- const MergerDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsMemCopySupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
@@ -301,20 +278,11 @@ public:
const ReshapeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
- bool IsResizeBilinearSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsResizeSupported(const TensorInfo& input,
const TensorInfo& output,
const ResizeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
- ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
- bool IsRsqrtSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsShapeSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
@@ -340,11 +308,6 @@ public:
Optional<std::string&> reasonIfUnsupported = EmptyOptional())
const override;
- ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
- bool IsSplitterSupported(const TensorInfo& input,
- const ViewsDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsSplitterSupported(const TensorInfo& input,
const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
const ViewsDescriptor& descriptor,
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 18a5af277f..75008bc866 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -129,16 +129,6 @@ std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const Tens
return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
}
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
-{
- IgnoreUnused(descriptor);
- ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
- elementwiseUnaryDescriptor.m_Parameters.m_Operation = UnaryOperation::Abs;
-
- return CreateElementwiseUnary(elementwiseUnaryDescriptor, info);
-}
-
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
@@ -331,16 +321,6 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateElementwiseUnary(const Elem
return std::make_unique<RefElementwiseUnaryWorkload>(descriptor, info);
}
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
-{
- IgnoreUnused(descriptor);
- ComparisonQueueDescriptor comparisonDescriptor;
- comparisonDescriptor.m_Parameters.m_Operation = ComparisonOperation::Equal;
-
- return CreateComparison(comparisonDescriptor, info);
-}
-
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
@@ -379,16 +359,6 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateGather(const GatherQueueDes
return std::make_unique<RefGatherWorkload>(descriptor, info);
}
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
-{
- IgnoreUnused(descriptor);
- ComparisonQueueDescriptor comparisonDescriptor;
- comparisonDescriptor.m_Parameters.m_Operation = ComparisonOperation::Greater;
-
- return CreateComparison(comparisonDescriptor, info);
-}
-
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
@@ -479,12 +449,6 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMemImport(const MemImportQu
return std::make_unique<ImportMemGenericWorkload>(descriptor, info);
}
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMerger(const MergerQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
-{
- return CreateConcat(descriptor, info);
-}
-
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
@@ -615,28 +579,6 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateResize(const ResizeQueueDes
return std::make_unique<RefResizeWorkload>(descriptor, info);
}
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
-{
- ResizeQueueDescriptor resizeDescriptor;
- resizeDescriptor.m_Parameters.m_Method = ResizeMethod::Bilinear;
- resizeDescriptor.m_Parameters.m_DataLayout = descriptor.m_Parameters.m_DataLayout;
- resizeDescriptor.m_Parameters.m_TargetWidth = descriptor.m_Parameters.m_TargetWidth;
- resizeDescriptor.m_Parameters.m_TargetHeight = descriptor.m_Parameters.m_TargetHeight;
-
- return CreateResize(resizeDescriptor, info);
-}
-
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
-{
- IgnoreUnused(descriptor);
- ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
- elementwiseUnaryDescriptor.m_Parameters.m_Operation = UnaryOperation::Rsqrt;
-
- return CreateElementwiseUnary(elementwiseUnaryDescriptor, info);
-}
-
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateShape(const ShapeQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index d00d3ca822..a85e8dda3e 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -66,10 +66,6 @@ public:
DataLayout dataLayout,
const bool IsMemoryManaged = true) const override;
- ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead")
- std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override;
-
std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
@@ -139,10 +135,6 @@ public:
std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
- ARMNN_DEPRECATED_MSG("Use CreateComparison instead")
- std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override;
-
std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
@@ -158,10 +150,6 @@ public:
std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
- ARMNN_DEPRECATED_MSG("Use CreateComparison instead")
- std::unique_ptr<IWorkload> CreateGreater(const GreaterQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override;
-
std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
@@ -192,10 +180,6 @@ public:
std::unique_ptr<IWorkload> CreateMemImport(const MemImportQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
- ARMNN_DEPRECATED_MSG("Use CreateConcat instead")
- std::unique_ptr<IWorkload> CreateMerger(const MergerQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override;
-
std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
@@ -241,14 +225,6 @@ public:
std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
- ARMNN_DEPRECATED_MSG("Use CreateResize instead")
- std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override;
-
- ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead")
- std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override;
-
std::unique_ptr<IWorkload> CreateShape(const ShapeQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index 7d6c59a273..f8169a6c0c 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -88,7 +88,6 @@ BACKEND_SOURCES := \
workloads/RefQuantizeWorkload.cpp \
workloads/RefReduceWorkload.cpp \
workloads/RefReshapeWorkload.cpp \
- workloads/RefResizeBilinearWorkload.cpp \
workloads/RefResizeWorkload.cpp \
workloads/RefSliceWorkload.cpp \
workloads/RefSoftmaxWorkload.cpp \
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index e169c03ad8..5727291be3 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -148,8 +148,6 @@ list(APPEND armnnRefBackendWorkloads_sources
RefReduceWorkload.hpp
RefReshapeWorkload.cpp
RefReshapeWorkload.hpp
- RefResizeBilinearWorkload.cpp
- RefResizeBilinearWorkload.hpp
RefResizeWorkload.cpp
RefResizeWorkload.hpp
RefShapeWorkload.hpp
diff --git a/src/backends/reference/workloads/Decoders.hpp b/src/backends/reference/workloads/Decoders.hpp
index cd0dc5d40f..c2a456bfce 100644
--- a/src/backends/reference/workloads/Decoders.hpp
+++ b/src/backends/reference/workloads/Decoders.hpp
@@ -67,13 +67,6 @@ inline std::unique_ptr<Decoder<float>> MakeDecoder(const TensorInfo& info, const
{
switch(info.GetDataType())
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- case armnn::DataType::QuantizedSymm8PerAxis:
- {
- std::pair<unsigned int, std::vector<float>> params = armnnUtils::GetPerAxisParams(info);
- return std::make_unique<QSymm8PerAxisDecoder>(static_cast<const int8_t*>(data), info);
- }
- ARMNN_NO_DEPRECATE_WARN_END
case DataType::QAsymmS8:
{
return std::make_unique<QASymmS8Decoder>(
diff --git a/src/backends/reference/workloads/Encoders.hpp b/src/backends/reference/workloads/Encoders.hpp
index a2d565ec4a..a7be9e172b 100644
--- a/src/backends/reference/workloads/Encoders.hpp
+++ b/src/backends/reference/workloads/Encoders.hpp
@@ -22,16 +22,6 @@ inline std::unique_ptr<Encoder<float>> MakeEncoder(const TensorInfo& info, void*
{
switch(info.GetDataType())
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- case armnn::DataType::QuantizedSymm8PerAxis:
- {
- std::pair<unsigned int, std::vector<float>> params = armnnUtils::GetPerAxisParams(info);
- return std::make_unique<QSymm8PerAxisEncoder>(
- static_cast<int8_t*>(data),
- params.second,
- params.first);
- }
- ARMNN_NO_DEPRECATE_WARN_END
case armnn::DataType::QAsymmS8:
{
return std::make_unique<QASymmS8Encoder>(
diff --git a/src/backends/reference/workloads/RefResizeBilinearWorkload.cpp b/src/backends/reference/workloads/RefResizeBilinearWorkload.cpp
deleted file mode 100644
index 2cf5888f33..0000000000
--- a/src/backends/reference/workloads/RefResizeBilinearWorkload.cpp
+++ /dev/null
@@ -1,45 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "RefResizeBilinearWorkload.hpp"
-
-#include "RefWorkloadUtils.hpp"
-#include "Resize.hpp"
-#include "BaseIterator.hpp"
-#include "Profiling.hpp"
-
-#include "BaseIterator.hpp"
-#include "Decoders.hpp"
-#include "Encoders.hpp"
-
-namespace armnn
-{
-
-void RefResizeBilinearWorkload::Execute() const
-{
- Execute(m_Data.m_Inputs, m_Data.m_Outputs);
-}
-
-void RefResizeBilinearWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
-{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
-}
-
-void RefResizeBilinearWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
-{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefResizeBilinearWorkload_Execute");
-
- const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
- const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
-
- std::unique_ptr<Decoder<float>> decoderPtr = MakeDecoder<float>(inputInfo, inputs[0]->Map());
- Decoder<float> &decoder = *decoderPtr;
- std::unique_ptr<Encoder<float>> encoderPtr = MakeEncoder<float>(outputInfo, outputs[0]->Map());
- Encoder<float> &encoder = *encoderPtr;
-
- Resize(decoder, inputInfo, encoder, outputInfo, m_Data.m_Parameters.m_DataLayout, armnn::ResizeMethod::Bilinear);
-}
-
-} //namespace armnn
diff --git a/src/backends/reference/workloads/RefResizeBilinearWorkload.hpp b/src/backends/reference/workloads/RefResizeBilinearWorkload.hpp
deleted file mode 100644
index 5ada3d1ff8..0000000000
--- a/src/backends/reference/workloads/RefResizeBilinearWorkload.hpp
+++ /dev/null
@@ -1,24 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <backendsCommon/Workload.hpp>
-#include <backendsCommon/WorkloadData.hpp>
-
-namespace armnn
-{
-
-class RefResizeBilinearWorkload : public BaseWorkload<ResizeBilinearQueueDescriptor>
-{
-public:
- using BaseWorkload<ResizeBilinearQueueDescriptor>::BaseWorkload;
- void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
-private:
- void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
-};
-
-} //namespace armnn
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index ed3aa90e5f..914137c23d 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -59,7 +59,6 @@
#include "RefRankWorkload.hpp"
#include "RefReduceWorkload.hpp"
#include "RefReshapeWorkload.hpp"
-#include "RefResizeBilinearWorkload.hpp"
#include "RefResizeWorkload.hpp"
#include "RefShapeWorkload.hpp"
#include "RefSliceWorkload.hpp"