diff options
author | Jan Eilers <jan.eilers@arm.com> | 2021-09-24 15:45:46 +0100 |
---|---|---|
committer | Jan Eilers <jan.eilers@arm.com> | 2021-10-02 16:27:39 +0100 |
commit | 1b2654fb799c3d25ffcef4d31b5d026d359e2f8f (patch) | |
tree | 0397fdf24f286715e26a0e63bddaa0502f64caf7 /src/backends | |
parent | b63a31170aee1d28267d83a4bc67b57708fb6b05 (diff) | |
download | armnn-1b2654fb799c3d25ffcef4d31b5d026d359e2f8f.tar.gz |
IVGCVSW-5985 Remove deprecated code
* Removes deprecated AddLayer, IsLayerSupported functions
* Marks the whole LayerVisitor class as deprecated, not just the
constructor. This required wrapping all Accept functions in a
no-deprecate macro, because the LayerVisitor is used as a parameter
in them
* Removes usage of deprecated LayerVisitor and replaces it
with ExecuteStrategy. This required a few structural changes
in the unit tests
* Adds a default implementation for IStrategy called StrategyBase
* Changes pyarmnn to use non deprecated constructor for
INetworkProperties and adds related unit test
* Marks usage of deprecated code in pyarmnn as deprecated. This
required extending INetworkProperties to allow backwards compatibility
* Removes deprecated functions from CpuAcc, GpuAcc and Ref backends
Note: This patch breaks compatibility with backends that are not
updated in this patch
!android-nn-driver:6325
Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Change-Id: Id13b6f37a74d26eadeda2da1dc92915e725ed5a5
Diffstat (limited to 'src/backends')
44 files changed, 32 insertions, 1002 deletions
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.cpp b/src/backends/aclCommon/ArmComputeTensorUtils.cpp index 98b5adafbc..06309319f3 100644 --- a/src/backends/aclCommon/ArmComputeTensorUtils.cpp +++ b/src/backends/aclCommon/ArmComputeTensorUtils.cpp @@ -37,10 +37,6 @@ arm_compute::DataType GetArmComputeDataType(armnn::DataType dataType, bool multi { return multiScales ? arm_compute::DataType::QSYMM8_PER_CHANNEL : arm_compute::DataType::QSYMM8; } - ARMNN_NO_DEPRECATE_WARN_BEGIN - case armnn::DataType::QuantizedSymm8PerAxis: - return arm_compute::DataType::QSYMM8_PER_CHANNEL; - ARMNN_NO_DEPRECATE_WARN_END case armnn::DataType::Signed32: return arm_compute::DataType::S32; default: diff --git a/src/backends/backendsCommon/CMakeLists.txt b/src/backends/backendsCommon/CMakeLists.txt index 129cdbe9f1..c894f986c9 100644 --- a/src/backends/backendsCommon/CMakeLists.txt +++ b/src/backends/backendsCommon/CMakeLists.txt @@ -12,7 +12,6 @@ list(APPEND armnnBackendsCommon_sources DynamicBackendUtils.hpp IBackendContext.hpp IBackendInternal.cpp - IBackendInternal.hpp IMemoryManager.hpp ITensorHandle.hpp ITensorHandleFactory.cpp diff --git a/src/backends/backendsCommon/IBackendInternal.cpp b/src/backends/backendsCommon/IBackendInternal.cpp index 31706eb1e7..ec1313df0c 100644 --- a/src/backends/backendsCommon/IBackendInternal.cpp +++ b/src/backends/backendsCommon/IBackendInternal.cpp @@ -9,26 +9,6 @@ namespace armnn { -ARMNN_NO_DEPRECATE_WARN_BEGIN -IBackendInternal::ISubGraphConverterPtr IBackendInternal::CreateSubGraphConverter( - const std::shared_ptr<SubGraph>& /*subGrapg*/) const -{ - return ISubGraphConverterPtr{}; -} - -IBackendInternal::Optimizations IBackendInternal::GetOptimizations() const -{ - return Optimizations{}; -} - -IBackendInternal::SubGraphUniquePtr IBackendInternal::OptimizeSubGraph(const SubGraph& /*subGraph*/, - bool& optimizationAttempted) const -{ - optimizationAttempted = false; - return nullptr; -} -ARMNN_NO_DEPRECATE_WARN_END - 
IMemoryManagerUniquePtr IBackendInternal::CreateMemoryManager() const { return IMemoryManagerUniquePtr(); @@ -120,29 +100,12 @@ IBackendInternal::ILayerSupportSharedPtr IBackendInternal::GetLayerSupport(const return GetLayerSupport(); } -// Default implementation of OptimizeSubgraphView for backward compatibility with the old API. +// Default implementation of OptimizeSubgraphView. Returns an untouched subgraph. // Override this method with a custom optimization implementation. OptimizationViews IBackendInternal::OptimizeSubgraphView(const SubgraphView& subgraph) const { - bool optimizationAttempted = false; - - ARMNN_NO_DEPRECATE_WARN_BEGIN - SubGraphUniquePtr optSubgraph = OptimizeSubGraph(subgraph, optimizationAttempted); - ARMNN_NO_DEPRECATE_WARN_END - OptimizationViews result; - if (!optimizationAttempted) - { - result.AddUntouchedSubgraph(SubgraphView(subgraph)); - } - else if (optSubgraph) - { - result.AddSubstitution({subgraph, SubgraphView(*optSubgraph.get())}); - } - else - { - result.AddFailedSubgraph(SubgraphView(subgraph)); - } + result.AddUntouchedSubgraph(SubgraphView(subgraph)); return result; } diff --git a/src/backends/backendsCommon/IBackendInternal.hpp b/src/backends/backendsCommon/IBackendInternal.hpp deleted file mode 100644 index 61ccc4f057..0000000000 --- a/src/backends/backendsCommon/IBackendInternal.hpp +++ /dev/null @@ -1,9 +0,0 @@ -// -// Copyright © 2019 Arm Ltd. All rights reserved. -// SPDX-License-Identifier: MIT -// - -// This file is depricated and will be removed soon. -// Please use the new header in armnn/backends instead. -// This will use the new armnn/backends header. 
-#include <armnn/backends/IBackendInternal.hpp> diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp index 2c3f827622..ca1acc376b 100644 --- a/src/backends/backendsCommon/LayerSupportBase.cpp +++ b/src/backends/backendsCommon/LayerSupportBase.cpp @@ -37,13 +37,6 @@ bool DefaultLayerSupport(const char* func, namespace armnn { -bool LayerSupportBase::IsAbsSupported(const TensorInfo&, // input - const TensorInfo&, // output - Optional<std::string &> reasonIfUnsupported) const -{ - return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); -} - bool LayerSupportBase::IsActivationSupported(const TensorInfo&, // input const TensorInfo&, //output const ActivationDescriptor&, // descriptor @@ -238,31 +231,11 @@ bool LayerSupportBase::IsDivisionSupported(const TensorInfo&, // input0 return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); } -bool LayerSupportBase::IsElementwiseUnarySupported(const TensorInfo& input, - const TensorInfo& output, - const ElementwiseUnaryDescriptor& descriptor, +bool LayerSupportBase::IsElementwiseUnarySupported(const TensorInfo&, // input + const TensorInfo&, // output + const ElementwiseUnaryDescriptor&, // descriptor Optional<std::string&> reasonIfUnsupported) const { - if (descriptor.m_Operation == UnaryOperation::Abs) - { - ARMNN_NO_DEPRECATE_WARN_BEGIN - return IsAbsSupported(input, output, reasonIfUnsupported); - ARMNN_NO_DEPRECATE_WARN_END - } - else if (descriptor.m_Operation == UnaryOperation::Rsqrt) - { - ARMNN_NO_DEPRECATE_WARN_BEGIN - return IsRsqrtSupported(input, output, reasonIfUnsupported); - ARMNN_NO_DEPRECATE_WARN_END - } - return false; -} - -bool LayerSupportBase::IsEqualSupported(const armnn::TensorInfo&, // input0 - const armnn::TensorInfo&, // input1 - const armnn::TensorInfo&, // output - armnn::Optional<std::string &> reasonIfUnsupported) const -{ return DefaultLayerSupport(__func__, __FILE__, __LINE__, 
reasonIfUnsupported); } @@ -301,28 +274,12 @@ bool LayerSupportBase::IsFullyConnectedSupported(const TensorInfo&, // input bool LayerSupportBase::IsGatherSupported(const armnn::TensorInfo&, // input0 const armnn::TensorInfo&, // input1 const armnn::TensorInfo&, // output - armnn::Optional<std::string&> reasonIfUnsupported) const -{ - return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); -} - -bool LayerSupportBase::IsGatherSupported(const armnn::TensorInfo&, // input0 - const armnn::TensorInfo&, // input1 - const armnn::TensorInfo&, // output const GatherDescriptor&, // descriptor armnn::Optional<std::string&> reasonIfUnsupported) const { return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); } -bool LayerSupportBase::IsGreaterSupported(const TensorInfo&, // input0 - const TensorInfo&, // input1 - const TensorInfo&, // output - Optional<std::string&> reasonIfUnsupported) const -{ - return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); -} - bool LayerSupportBase::IsInputSupported(const TensorInfo&, // input Optional<std::string&> reasonIfUnsupported) const { @@ -422,14 +379,6 @@ bool LayerSupportBase::IsMergeSupported(const TensorInfo&, // input0 return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); } -bool LayerSupportBase::IsMergerSupported(const std::vector<const TensorInfo*> inputs, - const TensorInfo& output, - const OriginsDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported) const -{ - return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported); -} - bool LayerSupportBase::IsMinimumSupported(const TensorInfo&, // input0 const TensorInfo&, // input1 const TensorInfo&, // output @@ -553,13 +502,6 @@ bool LayerSupportBase::IsReshapeSupported(const TensorInfo&, // input return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); } -bool LayerSupportBase::IsResizeBilinearSupported(const TensorInfo&, // input - const 
TensorInfo&, // output - Optional<std::string&> reasonIfUnsupported) const -{ - return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); -} - bool LayerSupportBase::IsResizeSupported(const TensorInfo&, // input const TensorInfo&, // output const ResizeDescriptor&, // descriptor @@ -568,13 +510,6 @@ bool LayerSupportBase::IsResizeSupported(const TensorInfo&, // input return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); } -bool LayerSupportBase::IsRsqrtSupported(const TensorInfo&, // input - const TensorInfo&, // output - Optional<std::string &> reasonIfUnsupported) const -{ - return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); -} - bool LayerSupportBase::IsShapeSupported(const TensorInfo&, // input const TensorInfo&, // output Optional<std::string&> reasonIfUnsupported) const @@ -615,13 +550,6 @@ bool LayerSupportBase::IsSpaceToDepthSupported(const TensorInfo&, // input } bool LayerSupportBase::IsSplitterSupported(const TensorInfo&, // input - const ViewsDescriptor&, // descriptor - Optional<std::string&> reasonIfUnsupported) const -{ - return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); -} - -bool LayerSupportBase::IsSplitterSupported(const TensorInfo&, // input const std::vector<std::reference_wrapper<TensorInfo>>&, // outputs const ViewsDescriptor&, // descriptor Optional<std::string&> reasonIfUnsupported) const diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp index 240b1dab73..fc2906f497 100644 --- a/src/backends/backendsCommon/LayerSupportBase.hpp +++ b/src/backends/backendsCommon/LayerSupportBase.hpp @@ -13,11 +13,6 @@ namespace armnn class LayerSupportBase : public ILayerSupport { public: - ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead") - bool IsAbsSupported(const TensorInfo& input, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const 
override; - bool IsActivationSupported(const TensorInfo& input, const TensorInfo& output, const ActivationDescriptor& descriptor, @@ -149,12 +144,6 @@ public: const ElementwiseUnaryDescriptor& descriptor, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead") - bool IsEqualSupported(const TensorInfo& input0, - const TensorInfo& input1, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool IsFakeQuantizationSupported(const TensorInfo& input, const FakeQuantizationDescriptor& descriptor, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; @@ -175,24 +164,12 @@ public: const FullyConnectedDescriptor& descriptor, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - ARMNN_DEPRECATED_MSG("Use IsGatherSupported with descriptor instead") - bool IsGatherSupported(const TensorInfo& input0, - const TensorInfo& input1, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool IsGatherSupported(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& output, const GatherDescriptor& descriptor, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead") - bool IsGreaterSupported(const TensorInfo& input0, - const TensorInfo& input1, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool IsInputSupported(const TensorInfo& input, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; @@ -257,12 +234,6 @@ public: const TensorInfo& output, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead") - bool IsMergerSupported(const std::vector<const TensorInfo*> inputs, - const TensorInfo& 
output, - const OriginsDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool IsMinimumSupported(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& output, @@ -346,16 +317,6 @@ public: const ResizeDescriptor& descriptor, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead") - bool IsResizeBilinearSupported(const TensorInfo& input, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - - ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead") - bool IsRsqrtSupported(const TensorInfo& input, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool IsShapeSupported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; @@ -380,11 +341,6 @@ public: const SpaceToDepthDescriptor& descriptor, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead") - bool IsSplitterSupported(const TensorInfo& input, - const ViewsDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool IsSplitterSupported(const TensorInfo& input, const std::vector<std::reference_wrapper<TensorInfo>>& outputs, const ViewsDescriptor& descriptor, diff --git a/src/backends/backendsCommon/TensorHandle.hpp b/src/backends/backendsCommon/TensorHandle.hpp index 4e9d87d6eb..b898bd11a5 100644 --- a/src/backends/backendsCommon/TensorHandle.hpp +++ b/src/backends/backendsCommon/TensorHandle.hpp @@ -242,16 +242,17 @@ private: std::shared_ptr<ConstTensorHandle> m_TensorHandle; }; -using ConstCpuTensorHandle ARMNN_DEPRECATED_MSG("ConstCpuTensorHandle is deprecated, " - "use ConstTensorHandle instead") = ConstTensorHandle; -using 
CpuTensorHandle ARMNN_DEPRECATED_MSG("CpuTensorHandle is deprecated, " - "use TensorHandle instead") = TensorHandle; -using ScopedCpuTensorHandle ARMNN_DEPRECATED_MSG("ScopedCpuTensorHandle is deprecated, " - "use ScopedTensorHandle instead") = ScopedTensorHandle; -using PassthroughCpuTensorHandle ARMNN_DEPRECATED_MSG("PassthroughCpuTensorHandle is deprecated, use " - "PassthroughTensorHandle instead") = PassthroughTensorHandle; -using ConstPassthroughCpuTensorHandle ARMNN_DEPRECATED_MSG("ConstPassthroughCpuTensorHandle is " +using ConstCpuTensorHandle ARMNN_DEPRECATED_MSG_REMOVAL_DATE("ConstCpuTensorHandle is deprecated, " + "use ConstTensorHandle instead", "22.05") = ConstTensorHandle; +using CpuTensorHandle ARMNN_DEPRECATED_MSG_REMOVAL_DATE("CpuTensorHandle is deprecated, " + "use TensorHandle instead", "22.05") = TensorHandle; +using ScopedCpuTensorHandle ARMNN_DEPRECATED_MSG_REMOVAL_DATE("ScopedCpuTensorHandle is deprecated, " + "use ScopedTensorHandle instead", "22.05") = ScopedTensorHandle; +using PassthroughCpuTensorHandle ARMNN_DEPRECATED_MSG_REMOVAL_DATE("PassthroughCpuTensorHandle is deprecated, use " + "PassthroughTensorHandle instead", + "22.05") = PassthroughTensorHandle; +using ConstPassthroughCpuTensorHandle ARMNN_DEPRECATED_MSG_REMOVAL_DATE("ConstPassthroughCpuTensorHandle is " "deprecated, use ConstPassthroughTensorHandle " - "instead") = ConstPassthroughTensorHandle; + "instead", "22.05") = ConstPassthroughTensorHandle; } // namespace armnn diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp index fe22133104..27b59ea3a6 100644 --- a/src/backends/backendsCommon/WorkloadData.cpp +++ b/src/backends/backendsCommon/WorkloadData.cpp @@ -157,15 +157,12 @@ void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType, void ValidPerAxisQuantizedDataType(const TensorInfo& tensor, const std::string& descName, const std::string& tensorName) { - ARMNN_NO_DEPRECATE_WARN_BEGIN - if 
(tensor.GetDataType() != DataType::QSymmS8 && - tensor.GetDataType() != DataType::QuantizedSymm8PerAxis) + if (tensor.GetDataType() != DataType::QSymmS8) { throw InvalidArgumentException(descName + ": Expected data type which supports per-axis quantization scheme but got " + GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor."); } - ARMNN_NO_DEPRECATE_WARN_END } //--------------------------------------------------------------- @@ -362,15 +359,12 @@ void ValidateWeightDataType(const TensorInfo& inputInfo, const DataType inputType = inputInfo.GetDataType(); if (IsQuantized8BitType(inputType)) { - ARMNN_NO_DEPRECATE_WARN_BEGIN const std::vector<DataType> validTypes = { DataType::QAsymmS8, DataType::QAsymmU8, - DataType::QSymmS8, - DataType::QuantizedSymm8PerAxis // deprecated + DataType::QSymmS8 }; - ARMNN_NO_DEPRECATE_WARN_END ValidateDataTypes(weightInfo, validTypes, descName); } diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp index 896081ecfd..29d39d14a9 100644 --- a/src/backends/backendsCommon/WorkloadData.hpp +++ b/src/backends/backendsCommon/WorkloadData.hpp @@ -345,10 +345,15 @@ struct RankQueueDescriptor : QueueDescriptor void Validate(const WorkloadInfo& workloadInfo) const; }; -struct ResizeBilinearQueueDescriptor : QueueDescriptorWithParameters<ResizeBilinearDescriptor> +ARMNN_NO_DEPRECATE_WARN_BEGIN +struct +ARMNN_DEPRECATED_MSG_REMOVAL_DATE("ResizeBilinearQueueDescriptor is deprecated use ResizeQueueDescriptor instead", + "22.08") +ResizeBilinearQueueDescriptor : QueueDescriptorWithParameters<ResizeBilinearDescriptor> { void Validate(const WorkloadInfo& workloadInfo) const; }; +ARMNN_NO_DEPRECATE_WARN_END struct ResizeQueueDescriptor : QueueDescriptorWithParameters<ResizeDescriptor> { diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp index 666f83de71..3b7f3a0f1f 100644 --- 
a/src/backends/backendsCommon/WorkloadFactory.cpp +++ b/src/backends/backendsCommon/WorkloadFactory.cpp @@ -1499,13 +1499,6 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, modelOptions); } -// Default Implementations -std::unique_ptr<IWorkload> IWorkloadFactory::CreateAbs(const AbsQueueDescriptor& /*descriptor*/, - const WorkloadInfo& /*info*/) const -{ - return std::unique_ptr<IWorkload>(); -} - std::unique_ptr<IWorkload> IWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { @@ -1644,12 +1637,6 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateElementwiseUnary(const Elemen return std::unique_ptr<IWorkload>(); } -std::unique_ptr<IWorkload> IWorkloadFactory::CreateEqual(const EqualQueueDescriptor& /*descriptor*/, - const WorkloadInfo& /*Info*/) const -{ - return std::unique_ptr<IWorkload>(); -} - std::unique_ptr<IWorkload> IWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& /*desc*/, const WorkloadInfo& /*info*/) const { @@ -1680,12 +1667,6 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateGather(const GatherQueueDescr return std::unique_ptr<IWorkload>(); } -std::unique_ptr<IWorkload> IWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& /*descriptor*/, - const WorkloadInfo& /*info*/) const -{ - return std::unique_ptr<IWorkload>(); -} - std::unique_ptr<IWorkload> IWorkloadFactory::CreateInstanceNormalization( const InstanceNormalizationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const @@ -1753,12 +1734,6 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerge(const MergeQueueDescrip return std::unique_ptr<IWorkload>(); } -std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerger(const MergerQueueDescriptor& /*descriptor*/, - const WorkloadInfo& /*info*/) const -{ - return std::unique_ptr<IWorkload>(); -} - std::unique_ptr<IWorkload> IWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& /*descriptor*/, 
const WorkloadInfo& /*info*/) const { @@ -1848,24 +1823,12 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateReshape(const ReshapeQueueDes return std::unique_ptr<IWorkload>(); } -std::unique_ptr<IWorkload> IWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& /*descriptor*/, - const WorkloadInfo& /*info*/) const -{ - return std::unique_ptr<IWorkload>(); -} - std::unique_ptr<IWorkload> IWorkloadFactory::CreateResize(const ResizeQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr<IWorkload>(); } -std::unique_ptr<IWorkload> IWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& /*descriptor*/, - const WorkloadInfo& /*info*/) const -{ - return std::unique_ptr<IWorkload>(); -} - std::unique_ptr<IWorkload> IWorkloadFactory::CreateShape(const ShapeQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp index c16fcb882b..df4bcd6144 100644 --- a/src/backends/backendsCommon/WorkloadFactory.hpp +++ b/src/backends/backendsCommon/WorkloadFactory.hpp @@ -68,10 +68,6 @@ public: DataLayout dataLayout, const bool IsMemoryManaged = true) const = 0; - ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead") - virtual std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor, - const WorkloadInfo& info) const; - virtual std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor, const WorkloadInfo& info) const; @@ -141,10 +137,6 @@ public: virtual std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& Info) const; - ARMNN_DEPRECATED_MSG("Use CreateComparison instead") - virtual std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor, - const WorkloadInfo& Info) const; - virtual std::unique_ptr<IWorkload> CreateFakeQuantization(const 
FakeQuantizationQueueDescriptor& descriptor, const WorkloadInfo& info) const; @@ -160,10 +152,6 @@ public: virtual std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor, const WorkloadInfo& info) const; - ARMNN_DEPRECATED_MSG("Use CreateComparison instead") - virtual std::unique_ptr<IWorkload> CreateGreater(const GreaterQueueDescriptor& descriptor, - const WorkloadInfo& info) const; - virtual std::unique_ptr<IWorkload> CreateInstanceNormalization( const InstanceNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const; @@ -198,10 +186,6 @@ public: virtual std::unique_ptr<IWorkload> CreateMerge(const MergeQueueDescriptor& descriptor, const WorkloadInfo& info) const; - ARMNN_DEPRECATED_MSG("Use CreateConcat instead") - virtual std::unique_ptr<IWorkload> CreateMerger(const MergerQueueDescriptor& descriptor, - const WorkloadInfo& info) const; - virtual std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor, const WorkloadInfo& info) const; @@ -250,14 +234,6 @@ public: virtual std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor, const WorkloadInfo& info) const; - ARMNN_DEPRECATED_MSG("Use CreateResize instead") - virtual std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor, - const WorkloadInfo& info) const; - - ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead") - virtual std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor, - const WorkloadInfo& info) const; - virtual std::unique_ptr<IWorkload> CreateShape(const ShapeQueueDescriptor& descriptor, const WorkloadInfo& info) const; diff --git a/src/backends/backendsCommon/WorkloadFactoryBase.hpp b/src/backends/backendsCommon/WorkloadFactoryBase.hpp index 295202324e..ef507a64f8 100644 --- a/src/backends/backendsCommon/WorkloadFactoryBase.hpp +++ b/src/backends/backendsCommon/WorkloadFactoryBase.hpp @@ -34,10 +34,6 @@ public: const bool 
/*IsMemoryManaged*/) const override { return nullptr; } - std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& /*descriptor*/, - const WorkloadInfo& /*info*/) const override - { return nullptr; } - std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const override { return nullptr; } @@ -111,19 +107,17 @@ public: { if (descriptor.m_Parameters.m_Operation == UnaryOperation::Abs) { - AbsQueueDescriptor absDescriptor; - return CreateAbs(absDescriptor, info); + { return nullptr; } } else if (descriptor.m_Parameters.m_Operation == UnaryOperation::Rsqrt) { - RsqrtQueueDescriptor rsqrtDescriptor; - return CreateRsqrt(rsqrtDescriptor, info); + { return nullptr; } } else if (descriptor.m_Parameters.m_Operation == UnaryOperation::LogicalNot) { return CreateLogicalUnary(descriptor, info); } - return nullptr; + { return nullptr; } } std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& /*descriptor*/, @@ -234,10 +228,6 @@ public: const WorkloadInfo& /*info*/) const override { return nullptr; } - std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& /*descriptor*/, - const WorkloadInfo& /*info*/) const override - { return nullptr; } - std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const override { return nullptr; } diff --git a/src/backends/backendsCommon/WorkloadUtils.cpp b/src/backends/backendsCommon/WorkloadUtils.cpp index bd7f09b28a..fe681936f1 100644 --- a/src/backends/backendsCommon/WorkloadUtils.cpp +++ b/src/backends/backendsCommon/WorkloadUtils.cpp @@ -265,13 +265,9 @@ armnn::ConstTensor ConvertWeightTensorFromArmnnToAcl(const ConstTensorHandle* we case DataType::QAsymmU8: weightPermuted = ReorderWeightChannelsForAcl<uint8_t>(weightPermuted, dataLayout, permuteBuffer); break; - ARMNN_NO_DEPRECATE_WARN_BEGIN - case DataType::QuantizedSymm8PerAxis: - ARMNN_FALLTHROUGH; case 
DataType::QSymmS8: weightPermuted = ReorderWeightChannelsForAcl<int8_t>(weightPermuted, dataLayout, permuteBuffer); break; - ARMNN_NO_DEPRECATE_WARN_END default: break; } diff --git a/src/backends/backendsCommon/test/MockBackend.cpp b/src/backends/backendsCommon/test/MockBackend.cpp index e706fc8157..df1a5c19aa 100644 --- a/src/backends/backendsCommon/test/MockBackend.cpp +++ b/src/backends/backendsCommon/test/MockBackend.cpp @@ -117,11 +117,6 @@ IBackendInternal::IMemoryManagerUniquePtr MockBackend::CreateMemoryManager() con return IMemoryManagerUniquePtr{}; } -IBackendInternal::Optimizations MockBackend::GetOptimizations() const -{ - return Optimizations{}; -} - IBackendInternal::ILayerSupportSharedPtr MockBackend::GetLayerSupport() const { static ILayerSupportSharedPtr layerSupport{new MockLayerSupport}; diff --git a/src/backends/backendsCommon/test/MockBackend.hpp b/src/backends/backendsCommon/test/MockBackend.hpp index d90ad798da..c0624525dc 100644 --- a/src/backends/backendsCommon/test/MockBackend.hpp +++ b/src/backends/backendsCommon/test/MockBackend.hpp @@ -162,7 +162,6 @@ public: CreateBackendProfilingContext(const IRuntime::CreationOptions& creationOptions, IBackendProfilingPtr& backendProfiling) override; - IBackendInternal::Optimizations GetOptimizations() const override; IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override; OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const override; diff --git a/src/backends/backendsCommon/test/mockBackend/MockImportBackend.cpp b/src/backends/backendsCommon/test/mockBackend/MockImportBackend.cpp index ebe94348fc..ea6ece7b32 100644 --- a/src/backends/backendsCommon/test/mockBackend/MockImportBackend.cpp +++ b/src/backends/backendsCommon/test/mockBackend/MockImportBackend.cpp @@ -79,11 +79,6 @@ IBackendInternal::IMemoryManagerUniquePtr MockImportBackend::CreateMemoryManager return std::make_unique<RefMemoryManager>(); } -IBackendInternal::Optimizations 
MockImportBackend::GetOptimizations() const -{ - return Optimizations{}; -} - IBackendInternal::ILayerSupportSharedPtr MockImportBackend::GetLayerSupport() const { static ILayerSupportSharedPtr layerSupport{new MockImportLayerSupport}; diff --git a/src/backends/backendsCommon/test/mockBackend/MockImportBackend.hpp b/src/backends/backendsCommon/test/mockBackend/MockImportBackend.hpp index ecc661f43b..c07a97c29e 100644 --- a/src/backends/backendsCommon/test/mockBackend/MockImportBackend.hpp +++ b/src/backends/backendsCommon/test/mockBackend/MockImportBackend.hpp @@ -40,7 +40,6 @@ public: IBackendInternal::IBackendProfilingContextPtr CreateBackendProfilingContext( const IRuntime::CreationOptions& creationOptions, IBackendProfilingPtr& backendProfiling) override; - IBackendInternal::Optimizations GetOptimizations() const override; IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override; OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const override; diff --git a/src/backends/cl/ClBackend.cpp b/src/backends/cl/ClBackend.cpp index b85232e75c..dd58e002be 100644 --- a/src/backends/cl/ClBackend.cpp +++ b/src/backends/cl/ClBackend.cpp @@ -192,11 +192,6 @@ IBackendInternal::IBackendProfilingContextPtr ClBackend::CreateBackendProfilingC return IBackendProfilingContextPtr{}; } -IBackendInternal::Optimizations ClBackend::GetOptimizations() const -{ - return Optimizations{}; -} - IBackendInternal::IBackendSpecificModelContextPtr ClBackend::CreateBackendSpecificModelContext( const ModelOptions& modelOptions) const { diff --git a/src/backends/cl/ClBackend.hpp b/src/backends/cl/ClBackend.hpp index 0a069b930b..80e4b97ff4 100644 --- a/src/backends/cl/ClBackend.hpp +++ b/src/backends/cl/ClBackend.hpp @@ -78,7 +78,6 @@ public: IBackendInternal::IBackendProfilingContextPtr CreateBackendProfilingContext( const IRuntime::CreationOptions&, IBackendProfilingPtr& backendProfiling) override; - IBackendInternal::Optimizations GetOptimizations() const 
override; IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override; IBackendInternal::ILayerSupportSharedPtr GetLayerSupport(const ModelOptions& modelOptions) const override; diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp index 087302157f..9a50f4aabd 100644 --- a/src/backends/cl/ClLayerSupport.cpp +++ b/src/backends/cl/ClLayerSupport.cpp @@ -176,14 +176,6 @@ ClLayerSupport::ClLayerSupport() { } -bool ClLayerSupport::IsAbsSupported(const TensorInfo& input, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported) const -{ - ElementwiseUnaryDescriptor descriptor(UnaryOperation::Abs); - return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported); -} - bool ClLayerSupport::IsActivationSupported(const TensorInfo& input, const TensorInfo& output, const ActivationDescriptor& descriptor, @@ -563,15 +555,6 @@ bool ClLayerSupport::IsGatherSupported(const TensorInfo& input0, descriptor); } -bool ClLayerSupport::IsGreaterSupported(const TensorInfo& input0, - const TensorInfo& input1, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported) const -{ - ComparisonDescriptor descriptor(ComparisonOperation::Greater); - return IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported); -} - bool ClLayerSupport::IsInputSupported(const TensorInfo& input, Optional<std::string&> reasonIfUnsupported) const { @@ -690,14 +673,6 @@ bool ClLayerSupport::IsMeanSupported(const TensorInfo& input, descriptor); } -bool ClLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs, - const TensorInfo& output, - const MergerDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported) const -{ - return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported); -} - bool ClLayerSupport::IsMinimumSupported(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& output, @@ -864,29 +839,6 @@ bool 
ClLayerSupport::IsResizeSupported(const TensorInfo& input, FORWARD_WORKLOAD_VALIDATE_FUNC(ClResizeWorkloadValidate, reasonIfUnsupported, input, output, descriptor); } -bool ClLayerSupport::IsResizeBilinearSupported(const TensorInfo& input, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported) const -{ - ResizeDescriptor descriptor; - descriptor.m_Method = ResizeMethod::Bilinear; - descriptor.m_DataLayout = DataLayout::NCHW; - - const TensorShape& outputShape = output.GetShape(); - descriptor.m_TargetHeight = outputShape[2]; - descriptor.m_TargetWidth = outputShape[3]; - - return IsResizeSupported(input, output, descriptor, reasonIfUnsupported); -} - -bool ClLayerSupport::IsRsqrtSupported(const TensorInfo& input, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported) const -{ - ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt); - return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported); -} - bool ClLayerSupport::IsSliceSupported(const TensorInfo& input, const TensorInfo& output, const SliceDescriptor& descriptor, @@ -928,17 +880,6 @@ bool ClLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input, } bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input, - const ViewsDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported) const -{ - IgnoreUnused(descriptor); - return IsSupportedForDataTypeCl(reasonIfUnsupported, - input.GetDataType(), - &TrueFunc<>, - &TrueFunc<>); -} - -bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input, const std::vector<std::reference_wrapper<TensorInfo>>& outputs, const ViewsDescriptor& descriptor, Optional<std::string&> reasonIfUnsupported) const diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp index 43ae428163..e7a6748f0a 100644 --- a/src/backends/cl/ClLayerSupport.hpp +++ b/src/backends/cl/ClLayerSupport.hpp @@ -18,11 +18,6 @@ public: ClLayerSupport(); ~ClLayerSupport() {} - 
ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead") - bool IsAbsSupported(const TensorInfo& input, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool IsActivationSupported(const TensorInfo& input, const TensorInfo& output, const ActivationDescriptor& descriptor, @@ -145,12 +140,6 @@ public: const GatherDescriptor& descriptor, Optional<std::string&> reasonIfUnsupported) const override; - ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead") - bool IsGreaterSupported(const TensorInfo& input0, - const TensorInfo& input1, - const TensorInfo& ouput, - Optional<std::string&> reasonIfUnsupported) const override; - bool IsInputSupported(const TensorInfo& input, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; @@ -196,12 +185,6 @@ public: const MeanDescriptor& descriptor, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead") - bool IsMergerSupported(const std::vector<const TensorInfo*> inputs, - const TensorInfo& output, - const MergerDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool IsMinimumSupported(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& output, @@ -277,16 +260,6 @@ public: const ResizeDescriptor& descriptor, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead") - bool IsResizeBilinearSupported(const TensorInfo& input, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - - ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead") - bool IsRsqrtSupported(const TensorInfo& input, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool IsSliceSupported(const TensorInfo& input, const TensorInfo& 
output, const SliceDescriptor& descriptor, @@ -307,11 +280,6 @@ public: const SpaceToDepthDescriptor& descriptor, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead") - bool IsSplitterSupported(const TensorInfo& input, - const ViewsDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool IsSplitterSupported(const TensorInfo& input, const std::vector<std::reference_wrapper<TensorInfo>>& outputs, const ViewsDescriptor& descriptor, diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp index 530cb690d9..3400799f45 100644 --- a/src/backends/cl/ClWorkloadFactory.cpp +++ b/src/backends/cl/ClWorkloadFactory.cpp @@ -194,17 +194,6 @@ std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateSubTensorHandle(ITensorH PolymorphicDowncast<IClTensorHandle*>(&parent), shape, coords); } -std::unique_ptr<IWorkload> ClWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor, - const WorkloadInfo& info) const -{ - IgnoreUnused(descriptor); - - ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor; - elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Abs); - - return CreateElementwiseUnary(elementwiseUnaryDescriptor, info); -} - std::unique_ptr<IWorkload> ClWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor, const WorkloadInfo& info) const { @@ -376,17 +365,6 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateElementwiseUnary(const Eleme } } -std::unique_ptr<IWorkload> ClWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor, - const WorkloadInfo& info) const -{ - IgnoreUnused(descriptor); - - ComparisonQueueDescriptor comparisonDescriptor; - comparisonDescriptor.m_Parameters = ComparisonDescriptor(ComparisonOperation::Equal); - - return CreateComparison(comparisonDescriptor, info); -} - std::unique_ptr<IWorkload> 
ClWorkloadFactory::CreateFill(const FillQueueDescriptor& descriptor, const WorkloadInfo& info) const { @@ -414,17 +392,6 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateGather(const GatherQueueDesc return MakeWorkload<ClGatherWorkload>(descriptor, info, m_CLCompileContext); } -std::unique_ptr<IWorkload> ClWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor, - const WorkloadInfo& info) const -{ - IgnoreUnused(descriptor); - - ComparisonQueueDescriptor comparisonDescriptor; - comparisonDescriptor.m_Parameters = ComparisonDescriptor(ComparisonOperation::Greater); - - return CreateComparison(comparisonDescriptor, info); -} - std::unique_ptr<IWorkload> ClWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor, const WorkloadInfo& info) const { @@ -507,12 +474,6 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMemImport(const MemImportQue return std::make_unique<ImportMemGenericWorkload>(descriptor, info); } -std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMerger(const MergerQueueDescriptor& descriptor, - const WorkloadInfo& info) const -{ - return CreateConcat(descriptor, info); -} - std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor, const WorkloadInfo& info) const { @@ -609,32 +570,6 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateResize(const ResizeQueueDesc return MakeWorkload<ClResizeWorkload>(descriptor, info, m_CLCompileContext); } -std::unique_ptr<IWorkload> ClWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor, - const WorkloadInfo& info) const -{ - ResizeQueueDescriptor resizeDescriptor; - resizeDescriptor.m_Inputs = descriptor.m_Inputs; - resizeDescriptor.m_Outputs = descriptor.m_Outputs; - - resizeDescriptor.m_Parameters.m_Method = ResizeMethod::Bilinear; - resizeDescriptor.m_Parameters.m_DataLayout = descriptor.m_Parameters.m_DataLayout; - resizeDescriptor.m_Parameters.m_TargetHeight = 
descriptor.m_Parameters.m_TargetHeight; - resizeDescriptor.m_Parameters.m_TargetWidth = descriptor.m_Parameters.m_TargetWidth; - - return CreateResize(resizeDescriptor, info); -} - -std::unique_ptr<IWorkload> ClWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor, - const WorkloadInfo& info) const -{ - IgnoreUnused(descriptor); - - ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor; - elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt); - - return CreateElementwiseUnary(elementwiseUnaryDescriptor, info); -} - std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor, const WorkloadInfo& info) const { diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp index 7f01ee0918..3ca33c891e 100644 --- a/src/backends/cl/ClWorkloadFactory.hpp +++ b/src/backends/cl/ClWorkloadFactory.hpp @@ -55,10 +55,6 @@ public: DataLayout dataLayout, const bool IsMemoryManaged = true) const override; - ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead") - std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor, - const WorkloadInfo& info) const override; - std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor, const WorkloadInfo& info) const override; @@ -119,10 +115,6 @@ public: std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info) const override; - ARMNN_DEPRECATED_MSG("Use CreateComparison instead") - std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor, - const WorkloadInfo& info) const override; - std::unique_ptr<IWorkload> CreateFill(const FillQueueDescriptor& descriptor, const WorkloadInfo& info) const override; @@ -135,10 +127,6 @@ public: std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor, const WorkloadInfo& info) const override; - ARMNN_DEPRECATED_MSG("Use 
CreateComparison instead") - std::unique_ptr<IWorkload> CreateGreater(const GreaterQueueDescriptor& descriptor, - const WorkloadInfo& info) const override; - std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& descriptor, const WorkloadInfo& info) const override; @@ -169,10 +157,6 @@ public: std::unique_ptr<IWorkload> CreateMemImport(const MemImportQueueDescriptor& descriptor, const WorkloadInfo& info) const override; - ARMNN_DEPRECATED_MSG("Use CreateConcat instead") - std::unique_ptr<IWorkload> CreateMerger(const MergerQueueDescriptor& descriptor, - const WorkloadInfo& info) const override; - std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor, const WorkloadInfo& info) const override; @@ -221,14 +205,6 @@ public: std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor, const WorkloadInfo& info) const override; - ARMNN_DEPRECATED_MSG("Use CreateResize instead") - std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor, - const WorkloadInfo& info) const override; - - ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead") - std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor, - const WorkloadInfo& info) const override; - std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& descriptor, const WorkloadInfo& info) const override; diff --git a/src/backends/cl/workloads/ClWorkloadUtils.hpp b/src/backends/cl/workloads/ClWorkloadUtils.hpp index 41b97c1e16..7d378fc656 100644 --- a/src/backends/cl/workloads/ClWorkloadUtils.hpp +++ b/src/backends/cl/workloads/ClWorkloadUtils.hpp @@ -129,9 +129,6 @@ inline void InitializeArmComputeClTensorData(arm_compute::CLTensor& clTensor, case DataType::QAsymmU8: CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<uint8_t>()); break; - ARMNN_NO_DEPRECATE_WARN_BEGIN - case DataType::QuantizedSymm8PerAxis: - ARMNN_FALLTHROUGH; case DataType::QAsymmS8: case DataType::QSymmS8: 
CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<int8_t>()); @@ -139,7 +136,6 @@ inline void InitializeArmComputeClTensorData(arm_compute::CLTensor& clTensor, case DataType::QSymmS16: CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<int16_t>()); break; - ARMNN_NO_DEPRECATE_WARN_END case DataType::Signed32: CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<int32_t>()); break; diff --git a/src/backends/neon/NeonBackend.cpp b/src/backends/neon/NeonBackend.cpp index 17876753fb..2c3abfd70d 100644 --- a/src/backends/neon/NeonBackend.cpp +++ b/src/backends/neon/NeonBackend.cpp @@ -103,11 +103,6 @@ IBackendInternal::IBackendProfilingContextPtr NeonBackend::CreateBackendProfilin return IBackendProfilingContextPtr{}; } -IBackendInternal::Optimizations NeonBackend::GetOptimizations() const -{ - return Optimizations{}; -} - IBackendInternal::IBackendSpecificModelContextPtr NeonBackend::CreateBackendSpecificModelContext( const ModelOptions& modelOptions) const { diff --git a/src/backends/neon/NeonBackend.hpp b/src/backends/neon/NeonBackend.hpp index 20da73aaf8..d28ac3bfcd 100644 --- a/src/backends/neon/NeonBackend.hpp +++ b/src/backends/neon/NeonBackend.hpp @@ -48,7 +48,6 @@ public: IBackendInternal::IBackendContextPtr CreateBackendContext(const IRuntime::CreationOptions&) const override; IBackendInternal::IBackendProfilingContextPtr CreateBackendProfilingContext( const IRuntime::CreationOptions&, IBackendProfilingPtr& backendProfiling) override; - IBackendInternal::Optimizations GetOptimizations() const override; IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override; IBackendInternal::ILayerSupportSharedPtr GetLayerSupport(const ModelOptions& modelOptions) const override; diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp index ec64f902da..d742229bbe 100644 --- a/src/backends/neon/NeonLayerSupport.cpp +++ b/src/backends/neon/NeonLayerSupport.cpp @@ -145,14 +145,6 @@ 
NeonLayerSupport::NeonLayerSupport() { } -bool NeonLayerSupport::IsAbsSupported(const TensorInfo& input, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported) const -{ - ElementwiseUnaryDescriptor descriptor(UnaryOperation::Abs); - return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported); -} - bool NeonLayerSupport::IsActivationSupported(const TensorInfo& input, const TensorInfo& output, const ActivationDescriptor& descriptor, @@ -537,15 +529,6 @@ bool NeonLayerSupport::IsGatherSupported(const TensorInfo& input0, descriptor); } -bool NeonLayerSupport::IsGreaterSupported(const armnn::TensorInfo& input0, - const armnn::TensorInfo& input1, - const armnn::TensorInfo& output, - armnn::Optional<std::string&> reasonIfUnsupported) const -{ - ComparisonDescriptor descriptor(ComparisonOperation::Greater); - return IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported); -} - bool NeonLayerSupport::IsInputSupported(const TensorInfo& input, Optional<std::string&> reasonIfUnsupported) const { @@ -653,14 +636,6 @@ bool NeonLayerSupport::IsMeanSupported(const TensorInfo& input, descriptor); } -bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs, - const TensorInfo& output, - const MergerDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported) const -{ - return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported); -} - bool NeonLayerSupport::IsMinimumSupported(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& output, @@ -852,29 +827,6 @@ bool NeonLayerSupport::IsResizeSupported(const TensorInfo& input, descriptor); } -bool NeonLayerSupport::IsResizeBilinearSupported(const TensorInfo& input, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported) const -{ - ResizeDescriptor descriptor; - descriptor.m_Method = ResizeMethod::Bilinear; - descriptor.m_DataLayout = DataLayout::NCHW; - - const TensorShape& outputShape = 
output.GetShape(); - descriptor.m_TargetHeight = outputShape[2]; - descriptor.m_TargetWidth = outputShape[3]; - - return IsResizeSupported(input, output, descriptor, reasonIfUnsupported); -} - -bool NeonLayerSupport::IsRsqrtSupported(const TensorInfo& input, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported) const -{ - ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt); - return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported); -} - bool NeonLayerSupport::IsSliceSupported(const TensorInfo& input, const TensorInfo& output, const SliceDescriptor& descriptor, @@ -920,17 +872,6 @@ bool NeonLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input, } bool NeonLayerSupport::IsSplitterSupported(const TensorInfo& input, - const ViewsDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported) const -{ - armnn::IgnoreUnused(descriptor); - return IsSupportedForDataTypeNeon(reasonIfUnsupported, - input.GetDataType(), - &TrueFunc<>, - &TrueFunc<>); -} - -bool NeonLayerSupport::IsSplitterSupported(const TensorInfo& input, const std::vector<std::reference_wrapper<TensorInfo>>& outputs, const ViewsDescriptor& descriptor, Optional<std::string&> reasonIfUnsupported) const diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp index fc1e1f6125..155d96acdc 100644 --- a/src/backends/neon/NeonLayerSupport.hpp +++ b/src/backends/neon/NeonLayerSupport.hpp @@ -19,11 +19,6 @@ public: ~NeonLayerSupport() {} - ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead") - bool IsAbsSupported(const TensorInfo& input, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool IsActivationSupported(const TensorInfo& input, const TensorInfo& output, const ActivationDescriptor& descriptor, @@ -150,12 +145,6 @@ public: const GatherDescriptor& descriptor, Optional<std::string&> reasonIfUnsupported) const override; - 
ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead") - bool IsGreaterSupported(const TensorInfo& input0, - const TensorInfo& input1, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool IsInputSupported(const TensorInfo& input, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; @@ -201,12 +190,6 @@ public: const MeanDescriptor& descriptor, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead") - bool IsMergerSupported(const std::vector<const TensorInfo*> inputs, - const TensorInfo& output, - const MergerDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool IsMinimumSupported(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& output, @@ -287,16 +270,6 @@ public: const ResizeDescriptor& descriptor, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead") - bool IsResizeBilinearSupported(const TensorInfo& input, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - - ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead") - bool IsRsqrtSupported(const TensorInfo& input, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool IsSliceSupported(const TensorInfo& input, const TensorInfo& output, const SliceDescriptor& descriptor, @@ -317,11 +290,6 @@ public: const SpaceToDepthDescriptor& descriptor, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead") - bool IsSplitterSupported(const TensorInfo& input, - const ViewsDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool 
IsSplitterSupported(const TensorInfo& input, const std::vector<std::reference_wrapper<TensorInfo>>& outputs, const ViewsDescriptor& descriptor, diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp index 9ec7583b18..605b03d6b7 100644 --- a/src/backends/neon/NeonWorkloadFactory.cpp +++ b/src/backends/neon/NeonWorkloadFactory.cpp @@ -131,17 +131,6 @@ std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateTensorHandle(const Ten return tensorHandle; } -std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor, - const WorkloadInfo& info) const -{ - IgnoreUnused(descriptor); - - ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor; - elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Abs); - - return CreateElementwiseUnary(elementwiseUnaryDescriptor, info); -} - std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor, const WorkloadInfo& info) const { @@ -323,17 +312,6 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateElementwiseUnary( } } -std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor, - const WorkloadInfo& info) const -{ - IgnoreUnused(descriptor); - - ComparisonQueueDescriptor comparisonDescriptor; - comparisonDescriptor.m_Parameters = ComparisonDescriptor(ComparisonOperation::Equal); - - return CreateComparison(comparisonDescriptor, info); -} - std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFill(const FillQueueDescriptor& descriptor, const WorkloadInfo& info) const { @@ -358,17 +336,6 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateGather(const armnn::Gather return std::make_unique<NeonGatherWorkload>(descriptor, info); } -std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor, - const WorkloadInfo& info) const -{ - IgnoreUnused(descriptor); - - 
ComparisonQueueDescriptor comparisonDescriptor; - comparisonDescriptor.m_Parameters = ComparisonDescriptor(ComparisonOperation::Greater); - - return CreateComparison(comparisonDescriptor, info); -} - std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor, const WorkloadInfo& info) const { @@ -449,12 +416,6 @@ std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMemImport(const Mem return std::make_unique<ImportMemGenericWorkload>(descriptor, info); } -std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMerger(const MergerQueueDescriptor& descriptor, - const WorkloadInfo& info) const -{ - return CreateConcat(descriptor, info); -} - std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor, const WorkloadInfo& info) const { @@ -552,32 +513,6 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateResize(const ResizeQueueDe return std::make_unique<NeonResizeWorkload>(descriptor, info); } -std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateResizeBilinear( - const ResizeBilinearQueueDescriptor& descriptor, - const WorkloadInfo& info) const -{ - ResizeQueueDescriptor resizeDescriptor; - resizeDescriptor.m_Inputs = descriptor.m_Inputs; - resizeDescriptor.m_Outputs = descriptor.m_Outputs; - - resizeDescriptor.m_Parameters.m_DataLayout = descriptor.m_Parameters.m_DataLayout; - resizeDescriptor.m_Parameters.m_TargetWidth = descriptor.m_Parameters.m_TargetWidth; - resizeDescriptor.m_Parameters.m_TargetHeight = descriptor.m_Parameters.m_TargetHeight; - - return CreateResize(resizeDescriptor, info); -} - -std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor &descriptor, - const WorkloadInfo &info) const -{ - IgnoreUnused(descriptor); - - ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor; - elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt); - - return 
CreateElementwiseUnary(elementwiseUnaryDescriptor, info); -} - std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor, const WorkloadInfo& info) const { diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp index 41fc506aaa..bd84c057f5 100644 --- a/src/backends/neon/NeonWorkloadFactory.hpp +++ b/src/backends/neon/NeonWorkloadFactory.hpp @@ -52,10 +52,6 @@ public: DataLayout dataLayout, const bool IsMemoryManaged = true) const override; - ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead") - std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor, - const WorkloadInfo& info) const override; - std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor, const WorkloadInfo& info) const override; @@ -122,10 +118,6 @@ public: std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& Info) const override; - ARMNN_DEPRECATED_MSG("Use CreateComparison instead") - std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor, - const WorkloadInfo& info) const override; - std::unique_ptr<IWorkload> CreateFill(const FillQueueDescriptor& descriptor, const WorkloadInfo& info) const override; @@ -138,10 +130,6 @@ public: std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor, const WorkloadInfo& info) const override; - ARMNN_DEPRECATED_MSG("Use CreateComparison instead") - std::unique_ptr<IWorkload> CreateGreater(const GreaterQueueDescriptor& descriptor, - const WorkloadInfo& info) const override; - std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& descriptor, const WorkloadInfo& info) const override; @@ -172,10 +160,6 @@ public: std::unique_ptr<IWorkload> CreateMemImport(const MemImportQueueDescriptor& descriptor, const WorkloadInfo& info) const override; - ARMNN_DEPRECATED_MSG("Use CreateConcat instead") - 
std::unique_ptr<IWorkload> CreateMerger(const MergerQueueDescriptor& descriptor, - const WorkloadInfo& info) const override; - std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor, const WorkloadInfo& info) const override; @@ -224,14 +208,6 @@ public: std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor, const WorkloadInfo& info) const override; - ARMNN_DEPRECATED_MSG("Use CreateResize instead") - std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor, - const WorkloadInfo& info) const override; - - ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead") - std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor, - const WorkloadInfo& info) const override; - std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& descriptor, const WorkloadInfo& info) const override; diff --git a/src/backends/neon/workloads/NeonWorkloadUtils.hpp b/src/backends/neon/workloads/NeonWorkloadUtils.hpp index 1199f30863..f51493d383 100644 --- a/src/backends/neon/workloads/NeonWorkloadUtils.hpp +++ b/src/backends/neon/workloads/NeonWorkloadUtils.hpp @@ -73,14 +73,10 @@ inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor, case DataType::QAsymmU8: CopyArmComputeTensorData(tensor, handle->GetConstTensor<uint8_t>()); break; - ARMNN_NO_DEPRECATE_WARN_BEGIN - case DataType::QuantizedSymm8PerAxis: - ARMNN_FALLTHROUGH; case DataType::QSymmS8: case DataType::QAsymmS8: CopyArmComputeTensorData(tensor, handle->GetConstTensor<int8_t>()); break; - ARMNN_NO_DEPRECATE_WARN_END case DataType::Signed32: CopyArmComputeTensorData(tensor, handle->GetConstTensor<int32_t>()); break; diff --git a/src/backends/reference/RefBackend.cpp b/src/backends/reference/RefBackend.cpp index c9f164e0c9..a3060f0798 100644 --- a/src/backends/reference/RefBackend.cpp +++ b/src/backends/reference/RefBackend.cpp @@ -58,27 +58,12 @@ IBackendInternal::IMemoryManagerUniquePtr 
RefBackend::CreateMemoryManager() cons return std::make_unique<RefMemoryManager>(); } -IBackendInternal::Optimizations RefBackend::GetOptimizations() const -{ - return Optimizations{}; -} - IBackendInternal::ILayerSupportSharedPtr RefBackend::GetLayerSupport() const { static ILayerSupportSharedPtr layerSupport{new RefLayerSupport}; return layerSupport; } -bool RefBackend::HasCapability(BackendCapability capabilityClass) const -{ - auto search = oldCpuRefCapabilities.find(capabilityClass); - if (search != oldCpuRefCapabilities.end()) - { - return true; - } - return false; -} - OptimizationViews RefBackend::OptimizeSubgraphView(const SubgraphView& subgraph) const { OptimizationViews optimizationViews; diff --git a/src/backends/reference/RefBackend.hpp b/src/backends/reference/RefBackend.hpp index 28c1591179..c04bf43db3 100644 --- a/src/backends/reference/RefBackend.hpp +++ b/src/backends/reference/RefBackend.hpp @@ -47,7 +47,6 @@ public: IBackendInternal::IBackendProfilingContextPtr CreateBackendProfilingContext( const IRuntime::CreationOptions& creationOptions, IBackendProfilingPtr& backendProfiling) override; - IBackendInternal::Optimizations GetOptimizations() const override; IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override; OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const override; @@ -60,8 +59,6 @@ public: { return cpuRefCapabilities; }; - - bool HasCapability(BackendCapability capabilityClass) const override; }; } // namespace armnn diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp index c0ede678bf..b80aa9992f 100644 --- a/src/backends/reference/RefLayerSupport.cpp +++ b/src/backends/reference/RefLayerSupport.cpp @@ -58,15 +58,6 @@ std::string CreateIncorrectDimensionsErrorMsg(unsigned int expected, } // anonymous namespace -bool RefLayerSupport::IsAbsSupported(const TensorInfo& input, const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported) 
const -{ - return IsElementwiseUnarySupported(input, - output, - ElementwiseUnaryDescriptor(UnaryOperation::Abs), - reasonIfUnsupported); -} - bool RefLayerSupport::IsActivationSupported(const TensorInfo& input, const TensorInfo& output, const ActivationDescriptor& descriptor, @@ -565,15 +556,12 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input, const DataType inputType = input.GetDataType(); if (IsQuantized8BitType(inputType)) { - ARMNN_NO_DEPRECATE_WARN_BEGIN - std::array<DataType, 4> supportedWeightTypes = + std::array<DataType, 3> supportedWeightTypes = { DataType::QAsymmS8, DataType::QAsymmU8, - DataType::QSymmS8, - DataType::QuantizedSymm8PerAxis // deprecated + DataType::QSymmS8 }; - ARMNN_NO_DEPRECATE_WARN_END supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported, "Reference Convolution2d: weights type not supported for quantized input."); @@ -769,15 +757,12 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input, const DataType inputType = input.GetDataType(); if (IsQuantized8BitType(inputType)) { - ARMNN_NO_DEPRECATE_WARN_BEGIN - std::array<DataType, 4> supportedWeightTypes = + std::array<DataType, 3> supportedWeightTypes = { DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS8, - DataType::QuantizedSymm8PerAxis // deprecated }; - ARMNN_NO_DEPRECATE_WARN_END supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported, "Reference DepthwiseConvolution2d: weights type not supported for " @@ -977,18 +962,6 @@ bool RefLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input, return supported; } -bool RefLayerSupport::IsEqualSupported(const TensorInfo& input0, - const TensorInfo& input1, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported) const -{ - return IsComparisonSupported(input0, - input1, - output, - ComparisonDescriptor(ComparisonOperation::Equal), - reasonIfUnsupported); -} - bool 
RefLayerSupport::IsFakeQuantizationSupported(const TensorInfo& input, const FakeQuantizationDescriptor& descriptor, Optional<std::string&> reasonIfUnsupported) const @@ -1173,18 +1146,6 @@ bool RefLayerSupport::IsGatherSupported(const armnn::TensorInfo& input0, return supported; } -bool RefLayerSupport::IsGreaterSupported(const TensorInfo& input0, - const TensorInfo& input1, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported) const -{ - return IsComparisonSupported(input0, - input1, - output, - ComparisonDescriptor(ComparisonOperation::Greater), - reasonIfUnsupported); -} - bool RefLayerSupport::IsInputSupported(const TensorInfo& /*input*/, Optional<std::string&> /*reasonIfUnsupported*/) const { @@ -1523,14 +1484,6 @@ bool RefLayerSupport::IsMeanSupported(const TensorInfo& input, return supported; } -bool RefLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs, - const TensorInfo& output, - const MergerDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported) const -{ - return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported); -} - bool RefLayerSupport::IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional<std::string &> reasonIfUnsupported) const @@ -1897,33 +1850,6 @@ bool RefLayerSupport::IsReshapeSupported(const TensorInfo& input, "Reference reshape: input type not supported."); } -bool RefLayerSupport::IsResizeBilinearSupported(const TensorInfo& input, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported) const -{ - bool supported = true; - std::array<DataType,6> supportedTypes = - { - DataType::BFloat16, - DataType::Float32, - DataType::Float16, - DataType::QAsymmS8, - DataType::QAsymmU8, - DataType::QSymmS16 - }; - - supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported, - "Reference ResizeBilinear: input type not supported"); - - supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), 
reasonIfUnsupported, - "Reference ResizeBilinear: output type not supported"); - - supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported, - "Reference ResizeBilinear: input and output types not matching"); - - return supported; -} - bool RefLayerSupport::IsResizeSupported(const TensorInfo& input, const TensorInfo& output, const ResizeDescriptor& descriptor, @@ -1953,16 +1879,6 @@ bool RefLayerSupport::IsResizeSupported(const TensorInfo& input, return supported; } -bool RefLayerSupport::IsRsqrtSupported(const TensorInfo& input, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported) const -{ - return IsElementwiseUnarySupported(input, - output, - ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), - reasonIfUnsupported); -} - bool RefLayerSupport::IsShapeSupported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported) const @@ -2101,28 +2017,6 @@ bool RefLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input, } bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input, - const ViewsDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported) const -{ - IgnoreUnused(descriptor); - bool supported = true; - std::array<DataType,6> supportedTypes = - { - DataType::BFloat16, - DataType::Float32, - DataType::Float16, - DataType::QAsymmS8, - DataType::QAsymmU8, - DataType::QSymmS16 - }; - - supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported, - "Reference splitter: input type not supported"); - - return supported; -} - -bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input, const std::vector<std::reference_wrapper<TensorInfo>>& outputs, const ViewsDescriptor& descriptor, Optional<std::string&> reasonIfUnsupported) const @@ -2322,15 +2216,12 @@ bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input, const DataType inputType = input.GetDataType(); if (IsQuantized8BitType(inputType)) { - 
ARMNN_NO_DEPRECATE_WARN_BEGIN - std::array<DataType, 4> supportedWeightTypes = + std::array<DataType, 3> supportedWeightTypes = { DataType::QAsymmS8, DataType::QAsymmU8, - DataType::QSymmS8, - DataType::QuantizedSymm8PerAxis //Deprecated + DataType::QSymmS8 }; - ARMNN_NO_DEPRECATE_WARN_END supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported, "Reference TransposeConvolution2d: weights type not supported for " diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp index 627418e3e1..53d7907204 100644 --- a/src/backends/reference/RefLayerSupport.hpp +++ b/src/backends/reference/RefLayerSupport.hpp @@ -12,11 +12,6 @@ namespace armnn class RefLayerSupport : public LayerSupportBase { public: - ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead") - bool IsAbsSupported(const TensorInfo& input, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool IsActivationSupported(const TensorInfo& input, const TensorInfo& output, const ActivationDescriptor& descriptor, @@ -147,12 +142,6 @@ public: const ElementwiseUnaryDescriptor& descriptor, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead") - bool IsEqualSupported(const TensorInfo& input0, - const TensorInfo& input1, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool IsFakeQuantizationSupported(const TensorInfo& input, const FakeQuantizationDescriptor& descriptor, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; @@ -179,12 +168,6 @@ public: const GatherDescriptor& descriptor, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead") - bool IsGreaterSupported(const TensorInfo& input0, - const TensorInfo& input1, - const 
TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool IsInputSupported(const TensorInfo& input, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; @@ -230,12 +213,6 @@ public: const MeanDescriptor& descriptor, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead") - bool IsMergerSupported(const std::vector<const TensorInfo*> inputs, - const TensorInfo& output, - const MergerDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool IsMemCopySupported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; @@ -301,20 +278,11 @@ public: const ReshapeDescriptor& descriptor, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool IsResizeBilinearSupported(const TensorInfo& input, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool IsResizeSupported(const TensorInfo& input, const TensorInfo& output, const ResizeDescriptor& descriptor, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead") - bool IsRsqrtSupported(const TensorInfo& input, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool IsShapeSupported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; @@ -340,11 +308,6 @@ public: Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead") - bool IsSplitterSupported(const TensorInfo& input, - const ViewsDescriptor& descriptor, - Optional<std::string&> reasonIfUnsupported = 
EmptyOptional()) const override; - bool IsSplitterSupported(const TensorInfo& input, const std::vector<std::reference_wrapper<TensorInfo>>& outputs, const ViewsDescriptor& descriptor, diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp index 18a5af277f..75008bc866 100644 --- a/src/backends/reference/RefWorkloadFactory.cpp +++ b/src/backends/reference/RefWorkloadFactory.cpp @@ -129,16 +129,6 @@ std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const Tens return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager); } -std::unique_ptr<IWorkload> RefWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor, - const WorkloadInfo& info) const -{ - IgnoreUnused(descriptor); - ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor; - elementwiseUnaryDescriptor.m_Parameters.m_Operation = UnaryOperation::Abs; - - return CreateElementwiseUnary(elementwiseUnaryDescriptor, info); -} - std::unique_ptr<IWorkload> RefWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor, const WorkloadInfo& info) const { @@ -331,16 +321,6 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateElementwiseUnary(const Elem return std::make_unique<RefElementwiseUnaryWorkload>(descriptor, info); } -std::unique_ptr<IWorkload> RefWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor, - const WorkloadInfo& info) const -{ - IgnoreUnused(descriptor); - ComparisonQueueDescriptor comparisonDescriptor; - comparisonDescriptor.m_Parameters.m_Operation = ComparisonOperation::Equal; - - return CreateComparison(comparisonDescriptor, info); -} - std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor, const WorkloadInfo& info) const { @@ -379,16 +359,6 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateGather(const GatherQueueDes return std::make_unique<RefGatherWorkload>(descriptor, info); } 
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor, - const WorkloadInfo& info) const -{ - IgnoreUnused(descriptor); - ComparisonQueueDescriptor comparisonDescriptor; - comparisonDescriptor.m_Parameters.m_Operation = ComparisonOperation::Greater; - - return CreateComparison(comparisonDescriptor, info); -} - std::unique_ptr<IWorkload> RefWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor, const WorkloadInfo& info) const { @@ -479,12 +449,6 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMemImport(const MemImportQu return std::make_unique<ImportMemGenericWorkload>(descriptor, info); } -std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMerger(const MergerQueueDescriptor& descriptor, - const WorkloadInfo& info) const -{ - return CreateConcat(descriptor, info); -} - std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor, const WorkloadInfo& info) const { @@ -615,28 +579,6 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateResize(const ResizeQueueDes return std::make_unique<RefResizeWorkload>(descriptor, info); } -std::unique_ptr<IWorkload> RefWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor, - const WorkloadInfo& info) const -{ - ResizeQueueDescriptor resizeDescriptor; - resizeDescriptor.m_Parameters.m_Method = ResizeMethod::Bilinear; - resizeDescriptor.m_Parameters.m_DataLayout = descriptor.m_Parameters.m_DataLayout; - resizeDescriptor.m_Parameters.m_TargetWidth = descriptor.m_Parameters.m_TargetWidth; - resizeDescriptor.m_Parameters.m_TargetHeight = descriptor.m_Parameters.m_TargetHeight; - - return CreateResize(resizeDescriptor, info); -} - -std::unique_ptr<IWorkload> RefWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor, - const WorkloadInfo& info) const -{ - IgnoreUnused(descriptor); - ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor; - 
elementwiseUnaryDescriptor.m_Parameters.m_Operation = UnaryOperation::Rsqrt; - - return CreateElementwiseUnary(elementwiseUnaryDescriptor, info); -} - std::unique_ptr<IWorkload> RefWorkloadFactory::CreateShape(const ShapeQueueDescriptor& descriptor, const WorkloadInfo& info) const { diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp index d00d3ca822..a85e8dda3e 100644 --- a/src/backends/reference/RefWorkloadFactory.hpp +++ b/src/backends/reference/RefWorkloadFactory.hpp @@ -66,10 +66,6 @@ public: DataLayout dataLayout, const bool IsMemoryManaged = true) const override; - ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead") - std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor, - const WorkloadInfo& info) const override; - std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor, const WorkloadInfo& info) const override; @@ -139,10 +135,6 @@ public: std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info) const override; - ARMNN_DEPRECATED_MSG("Use CreateComparison instead") - std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor, - const WorkloadInfo& info) const override; - std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor, const WorkloadInfo& info) const override; @@ -158,10 +150,6 @@ public: std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor, const WorkloadInfo& info) const override; - ARMNN_DEPRECATED_MSG("Use CreateComparison instead") - std::unique_ptr<IWorkload> CreateGreater(const GreaterQueueDescriptor& descriptor, - const WorkloadInfo& info) const override; - std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& descriptor, const WorkloadInfo& info) const override; @@ -192,10 +180,6 @@ public: std::unique_ptr<IWorkload> CreateMemImport(const 
MemImportQueueDescriptor& descriptor, const WorkloadInfo& info) const override; - ARMNN_DEPRECATED_MSG("Use CreateConcat instead") - std::unique_ptr<IWorkload> CreateMerger(const MergerQueueDescriptor& descriptor, - const WorkloadInfo& info) const override; - std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor, const WorkloadInfo& info) const override; @@ -241,14 +225,6 @@ public: std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor, const WorkloadInfo& info) const override; - ARMNN_DEPRECATED_MSG("Use CreateResize instead") - std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor, - const WorkloadInfo& info) const override; - - ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead") - std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor, - const WorkloadInfo& info) const override; - std::unique_ptr<IWorkload> CreateShape(const ShapeQueueDescriptor& descriptor, const WorkloadInfo& info) const override; diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk index 7d6c59a273..f8169a6c0c 100644 --- a/src/backends/reference/backend.mk +++ b/src/backends/reference/backend.mk @@ -88,7 +88,6 @@ BACKEND_SOURCES := \ workloads/RefQuantizeWorkload.cpp \ workloads/RefReduceWorkload.cpp \ workloads/RefReshapeWorkload.cpp \ - workloads/RefResizeBilinearWorkload.cpp \ workloads/RefResizeWorkload.cpp \ workloads/RefSliceWorkload.cpp \ workloads/RefSoftmaxWorkload.cpp \ diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt index e169c03ad8..5727291be3 100644 --- a/src/backends/reference/workloads/CMakeLists.txt +++ b/src/backends/reference/workloads/CMakeLists.txt @@ -148,8 +148,6 @@ list(APPEND armnnRefBackendWorkloads_sources RefReduceWorkload.hpp RefReshapeWorkload.cpp RefReshapeWorkload.hpp - RefResizeBilinearWorkload.cpp - RefResizeBilinearWorkload.hpp 
RefResizeWorkload.cpp RefResizeWorkload.hpp RefShapeWorkload.hpp diff --git a/src/backends/reference/workloads/Decoders.hpp b/src/backends/reference/workloads/Decoders.hpp index cd0dc5d40f..c2a456bfce 100644 --- a/src/backends/reference/workloads/Decoders.hpp +++ b/src/backends/reference/workloads/Decoders.hpp @@ -67,13 +67,6 @@ inline std::unique_ptr<Decoder<float>> MakeDecoder(const TensorInfo& info, const { switch(info.GetDataType()) { - ARMNN_NO_DEPRECATE_WARN_BEGIN - case armnn::DataType::QuantizedSymm8PerAxis: - { - std::pair<unsigned int, std::vector<float>> params = armnnUtils::GetPerAxisParams(info); - return std::make_unique<QSymm8PerAxisDecoder>(static_cast<const int8_t*>(data), info); - } - ARMNN_NO_DEPRECATE_WARN_END case DataType::QAsymmS8: { return std::make_unique<QASymmS8Decoder>( diff --git a/src/backends/reference/workloads/Encoders.hpp b/src/backends/reference/workloads/Encoders.hpp index a2d565ec4a..a7be9e172b 100644 --- a/src/backends/reference/workloads/Encoders.hpp +++ b/src/backends/reference/workloads/Encoders.hpp @@ -22,16 +22,6 @@ inline std::unique_ptr<Encoder<float>> MakeEncoder(const TensorInfo& info, void* { switch(info.GetDataType()) { - ARMNN_NO_DEPRECATE_WARN_BEGIN - case armnn::DataType::QuantizedSymm8PerAxis: - { - std::pair<unsigned int, std::vector<float>> params = armnnUtils::GetPerAxisParams(info); - return std::make_unique<QSymm8PerAxisEncoder>( - static_cast<int8_t*>(data), - params.second, - params.first); - } - ARMNN_NO_DEPRECATE_WARN_END case armnn::DataType::QAsymmS8: { return std::make_unique<QASymmS8Encoder>( diff --git a/src/backends/reference/workloads/RefResizeBilinearWorkload.cpp b/src/backends/reference/workloads/RefResizeBilinearWorkload.cpp deleted file mode 100644 index 2cf5888f33..0000000000 --- a/src/backends/reference/workloads/RefResizeBilinearWorkload.cpp +++ /dev/null @@ -1,45 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. 
-// SPDX-License-Identifier: MIT -// - -#include "RefResizeBilinearWorkload.hpp" - -#include "RefWorkloadUtils.hpp" -#include "Resize.hpp" -#include "BaseIterator.hpp" -#include "Profiling.hpp" - -#include "BaseIterator.hpp" -#include "Decoders.hpp" -#include "Encoders.hpp" - -namespace armnn -{ - -void RefResizeBilinearWorkload::Execute() const -{ - Execute(m_Data.m_Inputs, m_Data.m_Outputs); -} - -void RefResizeBilinearWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor) -{ - Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs); -} - -void RefResizeBilinearWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const -{ - ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefResizeBilinearWorkload_Execute"); - - const TensorInfo& inputInfo = GetTensorInfo(inputs[0]); - const TensorInfo& outputInfo = GetTensorInfo(outputs[0]); - - std::unique_ptr<Decoder<float>> decoderPtr = MakeDecoder<float>(inputInfo, inputs[0]->Map()); - Decoder<float> &decoder = *decoderPtr; - std::unique_ptr<Encoder<float>> encoderPtr = MakeEncoder<float>(outputInfo, outputs[0]->Map()); - Encoder<float> &encoder = *encoderPtr; - - Resize(decoder, inputInfo, encoder, outputInfo, m_Data.m_Parameters.m_DataLayout, armnn::ResizeMethod::Bilinear); -} - -} //namespace armnn diff --git a/src/backends/reference/workloads/RefResizeBilinearWorkload.hpp b/src/backends/reference/workloads/RefResizeBilinearWorkload.hpp deleted file mode 100644 index 5ada3d1ff8..0000000000 --- a/src/backends/reference/workloads/RefResizeBilinearWorkload.hpp +++ /dev/null @@ -1,24 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. 
-// SPDX-License-Identifier: MIT -// - -#pragma once - -#include <backendsCommon/Workload.hpp> -#include <backendsCommon/WorkloadData.hpp> - -namespace armnn -{ - -class RefResizeBilinearWorkload : public BaseWorkload<ResizeBilinearQueueDescriptor> -{ -public: - using BaseWorkload<ResizeBilinearQueueDescriptor>::BaseWorkload; - void Execute() const override; - void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override; -private: - void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const; -}; - -} //namespace armnn diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp index ed3aa90e5f..914137c23d 100644 --- a/src/backends/reference/workloads/RefWorkloads.hpp +++ b/src/backends/reference/workloads/RefWorkloads.hpp @@ -59,7 +59,6 @@ #include "RefRankWorkload.hpp" #include "RefReduceWorkload.hpp" #include "RefReshapeWorkload.hpp" -#include "RefResizeBilinearWorkload.hpp" #include "RefResizeWorkload.hpp" #include "RefShapeWorkload.hpp" #include "RefSliceWorkload.hpp" |