diff options
author | Jan Eilers <jan.eilers@arm.com> | 2020-04-10 13:00:44 +0100 |
---|---|---|
committer | Jan Eilers <jan.eilers@arm.com> | 2020-04-14 09:24:26 +0100 |
commit | 3c9e04563b9fb7d7aadc61834909a9ffc6b1769c (patch) | |
tree | 367af149439d56a01eba1b522b7486aca5d56012 /src | |
parent | 76bc728bc1681ed216ffe6f7720f3f57b5137fab (diff) | |
download | armnn-3c9e04563b9fb7d7aadc61834909a9ffc6b1769c.tar.gz |
IVGCVSW-4483 Removes boost::polymorphic_pointer_downcast
* replaced boost::polymorphic_pointer_downcast with PolymorphicPointerDowncast
* replaced/removed includes
Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Change-Id: I0ef934a3804cf05e4c38dec6c4ec49c76111a302
Diffstat (limited to 'src')
23 files changed, 65 insertions, 70 deletions
diff --git a/src/backends/cl/ClBackend.cpp b/src/backends/cl/ClBackend.cpp index f662754693..0a898ec2c0 100644 --- a/src/backends/cl/ClBackend.cpp +++ b/src/backends/cl/ClBackend.cpp @@ -17,12 +17,12 @@ #include <armnn/backends/IBackendContext.hpp> #include <armnn/backends/IMemoryManager.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> + #include <Optimizer.hpp> #include <arm_compute/runtime/CL/CLBufferAllocator.h> -#include <boost/polymorphic_pointer_cast.hpp> - namespace armnn { @@ -41,7 +41,7 @@ IBackendInternal::IWorkloadFactoryPtr ClBackend::CreateWorkloadFactory( const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) const { return std::make_unique<ClWorkloadFactory>( - boost::polymorphic_pointer_downcast<ClMemoryManager>(memoryManager)); + PolymorphicPointerDowncast<ClMemoryManager>(memoryManager)); } IBackendInternal::IWorkloadFactoryPtr ClBackend::CreateWorkloadFactory( @@ -52,7 +52,7 @@ IBackendInternal::IWorkloadFactoryPtr ClBackend::CreateWorkloadFactory( registry.RegisterMemoryManager(memoryManager); return std::make_unique<ClWorkloadFactory>( - boost::polymorphic_pointer_downcast<ClMemoryManager>(memoryManager)); + PolymorphicPointerDowncast<ClMemoryManager>(memoryManager)); } std::vector<ITensorHandleFactory::FactoryId> ClBackend::GetHandleFactoryPreferences() const diff --git a/src/backends/cl/ClContextControl.cpp b/src/backends/cl/ClContextControl.cpp index dbcccce945..40357d5706 100644 --- a/src/backends/cl/ClContextControl.cpp +++ b/src/backends/cl/ClContextControl.cpp @@ -16,7 +16,6 @@ #include <arm_compute/runtime/CL/CLScheduler.h> #include <boost/format.hpp> -#include <boost/polymorphic_cast.hpp> namespace cl { diff --git a/src/backends/cl/ClTensorHandle.hpp b/src/backends/cl/ClTensorHandle.hpp index 1830d186b6..0481307972 100644 --- a/src/backends/cl/ClTensorHandle.hpp +++ b/src/backends/cl/ClTensorHandle.hpp @@ -9,6 +9,8 @@ #include <Half.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> + #include 
<arm_compute/runtime/CL/CLTensor.h> #include <arm_compute/runtime/CL/CLSubTensor.h> #include <arm_compute/runtime/IMemoryGroup.h> @@ -16,8 +18,6 @@ #include <arm_compute/core/TensorShape.h> #include <arm_compute/core/Coordinates.h> -#include <boost/polymorphic_pointer_cast.hpp> - namespace armnn { @@ -71,7 +71,7 @@ public: virtual void SetMemoryGroup(const std::shared_ptr<arm_compute::IMemoryGroup>& memoryGroup) override { - m_MemoryGroup = boost::polymorphic_pointer_downcast<arm_compute::MemoryGroup>(memoryGroup); + m_MemoryGroup = PolymorphicPointerDowncast<arm_compute::MemoryGroup>(memoryGroup); } TensorShape GetStrides() const override diff --git a/src/backends/cl/test/ClWorkloadFactoryHelper.hpp b/src/backends/cl/test/ClWorkloadFactoryHelper.hpp index 1dfba7573b..6ea2f119da 100644 --- a/src/backends/cl/test/ClWorkloadFactoryHelper.hpp +++ b/src/backends/cl/test/ClWorkloadFactoryHelper.hpp @@ -7,13 +7,12 @@ #include <armnn/backends/IBackendInternal.hpp> #include <armnn/backends/IMemoryManager.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> #include <backendsCommon/test/WorkloadFactoryHelper.hpp> #include <cl/ClBackend.hpp> #include <cl/ClWorkloadFactory.hpp> -#include <boost/polymorphic_pointer_cast.hpp> - namespace { @@ -29,7 +28,7 @@ struct WorkloadFactoryHelper<armnn::ClWorkloadFactory> static armnn::ClWorkloadFactory GetFactory( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return armnn::ClWorkloadFactory(boost::polymorphic_pointer_downcast<armnn::ClMemoryManager>(memoryManager)); + return armnn::ClWorkloadFactory(armnn::PolymorphicPointerDowncast<armnn::ClMemoryManager>(memoryManager)); } }; diff --git a/src/backends/cl/workloads/ClConcatWorkload.cpp b/src/backends/cl/workloads/ClConcatWorkload.cpp index 5370466163..e0aebd30cb 100644 --- a/src/backends/cl/workloads/ClConcatWorkload.cpp +++ b/src/backends/cl/workloads/ClConcatWorkload.cpp @@ -5,6 +5,7 @@ #include "ClConcatWorkload.hpp" #include "ClWorkloadUtils.hpp" 
#include <aclCommon/ArmComputeTensorUtils.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> #include <backendsCommon/CpuTensorHandle.hpp> #include <cl/ClTensorHandle.hpp> #include <cl/ClLayerSupport.hpp> @@ -12,8 +13,6 @@ #include <arm_compute/core/Types.h> #include <arm_compute/runtime/CL/functions/CLConcatenateLayer.h> -#include <boost/polymorphic_pointer_cast.hpp> - namespace armnn { using namespace armcomputetensorutils; @@ -72,11 +71,12 @@ ClConcatWorkload::ClConcatWorkload(const ConcatQueueDescriptor& descriptor, cons std::vector<arm_compute::ICLTensor *> aclInputs; for (auto input : m_Data.m_Inputs) { - arm_compute::ICLTensor& aclInput = boost::polymorphic_pointer_downcast<IClTensorHandle>(input)->GetTensor(); + arm_compute::ICLTensor& aclInput = armnn::PolymorphicPointerDowncast<IClTensorHandle>(input)->GetTensor(); aclInputs.emplace_back(&aclInput); } - arm_compute::ICLTensor& output = boost::polymorphic_pointer_downcast<IClTensorHandle>( - m_Data.m_Outputs[0])->GetTensor(); + + arm_compute::ICLTensor& output = + armnn::PolymorphicPointerDowncast<IClTensorHandle>(m_Data.m_Outputs[0])->GetTensor(); // Create the layer function auto layer = std::make_unique<arm_compute::CLConcatenateLayer>(); diff --git a/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp b/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp index 800a98409e..04885b18aa 100644 --- a/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp +++ b/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp @@ -8,11 +8,11 @@ #include "ClWorkloadUtils.hpp" #include <aclCommon/ArmComputeTensorUtils.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> #include <cl/ClTensorHandle.hpp> #include <boost/numeric/conversion/cast.hpp> -#include <boost/polymorphic_pointer_cast.hpp> namespace armnn { @@ -45,13 +45,13 @@ ClDepthToSpaceWorkload::ClDepthToSpaceWorkload(const DepthToSpaceQueueDescriptor arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout); 
arm_compute::ICLTensor& input = - boost::polymorphic_pointer_downcast<IClTensorHandle>(m_Data.m_Inputs[0])->GetTensor(); + PolymorphicPointerDowncast<IClTensorHandle>(m_Data.m_Inputs[0])->GetTensor(); input.info()->set_data_layout(aclDataLayout); int32_t blockSize = boost::numeric_cast<int32_t>(desc.m_Parameters.m_BlockSize); arm_compute::ICLTensor& output = - boost::polymorphic_pointer_downcast<IClTensorHandle>(m_Data.m_Outputs[0])->GetTensor(); + PolymorphicPointerDowncast<IClTensorHandle>(m_Data.m_Outputs[0])->GetTensor(); output.info()->set_data_layout(aclDataLayout); m_Layer.configure(&input, &output, blockSize); diff --git a/src/backends/cl/workloads/ClDequantizeWorkload.cpp b/src/backends/cl/workloads/ClDequantizeWorkload.cpp index eca795de7e..eb63900380 100644 --- a/src/backends/cl/workloads/ClDequantizeWorkload.cpp +++ b/src/backends/cl/workloads/ClDequantizeWorkload.cpp @@ -7,6 +7,7 @@ #include "ClWorkloadUtils.hpp" #include <aclCommon/ArmComputeTensorUtils.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> #include <backendsCommon/CpuTensorHandle.hpp> #include <arm_compute/core/Types.h> @@ -14,8 +15,6 @@ #include <cl/ClLayerSupport.hpp> #include <cl/ClTensorHandle.hpp> -#include <boost/polymorphic_pointer_cast.hpp> - namespace armnn { using namespace armcomputetensorutils; @@ -34,10 +33,10 @@ ClDequantizeWorkload::ClDequantizeWorkload(const DequantizeQueueDescriptor& desc { m_Data.ValidateInputsOutputs("ClDequantizeWorkload", 1, 1); - arm_compute::ICLTensor& input = boost::polymorphic_pointer_downcast<IClTensorHandle>( + arm_compute::ICLTensor& input = armnn::PolymorphicPointerDowncast<IClTensorHandle>( m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ICLTensor& output = boost::polymorphic_pointer_downcast<IClTensorHandle>( + arm_compute::ICLTensor& output = armnn::PolymorphicPointerDowncast<IClTensorHandle>( m_Data.m_Outputs[0])->GetTensor(); m_Layer.reset(new arm_compute::CLDequantizationLayer()); diff --git 
a/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp b/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp index 64da92c815..b87658b3f9 100644 --- a/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp +++ b/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp @@ -9,13 +9,12 @@ #include <aclCommon/ArmComputeUtils.hpp> #include <aclCommon/ArmComputeTensorUtils.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> #include <backendsCommon/CpuTensorHandle.hpp> #include <cl/ClLayerSupport.hpp> #include <cl/ClTensorHandle.hpp> #include <cl/ClLayerSupport.hpp> -#include <boost/polymorphic_pointer_cast.hpp> - namespace armnn { using namespace armcomputetensorutils; @@ -51,9 +50,9 @@ ClSpaceToBatchNdWorkload::ClSpaceToBatchNdWorkload( m_Data.ValidateInputsOutputs("ClSpaceToBatchNdWorkload", 1, 1); arm_compute::ICLTensor& input = - boost::polymorphic_pointer_downcast<IClTensorHandle>(m_Data.m_Inputs[0])->GetTensor(); + armnn::PolymorphicPointerDowncast<IClTensorHandle>(m_Data.m_Inputs[0])->GetTensor(); arm_compute::ICLTensor& output = - boost::polymorphic_pointer_downcast<IClTensorHandle>(m_Data.m_Outputs[0])->GetTensor(); + armnn::PolymorphicPointerDowncast<IClTensorHandle>(m_Data.m_Outputs[0])->GetTensor(); // ArmNN blockShape is [H, W] Cl asks for W, H int32_t blockHeight = boost::numeric_cast<int32_t>(m_Data.m_Parameters.m_BlockShape[0]); diff --git a/src/backends/cl/workloads/ClSplitterWorkload.cpp b/src/backends/cl/workloads/ClSplitterWorkload.cpp index 296e0a3dde..045fbb7595 100644 --- a/src/backends/cl/workloads/ClSplitterWorkload.cpp +++ b/src/backends/cl/workloads/ClSplitterWorkload.cpp @@ -10,6 +10,7 @@ #include <aclCommon/ArmComputeTensorUtils.hpp> #include <aclCommon/ArmComputeUtils.hpp> #include <arm_compute/runtime/CL/functions/CLSplit.h> +#include <armnn/utility/PolymorphicDowncast.hpp> #include <backendsCommon/CpuTensorHandle.hpp> #include <cl/ClTensorHandle.hpp> @@ -74,13 +75,13 @@ ClSplitterWorkload::ClSplitterWorkload(const 
SplitterQueueDescriptor& descriptor return; } - arm_compute::ICLTensor& input = boost::polymorphic_pointer_downcast<IClTensorHandle>( + arm_compute::ICLTensor& input = armnn::PolymorphicPointerDowncast<IClTensorHandle>( m_Data.m_Inputs[0])->GetTensor(); std::vector<arm_compute::ICLTensor *> aclOutputs; for (auto output : m_Data.m_Outputs) { - arm_compute::ICLTensor& aclOutput = boost::polymorphic_pointer_downcast<IClTensorHandle>(output)->GetTensor(); + arm_compute::ICLTensor& aclOutput = armnn::PolymorphicPointerDowncast<IClTensorHandle>(output)->GetTensor(); aclOutputs.emplace_back(&aclOutput); } diff --git a/src/backends/cl/workloads/ClStackWorkload.cpp b/src/backends/cl/workloads/ClStackWorkload.cpp index 3ba698ec4d..e434f9897f 100644 --- a/src/backends/cl/workloads/ClStackWorkload.cpp +++ b/src/backends/cl/workloads/ClStackWorkload.cpp @@ -5,6 +5,7 @@ #include "ClStackWorkload.hpp" #include "ClWorkloadUtils.hpp" #include <aclCommon/ArmComputeTensorUtils.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> #include <backendsCommon/CpuTensorHandle.hpp> #include <cl/ClTensorHandle.hpp> #include <cl/ClLayerSupport.hpp> @@ -12,7 +13,6 @@ #include <arm_compute/core/Types.h> #include <boost/numeric/conversion/cast.hpp> -#include <boost/polymorphic_pointer_cast.hpp> namespace armnn { @@ -51,10 +51,10 @@ ClStackWorkload::ClStackWorkload(const StackQueueDescriptor& descriptor, const W std::vector<arm_compute::ICLTensor*> aclInputs; for (auto input : m_Data.m_Inputs) { - arm_compute::ICLTensor& aclInput = boost::polymorphic_pointer_downcast<IClTensorHandle>(input)->GetTensor(); + arm_compute::ICLTensor& aclInput = armnn::PolymorphicPointerDowncast<IClTensorHandle>(input)->GetTensor(); aclInputs.emplace_back(&aclInput); } - arm_compute::ICLTensor& output = boost::polymorphic_pointer_downcast<IClTensorHandle>( + arm_compute::ICLTensor& output = armnn::PolymorphicPointerDowncast<IClTensorHandle>( m_Data.m_Outputs[0])->GetTensor(); m_Layer.reset(new 
arm_compute::CLStackLayer()); diff --git a/src/backends/neon/NeonBackend.cpp b/src/backends/neon/NeonBackend.cpp index 4201ba8c9a..841ed27006 100644 --- a/src/backends/neon/NeonBackend.cpp +++ b/src/backends/neon/NeonBackend.cpp @@ -16,12 +16,13 @@ #include <armnn/backends/IBackendContext.hpp> #include <armnn/backends/IMemoryManager.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> + #include <Optimizer.hpp> #include <arm_compute/runtime/Allocator.h> #include <boost/cast.hpp> -#include <boost/polymorphic_pointer_cast.hpp> namespace armnn { @@ -42,7 +43,7 @@ IBackendInternal::IWorkloadFactoryPtr NeonBackend::CreateWorkloadFactory( const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) const { return std::make_unique<NeonWorkloadFactory>( - boost::polymorphic_pointer_downcast<NeonMemoryManager>(memoryManager)); + PolymorphicPointerDowncast<NeonMemoryManager>(memoryManager)); } IBackendInternal::IWorkloadFactoryPtr NeonBackend::CreateWorkloadFactory( @@ -53,7 +54,7 @@ IBackendInternal::IWorkloadFactoryPtr NeonBackend::CreateWorkloadFactory( tensorHandleFactoryRegistry.RegisterMemoryManager(memoryManager); return std::make_unique<NeonWorkloadFactory>( - boost::polymorphic_pointer_downcast<NeonMemoryManager>(memoryManager)); + PolymorphicPointerDowncast<NeonMemoryManager>(memoryManager)); } IBackendInternal::IBackendContextPtr NeonBackend::CreateBackendContext(const IRuntime::CreationOptions&) const diff --git a/src/backends/neon/NeonTensorHandle.hpp b/src/backends/neon/NeonTensorHandle.hpp index f251034823..4cc610c85a 100644 --- a/src/backends/neon/NeonTensorHandle.hpp +++ b/src/backends/neon/NeonTensorHandle.hpp @@ -11,6 +11,7 @@ #include <aclCommon/ArmComputeTensorHandle.hpp> #include <aclCommon/ArmComputeTensorUtils.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> #include <arm_compute/runtime/MemoryGroup.h> #include <arm_compute/runtime/IMemoryGroup.h> @@ -19,8 +20,6 @@ #include <arm_compute/core/TensorShape.h> #include 
<arm_compute/core/Coordinates.h> -#include <boost/polymorphic_pointer_cast.hpp> - namespace armnn { @@ -77,7 +76,7 @@ public: virtual void SetMemoryGroup(const std::shared_ptr<arm_compute::IMemoryGroup>& memoryGroup) override { - m_MemoryGroup = boost::polymorphic_pointer_downcast<arm_compute::MemoryGroup>(memoryGroup); + m_MemoryGroup = PolymorphicPointerDowncast<arm_compute::MemoryGroup>(memoryGroup); } virtual const void* Map(bool /* blocking = true */) const override diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp index b3104b9576..b7609ee765 100644 --- a/src/backends/neon/NeonWorkloadFactory.cpp +++ b/src/backends/neon/NeonWorkloadFactory.cpp @@ -21,8 +21,6 @@ #include <neon/workloads/NeonWorkloadUtils.hpp> #include <neon/workloads/NeonWorkloads.hpp> -#include <boost/polymorphic_cast.hpp> - namespace armnn { diff --git a/src/backends/neon/test/NeonWorkloadFactoryHelper.hpp b/src/backends/neon/test/NeonWorkloadFactoryHelper.hpp index 708d23141d..8d92ddf44d 100644 --- a/src/backends/neon/test/NeonWorkloadFactoryHelper.hpp +++ b/src/backends/neon/test/NeonWorkloadFactoryHelper.hpp @@ -7,13 +7,12 @@ #include <armnn/backends/IBackendInternal.hpp> #include <armnn/backends/IMemoryManager.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> #include <backendsCommon/test/WorkloadFactoryHelper.hpp> #include <neon/NeonBackend.hpp> #include <neon/NeonWorkloadFactory.hpp> -#include <boost/polymorphic_pointer_cast.hpp> - namespace { @@ -30,7 +29,7 @@ struct WorkloadFactoryHelper<armnn::NeonWorkloadFactory> const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { return armnn::NeonWorkloadFactory( - boost::polymorphic_pointer_downcast<armnn::NeonMemoryManager>(memoryManager)); + armnn::PolymorphicPointerDowncast<armnn::NeonMemoryManager>(memoryManager)); } }; diff --git a/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp b/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp index 
a6e7aa4415..d2f538745c 100644 --- a/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp +++ b/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp @@ -6,6 +6,8 @@ #include "NeonBatchToSpaceNdWorkload.hpp" #include "NeonWorkloadUtils.hpp" + +#include <armnn/utility/PolymorphicDowncast.hpp> #include <ResolveType.hpp> namespace armnn @@ -38,9 +40,9 @@ NeonBatchToSpaceNdWorkload::NeonBatchToSpaceNdWorkload(const BatchToSpaceNdQueue m_Data.ValidateInputsOutputs("NeonBatchToSpaceNdWorkload", 1, 1); arm_compute::ITensor& input = - boost::polymorphic_pointer_downcast<IAclTensorHandle>(m_Data.m_Inputs[0])->GetTensor(); + armnn::PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Inputs[0])->GetTensor(); arm_compute::ITensor& output = - boost::polymorphic_pointer_downcast<IAclTensorHandle>(m_Data.m_Outputs[0])->GetTensor(); + armnn::PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Outputs[0])->GetTensor(); arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout); input.info()->set_data_layout(aclDataLayout); diff --git a/src/backends/neon/workloads/NeonComparisonWorkload.cpp b/src/backends/neon/workloads/NeonComparisonWorkload.cpp index 0edb3328b9..6e1f208228 100644 --- a/src/backends/neon/workloads/NeonComparisonWorkload.cpp +++ b/src/backends/neon/workloads/NeonComparisonWorkload.cpp @@ -6,6 +6,7 @@ #include "NeonComparisonWorkload.hpp" #include <aclCommon/ArmComputeUtils.hpp> #include <aclCommon/ArmComputeTensorUtils.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> #include <backendsCommon/CpuTensorHandle.hpp> namespace armnn @@ -35,9 +36,9 @@ NeonComparisonWorkload::NeonComparisonWorkload(const ComparisonQueueDescriptor& { m_Data.ValidateInputsOutputs("NeonComparisonWorkload", 2, 1); - arm_compute::ITensor& input0 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor(); - 
arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input0 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& input1 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); const arm_compute::ComparisonOperation comparisonOperation = ConvertComparisonOperationToAcl(m_Data.m_Parameters); diff --git a/src/backends/neon/workloads/NeonConcatWorkload.cpp b/src/backends/neon/workloads/NeonConcatWorkload.cpp index 4a9f68798c..65678aae59 100644 --- a/src/backends/neon/workloads/NeonConcatWorkload.cpp +++ b/src/backends/neon/workloads/NeonConcatWorkload.cpp @@ -8,11 +8,10 @@ #include "NeonWorkloadUtils.hpp" #include <aclCommon/ArmComputeTensorUtils.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> #include <backendsCommon/CpuTensorHandle.hpp> #include <neon/NeonTensorHandle.hpp> - - namespace armnn { using namespace armcomputetensorutils; @@ -73,10 +72,10 @@ const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info) std::vector<arm_compute::ITensor *> aclInputs; for (auto input : m_Data.m_Inputs) { - arm_compute::ITensor& aclInput = boost::polymorphic_pointer_downcast<IAclTensorHandle>(input)->GetTensor(); + arm_compute::ITensor& aclInput = armnn::PolymorphicPointerDowncast<IAclTensorHandle>(input)->GetTensor(); aclInputs.emplace_back(&aclInput); } - arm_compute::ITensor& output = boost::polymorphic_pointer_downcast<IAclTensorHandle>( + arm_compute::ITensor& output = armnn::PolymorphicPointerDowncast<IAclTensorHandle>( m_Data.m_Outputs[0])->GetTensor(); // Create the layer function diff --git a/src/backends/neon/workloads/NeonDepthToSpaceWorkload.cpp b/src/backends/neon/workloads/NeonDepthToSpaceWorkload.cpp index b30dfcd80e..12e7d206bf 100644 --- a/src/backends/neon/workloads/NeonDepthToSpaceWorkload.cpp 
+++ b/src/backends/neon/workloads/NeonDepthToSpaceWorkload.cpp @@ -8,9 +8,9 @@ #include "NeonWorkloadUtils.hpp" #include <aclCommon/ArmComputeTensorUtils.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> #include <boost/numeric/conversion/cast.hpp> -#include <boost/polymorphic_pointer_cast.hpp> namespace armnn { @@ -39,13 +39,13 @@ NeonDepthToSpaceWorkload::NeonDepthToSpaceWorkload(const DepthToSpaceQueueDescri arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout); arm_compute::ITensor& input = - boost::polymorphic_pointer_downcast<IAclTensorHandle>(m_Data.m_Inputs[0])->GetTensor(); + PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Inputs[0])->GetTensor(); input.info()->set_data_layout(aclDataLayout); int32_t blockSize = boost::numeric_cast<int32_t>(desc.m_Parameters.m_BlockSize); arm_compute::ITensor& output = - boost::polymorphic_pointer_downcast<IAclTensorHandle>(m_Data.m_Outputs[0])->GetTensor(); + PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Outputs[0])->GetTensor(); output.info()->set_data_layout(aclDataLayout); m_Layer.configure(&input, &output, blockSize); diff --git a/src/backends/neon/workloads/NeonQuantizeWorkload.cpp b/src/backends/neon/workloads/NeonQuantizeWorkload.cpp index 4f3ea2c3c1..14fbdf3dd9 100644 --- a/src/backends/neon/workloads/NeonQuantizeWorkload.cpp +++ b/src/backends/neon/workloads/NeonQuantizeWorkload.cpp @@ -8,10 +8,9 @@ #include <neon/NeonTensorHandle.hpp> #include <aclCommon/ArmComputeTensorUtils.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> #include <arm_compute/core/Types.h> -#include <boost/polymorphic_pointer_cast.hpp> - namespace armnn { using namespace armcomputetensorutils; @@ -30,9 +29,9 @@ NeonQuantizeWorkload::NeonQuantizeWorkload(const QuantizeQueueDescriptor& descri { m_Data.ValidateInputsOutputs("NeonQuantizeWorkload", 1, 1); - arm_compute::ITensor& input = boost::polymorphic_pointer_downcast<IAclTensorHandle>( + arm_compute::ITensor& input = 
PolymorphicPointerDowncast<IAclTensorHandle>( m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_pointer_downcast<IAclTensorHandle>( + arm_compute::ITensor& output = PolymorphicPointerDowncast<IAclTensorHandle>( m_Data.m_Outputs[0])->GetTensor(); m_Layer.reset(new arm_compute::NEQuantizationLayer()); diff --git a/src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.cpp b/src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.cpp index 199e926142..d68ab4c4ac 100644 --- a/src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.cpp +++ b/src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.cpp @@ -6,6 +6,8 @@ #include "NeonSpaceToBatchNdWorkload.hpp" #include "NeonWorkloadUtils.hpp" + +#include <armnn/utility/PolymorphicDowncast.hpp> #include <ResolveType.hpp> namespace armnn @@ -44,9 +46,9 @@ NeonSpaceToBatchNdWorkload::NeonSpaceToBatchNdWorkload(const SpaceToBatchNdQueue m_Data.ValidateInputsOutputs("NESpaceToBatchNdWorkload", 1, 1); arm_compute::ITensor& input = - boost::polymorphic_pointer_downcast<IAclTensorHandle>(m_Data.m_Inputs[0])->GetTensor(); + PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Inputs[0])->GetTensor(); arm_compute::ITensor& output = - boost::polymorphic_pointer_downcast<IAclTensorHandle>(m_Data.m_Outputs[0])->GetTensor(); + PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Outputs[0])->GetTensor(); // ArmNN blockShape is [H, W] Cl asks for W, H int32_t blockHeight = boost::numeric_cast<int32_t>(m_Data.m_Parameters.m_BlockShape[0]); diff --git a/src/backends/neon/workloads/NeonSplitterWorkload.cpp b/src/backends/neon/workloads/NeonSplitterWorkload.cpp index 19fa7c6389..de6f1378bd 100644 --- a/src/backends/neon/workloads/NeonSplitterWorkload.cpp +++ b/src/backends/neon/workloads/NeonSplitterWorkload.cpp @@ -80,7 +80,7 @@ NeonSplitterWorkload::NeonSplitterWorkload(const SplitterQueueDescriptor& descri std::vector<arm_compute::ITensor *> aclOutputs; for (auto output : m_Data.m_Outputs) { - 
arm_compute::ITensor& aclOutput = boost::polymorphic_pointer_downcast<IAclTensorHandle>(output)->GetTensor(); + arm_compute::ITensor& aclOutput = PolymorphicPointerDowncast<IAclTensorHandle>(output)->GetTensor(); aclOutputs.emplace_back(&aclOutput); } diff --git a/src/backends/neon/workloads/NeonStackWorkload.cpp b/src/backends/neon/workloads/NeonStackWorkload.cpp index b21494397d..a3ba8d888d 100644 --- a/src/backends/neon/workloads/NeonStackWorkload.cpp +++ b/src/backends/neon/workloads/NeonStackWorkload.cpp @@ -6,11 +6,11 @@ #include "NeonWorkloadUtils.hpp" #include <aclCommon/ArmComputeTensorUtils.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> #include <backendsCommon/CpuTensorHandle.hpp> #include <neon/NeonTensorHandle.hpp> #include <boost/numeric/conversion/cast.hpp> -#include <boost/polymorphic_pointer_cast.hpp> namespace armnn { @@ -53,10 +53,10 @@ NeonStackWorkload::NeonStackWorkload(const StackQueueDescriptor& descriptor, con std::vector<arm_compute::ITensor*> aclInputs; for (auto input : m_Data.m_Inputs) { - arm_compute::ITensor& aclInput = boost::polymorphic_pointer_downcast<IAclTensorHandle>(input)->GetTensor(); + arm_compute::ITensor& aclInput = PolymorphicPointerDowncast<IAclTensorHandle>(input)->GetTensor(); aclInputs.emplace_back(&aclInput); } - arm_compute::ITensor& output = boost::polymorphic_pointer_downcast<IAclTensorHandle>( + arm_compute::ITensor& output = PolymorphicPointerDowncast<IAclTensorHandle>( m_Data.m_Outputs[0])->GetTensor(); m_Layer.reset(new arm_compute::NEStackLayer()); diff --git a/src/backends/reference/RefBackend.cpp b/src/backends/reference/RefBackend.cpp index 584ce78fdb..590fde3e16 100644 --- a/src/backends/reference/RefBackend.cpp +++ b/src/backends/reference/RefBackend.cpp @@ -10,14 +10,12 @@ #include "RefTensorHandleFactory.hpp" #include <armnn/BackendRegistry.hpp> - #include <armnn/backends/IBackendContext.hpp> #include <armnn/backends/IMemoryManager.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> #include 
<Optimizer.hpp> -#include <boost/polymorphic_pointer_cast.hpp> - namespace armnn { @@ -30,7 +28,7 @@ const BackendId& RefBackend::GetIdStatic() IBackendInternal::IWorkloadFactoryPtr RefBackend::CreateWorkloadFactory( const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) const { - return std::make_unique<RefWorkloadFactory>(boost::polymorphic_pointer_downcast<RefMemoryManager>(memoryManager)); + return std::make_unique<RefWorkloadFactory>(PolymorphicPointerDowncast<RefMemoryManager>(memoryManager)); } IBackendInternal::IWorkloadFactoryPtr RefBackend::CreateWorkloadFactory( @@ -40,7 +38,7 @@ IBackendInternal::IWorkloadFactoryPtr RefBackend::CreateWorkloadFactory( tensorHandleFactoryRegistry.RegisterMemoryManager(memoryManager); - return std::make_unique<RefWorkloadFactory>(boost::polymorphic_pointer_downcast<RefMemoryManager>(memoryManager)); + return std::make_unique<RefWorkloadFactory>(PolymorphicPointerDowncast<RefMemoryManager>(memoryManager)); } IBackendInternal::IBackendContextPtr RefBackend::CreateBackendContext(const IRuntime::CreationOptions&) const |