From 3c9e04563b9fb7d7aadc61834909a9ffc6b1769c Mon Sep 17 00:00:00 2001
From: Jan Eilers
Date: Fri, 10 Apr 2020 13:00:44 +0100
Subject: IVGCVSW-4483 Removes boost::polymorphic_pointer_downcast

* replace boost::polymorphic_pointer_downcast by PolymorphicPointerDowncast
* replaced/removed includes

Signed-off-by: Jan Eilers
Change-Id: I0ef934a3804cf05e4c38dec6c4ec49c76111a302
---
 src/backends/cl/ClBackend.cpp                              |  8 ++++----
 src/backends/cl/ClContextControl.cpp                       |  1 -
 src/backends/cl/ClTensorHandle.hpp                         |  6 +++---
 src/backends/cl/test/ClWorkloadFactoryHelper.hpp           |  5 ++---
 src/backends/cl/workloads/ClConcatWorkload.cpp             | 10 +++++-----
 src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp       |  6 +++---
 src/backends/cl/workloads/ClDequantizeWorkload.cpp         |  7 +++----
 src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp     |  7 +++----
 src/backends/cl/workloads/ClSplitterWorkload.cpp           |  5 +++--
 src/backends/cl/workloads/ClStackWorkload.cpp              |  6 +++---
 src/backends/neon/NeonBackend.cpp                          |  7 ++++---
 src/backends/neon/NeonTensorHandle.hpp                     |  5 ++---
 src/backends/neon/NeonWorkloadFactory.cpp                  |  2 --
 src/backends/neon/test/NeonWorkloadFactoryHelper.hpp       |  5 ++---
 src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp |  6 ++++--
 src/backends/neon/workloads/NeonComparisonWorkload.cpp     |  7 ++++---
 src/backends/neon/workloads/NeonConcatWorkload.cpp         |  7 +++----
 src/backends/neon/workloads/NeonDepthToSpaceWorkload.cpp   |  6 +++---
 src/backends/neon/workloads/NeonQuantizeWorkload.cpp       |  7 +++----
 src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.cpp |  6 ++++--
 src/backends/neon/workloads/NeonSplitterWorkload.cpp       |  2 +-
 src/backends/neon/workloads/NeonStackWorkload.cpp          |  6 +++---
 src/backends/reference/RefBackend.cpp                      |  8 +++-----
 23 files changed, 65 insertions(+), 70 deletions(-)

diff --git a/src/backends/cl/ClBackend.cpp b/src/backends/cl/ClBackend.cpp
index f662754693..0a898ec2c0 100644
--- a/src/backends/cl/ClBackend.cpp
+++ b/src/backends/cl/ClBackend.cpp
@@ -17,12 +17,12 @@
 #include
 #include
+#include
+
 #include
 #include
-#include
-
 namespace armnn
 {
@@ -41,7 +41,7 @@ IBackendInternal::IWorkloadFactoryPtr ClBackend::CreateWorkloadFactory(
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) const
 {
     return std::make_unique(
-        boost::polymorphic_pointer_downcast(memoryManager));
+        PolymorphicPointerDowncast(memoryManager));
 }
 IBackendInternal::IWorkloadFactoryPtr ClBackend::CreateWorkloadFactory(
@@ -52,7 +52,7 @@ IBackendInternal::IWorkloadFactoryPtr ClBackend::CreateWorkloadFactory(
     registry.RegisterMemoryManager(memoryManager);
     return std::make_unique(
-        boost::polymorphic_pointer_downcast(memoryManager));
+        PolymorphicPointerDowncast(memoryManager));
 }
 std::vector ClBackend::GetHandleFactoryPreferences() const
diff --git a/src/backends/cl/ClContextControl.cpp b/src/backends/cl/ClContextControl.cpp
index dbcccce945..40357d5706 100644
--- a/src/backends/cl/ClContextControl.cpp
+++ b/src/backends/cl/ClContextControl.cpp
@@ -16,7 +16,6 @@
 #include
 #include
-#include
 namespace cl
 {
diff --git a/src/backends/cl/ClTensorHandle.hpp b/src/backends/cl/ClTensorHandle.hpp
index 1830d186b6..0481307972 100644
--- a/src/backends/cl/ClTensorHandle.hpp
+++ b/src/backends/cl/ClTensorHandle.hpp
@@ -9,6 +9,8 @@
 #include
+#include
+
 #include
 #include
 #include
@@ -16,8 +18,6 @@
 #include
 #include
-#include
-
 namespace armnn
 {
@@ -71,7 +71,7 @@ public:
     virtual void SetMemoryGroup(const std::shared_ptr& memoryGroup) override
     {
-        m_MemoryGroup = boost::polymorphic_pointer_downcast(memoryGroup);
+        m_MemoryGroup = PolymorphicPointerDowncast(memoryGroup);
     }
     TensorShape GetStrides() const override
diff --git a/src/backends/cl/test/ClWorkloadFactoryHelper.hpp b/src/backends/cl/test/ClWorkloadFactoryHelper.hpp
index 1dfba7573b..6ea2f119da 100644
--- a/src/backends/cl/test/ClWorkloadFactoryHelper.hpp
+++ b/src/backends/cl/test/ClWorkloadFactoryHelper.hpp
@@ -7,13 +7,12 @@
 #include
 #include
+#include
 #include
 #include
 #include
-#include
-
 namespace
 {
@@ -29,7 +28,7 @@ struct WorkloadFactoryHelper
     static armnn::ClWorkloadFactory GetFactory(
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
     {
-        return armnn::ClWorkloadFactory(boost::polymorphic_pointer_downcast(memoryManager));
+        return armnn::ClWorkloadFactory(armnn::PolymorphicPointerDowncast(memoryManager));
     }
 };
diff --git a/src/backends/cl/workloads/ClConcatWorkload.cpp b/src/backends/cl/workloads/ClConcatWorkload.cpp
index 5370466163..e0aebd30cb 100644
--- a/src/backends/cl/workloads/ClConcatWorkload.cpp
+++ b/src/backends/cl/workloads/ClConcatWorkload.cpp
@@ -5,6 +5,7 @@
 #include "ClConcatWorkload.hpp"
 #include "ClWorkloadUtils.hpp"
 #include
+#include
 #include
 #include
 #include
@@ -12,8 +13,6 @@
 #include
 #include
-#include
-
 namespace armnn
 {
 using namespace armcomputetensorutils;
@@ -72,11 +71,12 @@ ClConcatWorkload::ClConcatWorkload(const ConcatQueueDescriptor& descriptor, cons
     std::vector aclInputs;
     for (auto input : m_Data.m_Inputs)
     {
-        arm_compute::ICLTensor& aclInput = boost::polymorphic_pointer_downcast(input)->GetTensor();
+        arm_compute::ICLTensor& aclInput = armnn::PolymorphicPointerDowncast(input)->GetTensor();
         aclInputs.emplace_back(&aclInput);
     }
-    arm_compute::ICLTensor& output = boost::polymorphic_pointer_downcast(
-        m_Data.m_Outputs[0])->GetTensor();
+
+    arm_compute::ICLTensor& output =
+        armnn::PolymorphicPointerDowncast(m_Data.m_Outputs[0])->GetTensor();
     // Create the layer function
     auto layer = std::make_unique();
diff --git a/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp b/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp
index 800a98409e..04885b18aa 100644
--- a/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp
@@ -8,11 +8,11 @@
 #include "ClWorkloadUtils.hpp"
 #include
+#include
 #include
 #include
-#include
 namespace armnn
 {
@@ -45,13 +45,13 @@ ClDepthToSpaceWorkload::ClDepthToSpaceWorkload(const DepthToSpaceQueueDescriptor
     arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     arm_compute::ICLTensor& input =
-        boost::polymorphic_pointer_downcast(m_Data.m_Inputs[0])->GetTensor();
+        PolymorphicPointerDowncast(m_Data.m_Inputs[0])->GetTensor();
     input.info()->set_data_layout(aclDataLayout);
     int32_t blockSize = boost::numeric_cast(desc.m_Parameters.m_BlockSize);
     arm_compute::ICLTensor& output =
-        boost::polymorphic_pointer_downcast(m_Data.m_Outputs[0])->GetTensor();
+        PolymorphicPointerDowncast(m_Data.m_Outputs[0])->GetTensor();
     output.info()->set_data_layout(aclDataLayout);
     m_Layer.configure(&input, &output, blockSize);
diff --git a/src/backends/cl/workloads/ClDequantizeWorkload.cpp b/src/backends/cl/workloads/ClDequantizeWorkload.cpp
index eca795de7e..eb63900380 100644
--- a/src/backends/cl/workloads/ClDequantizeWorkload.cpp
+++ b/src/backends/cl/workloads/ClDequantizeWorkload.cpp
@@ -7,6 +7,7 @@
 #include "ClWorkloadUtils.hpp"
 #include
+#include
 #include
 #include
@@ -14,8 +15,6 @@
 #include
 #include
-#include
-
 namespace armnn
 {
 using namespace armcomputetensorutils;
@@ -34,10 +33,10 @@ ClDequantizeWorkload::ClDequantizeWorkload(const DequantizeQueueDescriptor& desc
 {
     m_Data.ValidateInputsOutputs("ClDequantizeWorkload", 1, 1);
-    arm_compute::ICLTensor& input = boost::polymorphic_pointer_downcast(
+    arm_compute::ICLTensor& input = armnn::PolymorphicPointerDowncast(
         m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ICLTensor& output = boost::polymorphic_pointer_downcast(
+    arm_compute::ICLTensor& output = armnn::PolymorphicPointerDowncast(
         m_Data.m_Outputs[0])->GetTensor();
     m_Layer.reset(new arm_compute::CLDequantizationLayer());
diff --git a/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp b/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp
index 64da92c815..b87658b3f9 100644
--- a/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp
+++ b/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp
@@ -9,13 +9,12 @@
 #include
 #include
+#include
 #include
 #include
 #include
 #include
-#include
-
 namespace armnn
 {
 using namespace armcomputetensorutils;
@@ -51,9 +50,9 @@ ClSpaceToBatchNdWorkload::ClSpaceToBatchNdWorkload(
     m_Data.ValidateInputsOutputs("ClSpaceToBatchNdWorkload", 1, 1);
     arm_compute::ICLTensor& input =
-        boost::polymorphic_pointer_downcast(m_Data.m_Inputs[0])->GetTensor();
+        armnn::PolymorphicPointerDowncast(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ICLTensor& output =
-        boost::polymorphic_pointer_downcast(m_Data.m_Outputs[0])->GetTensor();
+        armnn::PolymorphicPointerDowncast(m_Data.m_Outputs[0])->GetTensor();
     // ArmNN blockShape is [H, W] Cl asks for W, H
     int32_t blockHeight = boost::numeric_cast(m_Data.m_Parameters.m_BlockShape[0]);
diff --git a/src/backends/cl/workloads/ClSplitterWorkload.cpp b/src/backends/cl/workloads/ClSplitterWorkload.cpp
index 296e0a3dde..045fbb7595 100644
--- a/src/backends/cl/workloads/ClSplitterWorkload.cpp
+++ b/src/backends/cl/workloads/ClSplitterWorkload.cpp
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -74,13 +75,13 @@ ClSplitterWorkload::ClSplitterWorkload(const SplitterQueueDescriptor& descriptor
         return;
     }
-    arm_compute::ICLTensor& input = boost::polymorphic_pointer_downcast(
+    arm_compute::ICLTensor& input = armnn::PolymorphicPointerDowncast(
         m_Data.m_Inputs[0])->GetTensor();
     std::vector aclOutputs;
     for (auto output : m_Data.m_Outputs)
     {
-        arm_compute::ICLTensor& aclOutput = boost::polymorphic_pointer_downcast(output)->GetTensor();
+        arm_compute::ICLTensor& aclOutput = armnn::PolymorphicPointerDowncast(output)->GetTensor();
         aclOutputs.emplace_back(&aclOutput);
     }
diff --git a/src/backends/cl/workloads/ClStackWorkload.cpp b/src/backends/cl/workloads/ClStackWorkload.cpp
index 3ba698ec4d..e434f9897f 100644
--- a/src/backends/cl/workloads/ClStackWorkload.cpp
+++ b/src/backends/cl/workloads/ClStackWorkload.cpp
@@ -5,6 +5,7 @@
 #include "ClStackWorkload.hpp"
 #include "ClWorkloadUtils.hpp"
 #include
+#include
 #include
 #include
 #include
@@ -12,7 +13,6 @@
 #include
 #include
-#include
 namespace armnn
 {
@@ -51,10 +51,10 @@ ClStackWorkload::ClStackWorkload(const StackQueueDescriptor& descriptor, const W
     std::vector aclInputs;
     for (auto input : m_Data.m_Inputs)
     {
-        arm_compute::ICLTensor& aclInput = boost::polymorphic_pointer_downcast(input)->GetTensor();
+        arm_compute::ICLTensor& aclInput = armnn::PolymorphicPointerDowncast(input)->GetTensor();
         aclInputs.emplace_back(&aclInput);
     }
-    arm_compute::ICLTensor& output = boost::polymorphic_pointer_downcast(
+    arm_compute::ICLTensor& output = armnn::PolymorphicPointerDowncast(
         m_Data.m_Outputs[0])->GetTensor();
     m_Layer.reset(new arm_compute::CLStackLayer());
diff --git a/src/backends/neon/NeonBackend.cpp b/src/backends/neon/NeonBackend.cpp
index 4201ba8c9a..841ed27006 100644
--- a/src/backends/neon/NeonBackend.cpp
+++ b/src/backends/neon/NeonBackend.cpp
@@ -16,12 +16,13 @@
 #include
 #include
+#include
+
 #include
 #include
 #include
-#include
 namespace armnn
 {
@@ -42,7 +43,7 @@ IBackendInternal::IWorkloadFactoryPtr NeonBackend::CreateWorkloadFactory(
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) const
 {
     return std::make_unique(
-        boost::polymorphic_pointer_downcast(memoryManager));
+        PolymorphicPointerDowncast(memoryManager));
 }
 IBackendInternal::IWorkloadFactoryPtr NeonBackend::CreateWorkloadFactory(
@@ -53,7 +54,7 @@ IBackendInternal::IWorkloadFactoryPtr NeonBackend::CreateWorkloadFactory(
     tensorHandleFactoryRegistry.RegisterMemoryManager(memoryManager);
     return std::make_unique(
-        boost::polymorphic_pointer_downcast(memoryManager));
+        PolymorphicPointerDowncast(memoryManager));
 }
 IBackendInternal::IBackendContextPtr NeonBackend::CreateBackendContext(const IRuntime::CreationOptions&) const
diff --git a/src/backends/neon/NeonTensorHandle.hpp b/src/backends/neon/NeonTensorHandle.hpp
index f251034823..4cc610c85a 100644
--- a/src/backends/neon/NeonTensorHandle.hpp
+++ b/src/backends/neon/NeonTensorHandle.hpp
@@ -11,6 +11,7 @@
 #include
 #include
+#include
 #include
 #include
@@ -19,8 +20,6 @@
 #include
 #include
-#include
-
 namespace armnn
 {
@@ -77,7 +76,7 @@ public:
     virtual void SetMemoryGroup(const std::shared_ptr& memoryGroup) override
     {
-        m_MemoryGroup = boost::polymorphic_pointer_downcast(memoryGroup);
+        m_MemoryGroup = PolymorphicPointerDowncast(memoryGroup);
     }
     virtual const void* Map(bool /* blocking = true */) const override
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index b3104b9576..b7609ee765 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -21,8 +21,6 @@
 #include
 #include
-#include
-
 namespace armnn
 {
diff --git a/src/backends/neon/test/NeonWorkloadFactoryHelper.hpp b/src/backends/neon/test/NeonWorkloadFactoryHelper.hpp
index 708d23141d..8d92ddf44d 100644
--- a/src/backends/neon/test/NeonWorkloadFactoryHelper.hpp
+++ b/src/backends/neon/test/NeonWorkloadFactoryHelper.hpp
@@ -7,13 +7,12 @@
 #include
 #include
+#include
 #include
 #include
 #include
-#include
-
 namespace
 {
@@ -30,7 +29,7 @@ struct WorkloadFactoryHelper
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
     {
         return armnn::NeonWorkloadFactory(
-            boost::polymorphic_pointer_downcast(memoryManager));
+            armnn::PolymorphicPointerDowncast(memoryManager));
     }
 };
diff --git a/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp b/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp
index a6e7aa4415..d2f538745c 100644
--- a/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp
+++ b/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp
@@ -6,6 +6,8 @@
 #include "NeonBatchToSpaceNdWorkload.hpp"
 #include "NeonWorkloadUtils.hpp"
+
+#include
 #include
 namespace armnn
@@ -38,9 +40,9 @@ NeonBatchToSpaceNdWorkload::NeonBatchToSpaceNdWorkload(const BatchToSpaceNdQueue
     m_Data.ValidateInputsOutputs("NeonBatchToSpaceNdWorkload", 1, 1);
     arm_compute::ITensor& input =
-        boost::polymorphic_pointer_downcast(m_Data.m_Inputs[0])->GetTensor();
+        armnn::PolymorphicPointerDowncast(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ITensor& output =
-        boost::polymorphic_pointer_downcast(m_Data.m_Outputs[0])->GetTensor();
+        armnn::PolymorphicPointerDowncast(m_Data.m_Outputs[0])->GetTensor();
     arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     input.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/neon/workloads/NeonComparisonWorkload.cpp b/src/backends/neon/workloads/NeonComparisonWorkload.cpp
index 0edb3328b9..6e1f208228 100644
--- a/src/backends/neon/workloads/NeonComparisonWorkload.cpp
+++ b/src/backends/neon/workloads/NeonComparisonWorkload.cpp
@@ -6,6 +6,7 @@
 #include "NeonComparisonWorkload.hpp"
 #include
 #include
+#include
 #include
 namespace armnn
@@ -35,9 +36,9 @@ NeonComparisonWorkload::NeonComparisonWorkload(const ComparisonQueueDescriptor&
 {
     m_Data.ValidateInputsOutputs("NeonComparisonWorkload", 2, 1);
-    arm_compute::ITensor& input0 = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& input1 = boost::polymorphic_downcast(m_Data.m_Inputs[1])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input0 = PolymorphicDowncast(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& input1 = PolymorphicDowncast(m_Data.m_Inputs[1])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast(m_Data.m_Outputs[0])->GetTensor();
     const arm_compute::ComparisonOperation comparisonOperation = ConvertComparisonOperationToAcl(m_Data.m_Parameters);
diff --git a/src/backends/neon/workloads/NeonConcatWorkload.cpp b/src/backends/neon/workloads/NeonConcatWorkload.cpp
index 4a9f68798c..65678aae59 100644
--- a/src/backends/neon/workloads/NeonConcatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConcatWorkload.cpp
@@ -8,11 +8,10 @@
 #include "NeonWorkloadUtils.hpp"
 #include
+#include
 #include
 #include
-
-
 namespace armnn
 {
 using namespace armcomputetensorutils;
@@ -73,10 +72,10 @@ const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info)
     std::vector aclInputs;
     for (auto input : m_Data.m_Inputs)
     {
-        arm_compute::ITensor& aclInput = boost::polymorphic_pointer_downcast(input)->GetTensor();
+        arm_compute::ITensor& aclInput = armnn::PolymorphicPointerDowncast(input)->GetTensor();
         aclInputs.emplace_back(&aclInput);
     }
-    arm_compute::ITensor& output = boost::polymorphic_pointer_downcast(
+    arm_compute::ITensor& output = armnn::PolymorphicPointerDowncast(
         m_Data.m_Outputs[0])->GetTensor();
     // Create the layer function
diff --git a/src/backends/neon/workloads/NeonDepthToSpaceWorkload.cpp b/src/backends/neon/workloads/NeonDepthToSpaceWorkload.cpp
index b30dfcd80e..12e7d206bf 100644
--- a/src/backends/neon/workloads/NeonDepthToSpaceWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthToSpaceWorkload.cpp
@@ -8,9 +8,9 @@
 #include "NeonWorkloadUtils.hpp"
 #include
+#include
 #include
-#include
 namespace armnn
 {
@@ -39,13 +39,13 @@ NeonDepthToSpaceWorkload::NeonDepthToSpaceWorkload(const DepthToSpaceQueueDescri
     arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     arm_compute::ITensor& input =
-        boost::polymorphic_pointer_downcast(m_Data.m_Inputs[0])->GetTensor();
+        PolymorphicPointerDowncast(m_Data.m_Inputs[0])->GetTensor();
     input.info()->set_data_layout(aclDataLayout);
     int32_t blockSize = boost::numeric_cast(desc.m_Parameters.m_BlockSize);
     arm_compute::ITensor& output =
-        boost::polymorphic_pointer_downcast(m_Data.m_Outputs[0])->GetTensor();
+        PolymorphicPointerDowncast(m_Data.m_Outputs[0])->GetTensor();
     output.info()->set_data_layout(aclDataLayout);
     m_Layer.configure(&input, &output, blockSize);
diff --git a/src/backends/neon/workloads/NeonQuantizeWorkload.cpp b/src/backends/neon/workloads/NeonQuantizeWorkload.cpp
index 4f3ea2c3c1..14fbdf3dd9 100644
--- a/src/backends/neon/workloads/NeonQuantizeWorkload.cpp
+++ b/src/backends/neon/workloads/NeonQuantizeWorkload.cpp
@@ -8,10 +8,9 @@
 #include
 #include
+#include
 #include
-#include
-
 namespace armnn
 {
 using namespace armcomputetensorutils;
@@ -30,9 +29,9 @@ NeonQuantizeWorkload::NeonQuantizeWorkload(const QuantizeQueueDescriptor& descri
 {
     m_Data.ValidateInputsOutputs("NeonQuantizeWorkload", 1, 1);
-    arm_compute::ITensor& input = boost::polymorphic_pointer_downcast(
+    arm_compute::ITensor& input = PolymorphicPointerDowncast(
         m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_pointer_downcast(
+    arm_compute::ITensor& output = PolymorphicPointerDowncast(
         m_Data.m_Outputs[0])->GetTensor();
     m_Layer.reset(new arm_compute::NEQuantizationLayer());
diff --git a/src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.cpp b/src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.cpp
index 199e926142..d68ab4c4ac 100644
--- a/src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.cpp
@@ -6,6 +6,8 @@
 #include "NeonSpaceToBatchNdWorkload.hpp"
 #include "NeonWorkloadUtils.hpp"
+
+#include
 #include
 namespace armnn
@@ -44,9 +46,9 @@ NeonSpaceToBatchNdWorkload::NeonSpaceToBatchNdWorkload(const SpaceToBatchNdQueue
     m_Data.ValidateInputsOutputs("NESpaceToBatchNdWorkload", 1, 1);
     arm_compute::ITensor& input =
-        boost::polymorphic_pointer_downcast(m_Data.m_Inputs[0])->GetTensor();
+        PolymorphicPointerDowncast(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ITensor& output =
-        boost::polymorphic_pointer_downcast(m_Data.m_Outputs[0])->GetTensor();
+        PolymorphicPointerDowncast(m_Data.m_Outputs[0])->GetTensor();
     // ArmNN blockShape is [H, W] Cl asks for W, H
     int32_t blockHeight = boost::numeric_cast(m_Data.m_Parameters.m_BlockShape[0]);
diff --git a/src/backends/neon/workloads/NeonSplitterWorkload.cpp b/src/backends/neon/workloads/NeonSplitterWorkload.cpp
index 19fa7c6389..de6f1378bd 100644
--- a/src/backends/neon/workloads/NeonSplitterWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSplitterWorkload.cpp
@@ -80,7 +80,7 @@ NeonSplitterWorkload::NeonSplitterWorkload(const SplitterQueueDescriptor& descri
     std::vector aclOutputs;
     for (auto output : m_Data.m_Outputs)
     {
-        arm_compute::ITensor& aclOutput = boost::polymorphic_pointer_downcast(output)->GetTensor();
+        arm_compute::ITensor& aclOutput = PolymorphicPointerDowncast(output)->GetTensor();
         aclOutputs.emplace_back(&aclOutput);
     }
diff --git a/src/backends/neon/workloads/NeonStackWorkload.cpp b/src/backends/neon/workloads/NeonStackWorkload.cpp
index b21494397d..a3ba8d888d 100644
--- a/src/backends/neon/workloads/NeonStackWorkload.cpp
+++ b/src/backends/neon/workloads/NeonStackWorkload.cpp
@@ -6,11 +6,11 @@
 #include "NeonWorkloadUtils.hpp"
 #include
+#include
 #include
 #include
 #include
-#include
 namespace armnn
 {
@@ -53,10 +53,10 @@ NeonStackWorkload::NeonStackWorkload(const StackQueueDescriptor& descriptor, con
     std::vector aclInputs;
     for (auto input : m_Data.m_Inputs)
     {
-        arm_compute::ITensor& aclInput = boost::polymorphic_pointer_downcast(input)->GetTensor();
+        arm_compute::ITensor& aclInput = PolymorphicPointerDowncast(input)->GetTensor();
         aclInputs.emplace_back(&aclInput);
     }
-    arm_compute::ITensor& output = boost::polymorphic_pointer_downcast(
+    arm_compute::ITensor& output = PolymorphicPointerDowncast(
         m_Data.m_Outputs[0])->GetTensor();
     m_Layer.reset(new arm_compute::NEStackLayer());
diff --git a/src/backends/reference/RefBackend.cpp b/src/backends/reference/RefBackend.cpp
index 584ce78fdb..590fde3e16 100644
--- a/src/backends/reference/RefBackend.cpp
+++ b/src/backends/reference/RefBackend.cpp
@@ -10,14 +10,12 @@
 #include "RefTensorHandleFactory.hpp"
 #include
-
 #include
 #include
+#include
 #include
-#include
-
 namespace armnn
 {
@@ -30,7 +28,7 @@ const BackendId& RefBackend::GetIdStatic()
 IBackendInternal::IWorkloadFactoryPtr RefBackend::CreateWorkloadFactory(
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) const
 {
-    return std::make_unique(boost::polymorphic_pointer_downcast(memoryManager));
+    return std::make_unique(PolymorphicPointerDowncast(memoryManager));
 }
 IBackendInternal::IWorkloadFactoryPtr RefBackend::CreateWorkloadFactory(
@@ -40,7 +38,7 @@ IBackendInternal::IWorkloadFactoryPtr RefBackend::CreateWorkloadFactory(
     tensorHandleFactoryRegistry.RegisterMemoryManager(memoryManager);
-    return std::make_unique(boost::polymorphic_pointer_downcast(memoryManager));
+    return std::make_unique(PolymorphicPointerDowncast(memoryManager));
 }
 IBackendInternal::IBackendContextPtr RefBackend::CreateBackendContext(const IRuntime::CreationOptions&) const
-- 
cgit v1.2.1
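
Note on the replacement utility (editorial sketch, not part of the patch): the call sites above swap boost::polymorphic_pointer_downcast for armnn::PolymorphicPointerDowncast with the same call shape, i.e. a checked downcast of a shared_ptr to a more derived handle type. The minimal C++ sketch below shows what such a helper typically looks like, assuming a dynamic_pointer_cast check in debug builds and a plain static_pointer_cast otherwise. The helper name PolymorphicPointerDowncastSketch and the IMemoryManagerStub/ClMemoryManagerStub types are hypothetical stand-ins for illustration only; this is not the actual armnn/utility/PolymorphicDowncast.hpp implementation.

    // Illustrative sketch only: a checked shared_ptr downcast in the spirit of
    // armnn::PolymorphicPointerDowncast. Names ending in "Sketch"/"Stub" are
    // hypothetical and do not exist in Arm NN.
    #include <cassert>
    #include <iostream>
    #include <memory>

    template <typename DerivedT, typename BaseT>
    std::shared_ptr<DerivedT> PolymorphicPointerDowncastSketch(const std::shared_ptr<BaseT>& base)
    {
        // Debug builds: verify the dynamic type really is DerivedT (null passes through).
        assert(base == nullptr || std::dynamic_pointer_cast<DerivedT>(base) != nullptr);
        // Release builds: a plain static cast, no RTTI cost on the hot path.
        return std::static_pointer_cast<DerivedT>(base);
    }

    // Hypothetical stand-ins for the IMemoryManager/ClMemoryManager pair used in the patch.
    struct IMemoryManagerStub { virtual ~IMemoryManagerStub() = default; };
    struct ClMemoryManagerStub : IMemoryManagerStub { int poolCount = 2; };

    int main()
    {
        std::shared_ptr<IMemoryManagerStub> memoryManager = std::make_shared<ClMemoryManagerStub>();

        // Mirrors the call shape at the patched sites, e.g.
        //   PolymorphicPointerDowncast<ClMemoryManager>(memoryManager)
        auto clMemoryManager = PolymorphicPointerDowncastSketch<ClMemoryManagerStub>(memoryManager);
        std::cout << "pool count: " << clMemoryManager->poolCount << std::endl;
        return 0;
    }

The point of this shape is that the debug-only dynamic_pointer_cast catches mismatched handle or memory-manager types during development, while release builds pay only for a static cast, which is why such a helper can replace the Boost equivalent without changing behaviour at the call sites.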