From bb446e576e120512d5752a5d6dc1ddc636f563ba Mon Sep 17 00:00:00 2001
From: Jan Eilers <jan.eilers@arm.com>
Date: Thu, 2 Apr 2020 13:56:54 +0100
Subject: IVGCVSW-4483 Remove boost::polymorphic_downcast

 * exchange boost::polymorphic_downcast with armnn::PolymorphicDowncast
 * remove unnecessary includes of boost::polymorphic_downcast

Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Change-Id: Ie603fb82860fe05fee547dc78073230cc62b2e1f
---
 src/backends/neon/NeonTensorHandleFactory.cpp      |  3 +-
 src/backends/neon/NeonWorkloadFactory.cpp          |  3 +-
 src/backends/neon/test/NeonCreateWorkloadTests.cpp | 95 +++++++++++-----------
 src/backends/neon/workloads/NeonAbsWorkload.cpp    |  7 +-
 .../neon/workloads/NeonActivationWorkload.cpp      |  6 +-
 .../neon/workloads/NeonAdditionWorkload.cpp        |  7 +-
 .../neon/workloads/NeonArgMinMaxWorkload.cpp       |  5 +-
 .../workloads/NeonBatchNormalizationWorkload.cpp   |  7 +-
 .../neon/workloads/NeonConstantWorkload.cpp        |  5 +-
 .../neon/workloads/NeonConvolution2dWorkload.cpp   |  7 +-
 .../neon/workloads/NeonDequantizeWorkload.cpp      |  5 +-
 .../workloads/NeonDetectionPostProcessWorkload.cpp |  5 +-
 .../neon/workloads/NeonDivisionWorkload.cpp        |  8 +-
 .../neon/workloads/NeonFloorFloatWorkload.cpp      |  8 +-
 .../neon/workloads/NeonFullyConnectedWorkload.cpp  |  5 +-
 .../workloads/NeonL2NormalizationFloatWorkload.cpp |  5 +-
 .../neon/workloads/NeonMaximumWorkload.cpp         |  7 +-
 .../neon/workloads/NeonMinimumWorkload.cpp         |  8 +-
 .../neon/workloads/NeonMultiplicationWorkload.cpp  |  8 +-
 src/backends/neon/workloads/NeonNegWorkload.cpp    |  7 +-
 .../workloads/NeonNormalizationFloatWorkload.cpp   |  5 +-
 .../neon/workloads/NeonPooling2dWorkload.cpp       |  6 +-
 src/backends/neon/workloads/NeonPreluWorkload.cpp  |  8 +-
 .../neon/workloads/NeonReshapeWorkload.cpp         |  8 +-
 src/backends/neon/workloads/NeonResizeWorkload.cpp |  6 +-
 src/backends/neon/workloads/NeonRsqrtWorkload.cpp  |  6 +-
 src/backends/neon/workloads/NeonSliceWorkload.cpp  |  6 +-
 .../neon/workloads/NeonSoftmaxFloatWorkload.cpp    |  6 +-
 .../neon/workloads/NeonSoftmaxUint8Workload.cpp    |  5 +-
 .../neon/workloads/NeonSpaceToDepthWorkload.cpp    |  6 +-
 .../neon/workloads/NeonSplitterWorkload.cpp        |  3 +-
 .../neon/workloads/NeonStridedSliceWorkload.cpp    |  5 +-
 .../neon/workloads/NeonSubtractionWorkload.cpp     |  7 +-
 .../NeonTransposeConvolution2dWorkload.cpp         |  5 +-
 34 files changed, 163 insertions(+), 130 deletions(-)

diff --git a/src/backends/neon/NeonTensorHandleFactory.cpp b/src/backends/neon/NeonTensorHandleFactory.cpp
index 26b14af144..a8b5b81412 100644
--- a/src/backends/neon/NeonTensorHandleFactory.cpp
+++ b/src/backends/neon/NeonTensorHandleFactory.cpp
@@ -7,6 +7,7 @@
 #include "NeonTensorHandle.hpp"
 
 #include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 
 namespace armnn
 {
@@ -36,7 +37,7 @@ std::unique_ptr<ITensorHandle> NeonTensorHandleFactory::CreateSubTensorHandle(ITensorHandle& parent,
     }
 
     return std::make_unique<NeonSubTensorHandle>(
-        boost::polymorphic_downcast<IAclTensorHandle*>(&parent), shape, coords);
+        PolymorphicDowncast<IAclTensorHandle*>(&parent), shape, coords);
 }
 
 std::unique_ptr<ITensorHandle> NeonTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo) const
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 47f72050a5..b3104b9576 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -11,6 +11,7 @@
 #include <armnn/Utils.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 
 #include <backendsCommon/MakeWorkloadHelper.hpp>
 #include <backendsCommon/MemCopyWorkload.hpp>
@@ -69,7 +70,7 @@ std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateSubTensorHandle(ITensorHandle& parent,
     }
 
     return std::make_unique<NeonSubTensorHandle>(
-        boost::polymorphic_downcast<IAclTensorHandle*>(&parent), shape, coords);
+        PolymorphicDowncast<IAclTensorHandle*>(&parent), shape, coords);
 }
 
 std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index 3e1888cb54..447bad155f 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -6,6 +6,7 @@
 #include "NeonWorkloadFactoryHelper.hpp"
 
 #include <aclCommon/test/CreateWorkloadClNeon.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 
 #include <backendsCommon/MemCopyWorkload.hpp>
 #include <neon/NeonWorkloadFactory.hpp>
@@ -72,8 +73,8 @@ static void NeonCreateActivationWorkloadTest()
 
     // Checks that inputs/outputs are as we expect them (see definition of CreateActivationWorkloadTest).
     ActivationQueueDescriptor queueDescriptor = workload->GetData();
-    auto inputHandle  = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
     BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({1, 1}, DataType)));
     BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({1, 1}, DataType)));
 }
@@ -103,9 +104,9 @@ static void NeonCreateElementwiseWorkloadTest()
     auto workload = CreateElementwiseWorkloadTest<WorkloadType, DescriptorType, LayerType, DataType>(factory, graph);
 
     DescriptorType queueDescriptor = workload->GetData();
-    auto inputHandle1 = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    auto inputHandle2 = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
-    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    auto inputHandle1 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto inputHandle2 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
+    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
     BOOST_TEST(TestNeonTensorHandleInfo(inputHandle1, TensorInfo({2, 3}, DataType)));
     BOOST_TEST(TestNeonTensorHandleInfo(inputHandle2, TensorInfo({2, 3}, DataType)));
     BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({2, 3}, DataType)));
@@ -201,8 +202,8 @@ static void NeonCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
 
     // Checks that outputs and inputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest).
     BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData();
-    auto inputHandle  = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
     TensorShape inputShape  = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 4, 4} : TensorShape{2, 4, 4, 3};
     TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 4, 4} : TensorShape{2, 4, 4, 3};
@@ -247,8 +248,8 @@ static void NeonCreateConvolution2dWorkloadTest(DataLayout dataLayout = DataLayout::NCHW)
 
     // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest).
     Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
-    auto inputHandle  = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
     BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
     BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
 }
@@ -287,8 +288,8 @@ static void NeonCreateDepthWiseConvolutionWorkloadTest(DataLayout dataLayout)
 
     // Checks that inputs/outputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
     DepthwiseConvolution2dQueueDescriptor queueDescriptor = workload->GetData();
-    auto inputHandle  = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
     TensorShape inputShape  = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
                                                                : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
@@ -322,8 +323,8 @@ static void NeonCreateFullyConnectedWorkloadTest()
 
     // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
     FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
-    auto inputHandle  = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
     BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({3, 1, 4, 5}, DataType)));
     BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({3, 7}, DataType)));
 }
@@ -351,8 +352,8 @@ static void NeonCreateNormalizationWorkloadTest(DataLayout dataLayout)
 
     // Checks that outputs and inputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
     NormalizationQueueDescriptor queueDescriptor = workload->GetData();
-    auto inputHandle  = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
     TensorShape inputShape  = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 5, 5, 1} : TensorShape{3, 1, 5, 5};
     TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 5, 5, 1} : TensorShape{3, 1, 5, 5};
@@ -398,8 +399,8 @@ static void NeonCreatePooling2dWorkloadTest(DataLayout dataLayout = DataLayout::NCHW)
 
     // Checks that outputs and inputs are as we expect them (see definition of CreatePooling2dWorkloadTest).
     Pooling2dQueueDescriptor queueDescriptor = workload->GetData();
-    auto inputHandle  = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
     BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
     BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
 }
@@ -449,9 +450,9 @@ static void NeonCreatePreluWorkloadTest(const armnn::TensorShape& inputShape,
 
     // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest).
     PreluQueueDescriptor queueDescriptor = workload->GetData();
-    auto inputHandle  = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    auto alphaHandle  = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
-    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto alphaHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
+    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
     BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, dataType)));
     BOOST_TEST(TestNeonTensorHandleInfo(alphaHandle, TensorInfo(alphaShape, dataType)));
     BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, dataType)));
@@ -485,8 +486,8 @@ static void NeonCreateReshapeWorkloadTest()
 
     // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest).
     ReshapeQueueDescriptor queueDescriptor = workload->GetData();
-    auto inputHandle  = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
     BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({4, 1}, DataType)));
     BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({1, 4}, DataType)));
 }
@@ -518,8 +519,8 @@ static void NeonCreateResizeWorkloadTest(DataLayout dataLayout)
     auto queueDescriptor = workload->GetData();
 
-    auto inputHandle  = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
     switch (dataLayout)
     {
@@ -565,8 +566,8 @@ static void NeonCreateSoftmaxWorkloadTest()
 
     // Checks that outputs and inputs are as we expect them (see definition of CreateSoftmaxWorkloadTest).
     SoftmaxQueueDescriptor queueDescriptor = workload->GetData();
-    auto inputHandle  = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
     BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({4, 1}, DataType)));
     BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({4, 1}, DataType)));
 }
@@ -593,8 +594,8 @@ static void NeonSpaceToDepthWorkloadTest()
     auto workload = CreateSpaceToDepthWorkloadTest<SpaceToDepthWorkloadType, DataType>(factory, graph);
 
     SpaceToDepthQueueDescriptor queueDescriptor = workload->GetData();
-    auto inputHandle  = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
     BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({ 1, 2, 2, 1 }, DataType)));
     BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({ 1, 1, 1, 4 }, DataType)));
@@ -630,16 +631,16 @@ BOOST_AUTO_TEST_CASE(CreateSplitterWorkload)
 
     // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
     SplitterQueueDescriptor queueDescriptor = workload->GetData();
-    auto inputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
     BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({5, 7, 7}, DataType::Float32)));
 
-    auto outputHandle0 = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    auto outputHandle0 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
     BOOST_TEST(TestNeonTensorHandleInfo(outputHandle0, TensorInfo({1, 7, 7}, DataType::Float32)));
 
-    auto outputHandle1 = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
+    auto outputHandle1 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
     BOOST_TEST(TestNeonTensorHandleInfo(outputHandle1, TensorInfo({2, 7, 7}, DataType::Float32)));
 
-    auto outputHandle2 = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[2]);
+    auto outputHandle2 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[2]);
     BOOST_TEST(TestNeonTensorHandleInfo(outputHandle2, TensorInfo({2, 7, 7}, DataType::Float32)));
 }
@@ -743,8 +744,8 @@ static void NeonCreateL2NormalizationWorkloadTest(DataLayout dataLayout)
 
     // Checks that inputs/outputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
     L2NormalizationQueueDescriptor queueDescriptor = workload->GetData();
-    auto inputHandle  = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
     TensorShape inputShape  = (dataLayout == DataLayout::NCHW) ?
                TensorShape{ 5, 20, 50, 67 } : TensorShape{ 5, 50, 67, 20 };
@@ -788,8 +789,8 @@ static void NeonCreateLstmWorkloadTest()
 
     LstmQueueDescriptor queueDescriptor = workload->GetData();
 
-    auto inputHandle  = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
+    auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
 
     BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({ 2, 2 }, DataType::Float32)));
     BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({ 2, 4 }, DataType::Float32)));
@@ -811,9 +812,9 @@ static void NeonCreateConcatWorkloadTest(std::initializer_list<unsigned int> outputShape,
     auto workload = CreateConcatWorkloadTest<ConcatWorkloadType, DataType>(factory, graph, outputShape, concatAxis);
 
     ConcatQueueDescriptor queueDescriptor = workload->GetData();
-    auto inputHandle0 = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    auto inputHandle1 = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
-    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    auto inputHandle0 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto inputHandle1 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
+    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
     BOOST_TEST(TestNeonTensorHandleInfo(inputHandle0, TensorInfo({ 2, 3, 2, 5 }, DataType)));
     BOOST_TEST(TestNeonTensorHandleInfo(inputHandle1, TensorInfo({ 2, 3, 2, 5 }, DataType)));
@@ -871,10 +872,10 @@ static void NeonCreateStackWorkloadTest(const std::initializer_list<unsigned int>& inputShape,
     StackQueueDescriptor queueDescriptor = workload->GetData();
     for (unsigned int i = 0; i < numInputs; ++i)
     {
-        auto inputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[i]);
+        auto inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[i]);
         BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
     }
-    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
     BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
 }
@@ -898,8 +899,6 @@ BOOST_AUTO_TEST_CASE(CreateStackUint8Workload)
 template <typename QuantizedLstmWorkloadType>
 static void NeonCreateQuantizedLstmWorkloadTest()
 {
-    using boost::polymorphic_downcast;
-
     Graph graph;
     NeonWorkloadFactory factory = NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
 
@@ -907,23 +906,23 @@ static void NeonCreateQuantizedLstmWorkloadTest()
 
     QuantizedLstmQueueDescriptor queueDescriptor = workload->GetData();
 
-    IAclTensorHandle* inputHandle = polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    IAclTensorHandle* inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
     BOOST_TEST((inputHandle->GetShape() == TensorShape({2, 2})));
     BOOST_TEST((inputHandle->GetDataType() == arm_compute::DataType::QASYMM8));
 
-    IAclTensorHandle* cellStateInHandle = polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
+    IAclTensorHandle* cellStateInHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
     BOOST_TEST((cellStateInHandle->GetShape() == TensorShape({2, 4})));
     BOOST_TEST((cellStateInHandle->GetDataType() == arm_compute::DataType::QSYMM16));
 
-    IAclTensorHandle* outputStateInHandle = polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[2]);
+    IAclTensorHandle* outputStateInHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[2]);
     BOOST_TEST((outputStateInHandle->GetShape() == TensorShape({2, 4})));
     BOOST_TEST((outputStateInHandle->GetDataType() ==
                arm_compute::DataType::QASYMM8));
 
-    IAclTensorHandle* cellStateOutHandle = polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    IAclTensorHandle* cellStateOutHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
     BOOST_TEST((cellStateOutHandle->GetShape() == TensorShape({2, 4})));
     BOOST_TEST((cellStateOutHandle->GetDataType() == arm_compute::DataType::QSYMM16));
 
-    IAclTensorHandle* outputStateOutHandle = polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
+    IAclTensorHandle* outputStateOutHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
     BOOST_TEST((outputStateOutHandle->GetShape() == TensorShape({2, 4})));
     BOOST_TEST((outputStateOutHandle->GetDataType() == arm_compute::DataType::QASYMM8));
 }
diff --git a/src/backends/neon/workloads/NeonAbsWorkload.cpp b/src/backends/neon/workloads/NeonAbsWorkload.cpp
index 7f8ed5a006..ea14ac3897 100644
--- a/src/backends/neon/workloads/NeonAbsWorkload.cpp
+++ b/src/backends/neon/workloads/NeonAbsWorkload.cpp
@@ -9,8 +9,7 @@
 #include <aclCommon/ArmComputeTensorUtils.hpp>
 #include <armnn/utility/Assert.hpp>
-
-#include <boost/polymorphic_cast.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 
 namespace armnn
 {
@@ -28,8 +27,8 @@ NeonAbsWorkload::NeonAbsWorkload(const AbsQueueDescriptor& descriptor, const WorkloadInfo& info)
 {
     m_Data.ValidateInputsOutputs("NeonAbsWorkload", 1, 1);
 
-    arm_compute::ITensor& input  = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input  = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     m_AbsLayer.configure(&input, &output);
 }
diff --git a/src/backends/neon/workloads/NeonActivationWorkload.cpp b/src/backends/neon/workloads/NeonActivationWorkload.cpp
index 916d67449c..4b2169a6ee 100644
--- a/src/backends/neon/workloads/NeonActivationWorkload.cpp
+++ b/src/backends/neon/workloads/NeonActivationWorkload.cpp
@@ -5,7 +5,9 @@
 
 #include "NeonActivationWorkload.hpp"
 #include "NeonWorkloadUtils.hpp"
+
 #include <aclCommon/ArmComputeUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 
 #include <arm_compute/runtime/NEON/functions/NEActivationLayer.h>
@@ -36,8 +38,8 @@ NeonActivationWorkload::NeonActivationWorkload(const ActivationQueueDescriptor& descriptor,
     const arm_compute::ActivationLayerInfo activationLayerInfo =
         ConvertActivationDescriptorToAclActivationLayerInfo(m_Data.m_Parameters);
 
-    arm_compute::ITensor& input  = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input  = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     auto layer = std::make_unique<arm_compute::NEActivationLayer>();
     layer->configure(&input, &output, activationLayerInfo);
diff --git a/src/backends/neon/workloads/NeonAdditionWorkload.cpp b/src/backends/neon/workloads/NeonAdditionWorkload.cpp
index a025c0b8f5..cb0c8a471f 100644
--- a/src/backends/neon/workloads/NeonAdditionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonAdditionWorkload.cpp
@@ -7,6 +7,7 @@
 #include "NeonWorkloadUtils.hpp"
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 
 #include <arm_compute/runtime/NEON/functions/NEArithmeticAddition.h>
@@ -35,9 +36,9 @@ NeonAdditionWorkload::NeonAdditionWorkload(const AdditionQueueDescriptor& descriptor,
 {
     m_Data.ValidateInputsOutputs("NeonAdditionWorkload", 2, 1);
 
-    arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& input2 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input1 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& input2 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     auto layer = std::make_unique<arm_compute::NEArithmeticAddition>();
     layer->configure(&input1, &input2, &output, arm_compute::ConvertPolicy::SATURATE);
diff --git a/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp b/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp
index 0fa9d43b15..0fb819db0b 100644
--- a/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp
+++ b/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp
@@ -10,6 +10,7 @@
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <armnnUtils/TensorUtils.hpp>
 
 #include <arm_compute/runtime/NEON/functions/NEArgMinMaxLayer.h>
@@ -54,8 +55,8 @@ NeonArgMinMaxWorkload::NeonArgMinMaxWorkload(const ArgMinMaxQueueDescriptor& descriptor,
                                              const WorkloadInfo& info)
     : BaseWorkload<ArgMinMaxQueueDescriptor>(descriptor, info)
 {
-    arm_compute::ITensor& input  = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input  = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     auto numDims = info.m_InputTensorInfos[0].GetNumDimensions();
     auto unsignedAxis = armnnUtils::GetUnsignedAxis(numDims, m_Data.m_Parameters.m_Axis);
diff --git a/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp b/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp
index cd931e3797..ff777dbf9b 100644
--- a/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp
+++ b/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp
@@ -7,8 +7,9 @@
 
 #include "NeonWorkloadUtils.hpp"
 
-#include <backendsCommon/CpuTensorHandle.hpp>
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
 
 #include <arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h>
@@ -53,8 +54,8 @@ NeonBatchNormalizationWorkload::NeonBatchNormalizationWorkload(
 {
     m_Data.ValidateInputsOutputs("NeonBatchNormalizationWorkload", 1, 1);
 
-    arm_compute::ITensor& input  = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input  = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     input.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/neon/workloads/NeonConstantWorkload.cpp b/src/backends/neon/workloads/NeonConstantWorkload.cpp
index b9cb807779..1cffbe1448 100644
--- a/src/backends/neon/workloads/NeonConstantWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConstantWorkload.cpp
@@ -9,6 +9,7 @@
 #include <arm_compute/core/Types.h>
 #include <aclCommon/ArmComputeTensorUtils.hpp>
 #include <Half.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 #include <backendsCommon/Workload.hpp>
 #include <neon/NeonTensorHandle.hpp>
@@ -41,9 +42,9 @@ void NeonConstantWorkload::Execute() const
         ARMNN_ASSERT(data.m_LayerOutput != nullptr);
 
         arm_compute::ITensor& output =
-            boost::polymorphic_downcast<NeonTensorHandle*>(data.m_Outputs[0])->GetTensor();
+            PolymorphicDowncast<NeonTensorHandle*>(data.m_Outputs[0])->GetTensor();
         arm_compute::DataType computeDataType =
-            boost::polymorphic_downcast<NeonTensorHandle*>(data.m_Outputs[0])->GetDataType();
+            PolymorphicDowncast<NeonTensorHandle*>(data.m_Outputs[0])->GetDataType();
 
         switch (computeDataType)
         {
diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
index 5d45642eef..144baec0ca 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
@@ -5,8 +5,9 @@
"NeonConvolution2dWorkload.hpp" -#include #include +#include +#include #include #include @@ -65,8 +66,8 @@ NeonConvolution2dWorkload::NeonConvolution2dWorkload( // todo: check tensor shapes match. - arm_compute::ITensor& input = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input = PolymorphicDowncast(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast(m_Data.m_Outputs[0])->GetTensor(); arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout); input.info()->set_data_layout(aclDataLayout); diff --git a/src/backends/neon/workloads/NeonDequantizeWorkload.cpp b/src/backends/neon/workloads/NeonDequantizeWorkload.cpp index 8b229a1cda..9ae82ff79f 100644 --- a/src/backends/neon/workloads/NeonDequantizeWorkload.cpp +++ b/src/backends/neon/workloads/NeonDequantizeWorkload.cpp @@ -10,6 +10,7 @@ #include #include +#include #include #include @@ -32,8 +33,8 @@ NeonDequantizeWorkload::NeonDequantizeWorkload(const DequantizeQueueDescriptor& { m_Data.ValidateInputsOutputs("NeonDequantizeWorkload", 1, 1); - arm_compute::ITensor& input = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input = PolymorphicDowncast(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast(m_Data.m_Outputs[0])->GetTensor(); std::unique_ptr layer(new arm_compute::NEDequantizationLayer()); layer->configure(&input, &output); diff --git a/src/backends/neon/workloads/NeonDetectionPostProcessWorkload.cpp b/src/backends/neon/workloads/NeonDetectionPostProcessWorkload.cpp index 2ed47e4463..36f1cd98de 100644 --- a/src/backends/neon/workloads/NeonDetectionPostProcessWorkload.cpp +++ b/src/backends/neon/workloads/NeonDetectionPostProcessWorkload.cpp @@ -9,8 +9,7 @@ #include #include - -#include +#include namespace armnn { @@ -85,7 +84,7 @@ NeonDetectionPostProcessWorkload::NeonDetectionPostProcessWorkload( auto AclTensorRef = [](ITensorHandle* tensor) -> arm_compute::ITensor& { - return boost::polymorphic_downcast(tensor)->GetTensor(); + return PolymorphicDowncast(tensor)->GetTensor(); }; arm_compute::ITensor& boxEncodings = AclTensorRef(m_Data.m_Inputs[0]); diff --git a/src/backends/neon/workloads/NeonDivisionWorkload.cpp b/src/backends/neon/workloads/NeonDivisionWorkload.cpp index 6fdb455f25..fc353f136d 100644 --- a/src/backends/neon/workloads/NeonDivisionWorkload.cpp +++ b/src/backends/neon/workloads/NeonDivisionWorkload.cpp @@ -4,7 +4,9 @@ // #include "NeonDivisionWorkload.hpp" + #include +#include #include namespace armnn @@ -29,9 +31,9 @@ NeonDivisionWorkload::NeonDivisionWorkload(const DivisionQueueDescriptor& descri { m_Data.ValidateInputsOutputs("NeonDivisionWorkload", 2, 1); - arm_compute::ITensor& input0 = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& input1 = boost::polymorphic_downcast(m_Data.m_Inputs[1])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input0 = PolymorphicDowncast(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& input1 = PolymorphicDowncast(m_Data.m_Inputs[1])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast(m_Data.m_Outputs[0])->GetTensor(); m_DivLayer.configure(&input0, &input1, &output); } diff 
diff --git a/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp b/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp
index 5b4e9094fd..c49df33a54 100644
--- a/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp
@@ -7,9 +7,9 @@
 
 #include "NeonWorkloadUtils.hpp"
 
-#include <arm_compute/runtime/NEON/functions/NEFloor.h>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 
-#include <boost/polymorphic_cast.hpp>
+#include <arm_compute/runtime/NEON/functions/NEFloor.h>
 
 namespace armnn
 {
@@ -19,8 +19,8 @@ NeonFloorFloatWorkload::NeonFloorFloatWorkload(const FloorQueueDescriptor& descriptor,
 {
     m_Data.ValidateInputsOutputs("NeonFloorFloatWorkload", 1, 1);
 
-    arm_compute::ITensor& input  = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input  = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     auto layer = std::make_unique<arm_compute::NEFloor>();
     layer->configure(&input, &output);
diff --git a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
index 338c7eb1f6..e808c60c0c 100644
--- a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
@@ -8,6 +8,7 @@
 #include "NeonWorkloadUtils.hpp"
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
 #include <aclCommon/ArmComputeUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 
 #include <arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h>
@@ -51,8 +52,8 @@ NeonFullyConnectedWorkload::NeonFullyConnectedWorkload(const FullyConnectedQueueDescriptor& descriptor,
 {
     m_Data.ValidateInputsOutputs("NeonFullyConnectedWorkload", 1, 1);
 
-    arm_compute::ITensor& input  = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input  = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     m_WeightsTensor = std::make_unique<arm_compute::Tensor>();
     BuildArmComputeTensor(*m_WeightsTensor, m_Data.m_Weight->GetTensorInfo());
diff --git a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
index 9de6c82702..d54607d31e 100644
--- a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
@@ -8,6 +8,7 @@
 #include "NeonWorkloadUtils.hpp"
 
 #include <aclCommon/ArmComputeUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 
 #include <arm_compute/runtime/NEON/functions/NEL2NormalizeLayer.h>
@@ -33,8 +34,8 @@ NeonL2NormalizationFloatWorkload::NeonL2NormalizationFloatWorkload(const L2NormalizationQueueDescriptor& descriptor,
 {
     m_Data.ValidateInputsOutputs("NeonL2NormalizationFloatWorkload", 1, 1);
 
-    arm_compute::ITensor& input  = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input  = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     input.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/neon/workloads/NeonMaximumWorkload.cpp b/src/backends/neon/workloads/NeonMaximumWorkload.cpp
index c433d81973..46d500bfdc 100644
--- a/src/backends/neon/workloads/NeonMaximumWorkload.cpp
+++ b/src/backends/neon/workloads/NeonMaximumWorkload.cpp
@@ -5,6 +5,7 @@
 
 #include "NeonMaximumWorkload.hpp"
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 
 namespace armnn
@@ -29,9 +30,9 @@ NeonMaximumWorkload::NeonMaximumWorkload(const MaximumQueueDescriptor& descriptor,
 {
     m_Data.ValidateInputsOutputs("NeonMaximumWorkload", 2, 1);
 
-    arm_compute::ITensor& input0 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input0 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& input1 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     m_MaxLayer.configure(&input0, &input1, &output);
 }
diff --git a/src/backends/neon/workloads/NeonMinimumWorkload.cpp b/src/backends/neon/workloads/NeonMinimumWorkload.cpp
index 2867a8079f..53e483a182 100644
--- a/src/backends/neon/workloads/NeonMinimumWorkload.cpp
+++ b/src/backends/neon/workloads/NeonMinimumWorkload.cpp
@@ -4,7 +4,9 @@
 //
 
 #include "NeonMinimumWorkload.hpp"
+
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 
 namespace armnn
@@ -29,9 +31,9 @@ NeonMinimumWorkload::NeonMinimumWorkload(const MinimumQueueDescriptor& descriptor,
 {
     m_Data.ValidateInputsOutputs("NeonMinimumWorkload", 2, 1);
 
-    arm_compute::ITensor& input0 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input0 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& input1 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     m_MinLayer.configure(&input0, &input1, &output);
 }
diff --git a/src/backends/neon/workloads/NeonMultiplicationWorkload.cpp b/src/backends/neon/workloads/NeonMultiplicationWorkload.cpp
index 66fbedfa63..d813970901 100644
--- a/src/backends/neon/workloads/NeonMultiplicationWorkload.cpp
+++ b/src/backends/neon/workloads/NeonMultiplicationWorkload.cpp
@@ -7,6 +7,8 @@
 
 #include "NeonWorkloadUtils.hpp"
 
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
 #include <arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h>
 
 namespace armnn
@@ -37,9 +39,9 @@ NeonMultiplicationWorkload::NeonMultiplicationWorkload(const MultiplicationQueueDescriptor& descriptor,
 {
     m_Data.ValidateInputsOutputs("NeonMultiplicationWorkload", 2, 1);
 
-    arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& input2 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input1 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& input2 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     // At the time of writing, configure() will fail if a rounding policy other than TO_ZERO is supplied to it,
    // when providing a scale of 1.0 for F32 tensors, even though the provided rounding policy appears to be
diff --git a/src/backends/neon/workloads/NeonNegWorkload.cpp b/src/backends/neon/workloads/NeonNegWorkload.cpp
index afe05583fd..06c146754c 100644
--- a/src/backends/neon/workloads/NeonNegWorkload.cpp
+++ b/src/backends/neon/workloads/NeonNegWorkload.cpp
@@ -9,8 +9,7 @@
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
 #include <armnn/utility/Assert.hpp>
-
-#include <boost/polymorphic_cast.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 
 namespace armnn
 {
@@ -28,8 +27,8 @@ NeonNegWorkload::NeonNegWorkload(const ElementwiseUnaryQueueDescriptor& descriptor,
 {
m_Data.ValidateInputsOutputs("NeonNegWorkload", 1, 1); - arm_compute::ITensor& input = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input = PolymorphicDowncast(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast(m_Data.m_Outputs[0])->GetTensor(); m_NegLayer.configure(&input, &output); } diff --git a/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp index 8cb4ec975d..77fc429b95 100644 --- a/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp +++ b/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp @@ -8,6 +8,7 @@ #include "NeonWorkloadUtils.hpp" #include #include +#include #include @@ -77,8 +78,8 @@ NeonNormalizationFloatWorkload::NeonNormalizationFloatWorkload(const Normalizati throw InvalidArgumentException("Normalization requires input and output tensors to have equal dimensionality."); } - arm_compute::ITensor& input = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input = PolymorphicDowncast(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast(m_Data.m_Outputs[0])->GetTensor(); arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout); input.info()->set_data_layout(aclDataLayout); output.info()->set_data_layout(aclDataLayout); diff --git a/src/backends/neon/workloads/NeonPooling2dWorkload.cpp b/src/backends/neon/workloads/NeonPooling2dWorkload.cpp index 9934c29a41..968d5ce02d 100644 --- a/src/backends/neon/workloads/NeonPooling2dWorkload.cpp +++ b/src/backends/neon/workloads/NeonPooling2dWorkload.cpp @@ -7,6 +7,8 @@ #include "NeonWorkloadUtils.hpp" +#include + #include #include #include @@ -37,8 +39,8 @@ NeonPooling2dWorkload::NeonPooling2dWorkload( { m_Data.ValidateInputsOutputs("NeonPooling2dWorkload", 1, 1); - arm_compute::ITensor& input = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input = PolymorphicDowncast(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast(m_Data.m_Outputs[0])->GetTensor(); arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout); input.info()->set_data_layout(aclDataLayout); diff --git a/src/backends/neon/workloads/NeonPreluWorkload.cpp b/src/backends/neon/workloads/NeonPreluWorkload.cpp index 107090e704..8e6ea301de 100644 --- a/src/backends/neon/workloads/NeonPreluWorkload.cpp +++ b/src/backends/neon/workloads/NeonPreluWorkload.cpp @@ -5,7 +5,9 @@ #include "NeonPreluWorkload.hpp" #include "NeonWorkloadUtils.hpp" + #include +#include #include @@ -31,9 +33,9 @@ NeonPreluWorkload::NeonPreluWorkload(const PreluQueueDescriptor& descriptor, { m_Data.ValidateInputsOutputs("NeonPreluWorkload", 1, 1); - arm_compute::ITensor& input = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& alpha = boost::polymorphic_downcast(m_Data.m_Inputs[1])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::ITensor& input = PolymorphicDowncast(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& alpha = 
+    arm_compute::ITensor& alpha  = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     auto layer = std::make_unique<arm_compute::NEPReluLayer>();
     layer->configure(&input, &alpha, &output);
diff --git a/src/backends/neon/workloads/NeonReshapeWorkload.cpp b/src/backends/neon/workloads/NeonReshapeWorkload.cpp
index 659bb94723..8b11da7253 100644
--- a/src/backends/neon/workloads/NeonReshapeWorkload.cpp
+++ b/src/backends/neon/workloads/NeonReshapeWorkload.cpp
@@ -7,9 +7,9 @@
 
 #include "NeonWorkloadUtils.hpp"
 
-#include <arm_compute/runtime/NEON/functions/NEReshapeLayer.h>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 
-#include <boost/polymorphic_cast.hpp>
+#include <arm_compute/runtime/NEON/functions/NEReshapeLayer.h>
 
 namespace armnn
 {
@@ -29,8 +29,8 @@ NeonReshapeWorkload::NeonReshapeWorkload(const ReshapeQueueDescriptor& descriptor,
 {
     m_Data.ValidateInputsOutputs("NeonReshapeWorkload", 1, 1);
 
-    arm_compute::ITensor& input  = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input  = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     auto layer = std::make_unique<arm_compute::NEReshapeLayer>();
     layer->configure(&input, &output);
diff --git a/src/backends/neon/workloads/NeonResizeWorkload.cpp b/src/backends/neon/workloads/NeonResizeWorkload.cpp
index e936ab7446..9e3be2655c 100644
--- a/src/backends/neon/workloads/NeonResizeWorkload.cpp
+++ b/src/backends/neon/workloads/NeonResizeWorkload.cpp
@@ -9,7 +9,9 @@
 
 #include <aclCommon/ArmComputeUtils.hpp>
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
+
 #include <neon/NeonTensorHandle.hpp>
 
 using namespace armnn::armcomputetensorutils;
@@ -45,8 +47,8 @@ NeonResizeWorkload::NeonResizeWorkload(const ResizeQueueDescriptor& descriptor,
 {
     m_Data.ValidateInputsOutputs("NeonResizeWorkload", 1, 1);
 
-    arm_compute::ITensor& input  = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input  = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     input.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/neon/workloads/NeonRsqrtWorkload.cpp b/src/backends/neon/workloads/NeonRsqrtWorkload.cpp
index b6292833dd..44980df996 100644
--- a/src/backends/neon/workloads/NeonRsqrtWorkload.cpp
+++ b/src/backends/neon/workloads/NeonRsqrtWorkload.cpp
@@ -9,8 +9,8 @@
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
 #include <armnn/utility/Assert.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 
-#include <boost/polymorphic_cast.hpp>
 
 namespace armnn
 {
@@ -28,8 +28,8 @@ NeonRsqrtWorkload::NeonRsqrtWorkload(const RsqrtQueueDescriptor& descriptor, const WorkloadInfo& info)
 {
     m_Data.ValidateInputsOutputs("NeonRsqrtWorkload", 1, 1);
 
-    arm_compute::ITensor& input  = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input  = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     m_RsqrtLayer.configure(&input, &output);
 }
diff --git a/src/backends/neon/workloads/NeonSliceWorkload.cpp b/src/backends/neon/workloads/NeonSliceWorkload.cpp
index 171edc6c59..32cc042eab 100644
--- a/src/backends/neon/workloads/NeonSliceWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSliceWorkload.cpp
@@ -7,6 +7,8 @@
 
 #include "NeonWorkloadUtils.hpp"
 
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
 #include <aclCommon/ArmComputeTensorUtils.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 
@@ -37,8 +39,8 @@ NeonSliceWorkload::NeonSliceWorkload(const SliceQueueDescriptor& descriptor,
 {
     m_Data.ValidateInputsOutputs("NeonSliceWorkload", 1, 1);
 
-    arm_compute::ITensor& input  = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input  = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     arm_compute::Coordinates starts;
     arm_compute::Coordinates ends;
diff --git a/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp b/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
index 152d19cc04..a4690a7985 100644
--- a/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
@@ -8,6 +8,8 @@
 #include "NeonWorkloadUtils.hpp"
 
 #include <aclCommon/ArmComputeUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
 #include <arm_compute/runtime/NEON/functions/NESoftmaxLayer.h>
 
 namespace armnn
@@ -20,8 +22,8 @@ NeonSoftmaxFloatWorkload::NeonSoftmaxFloatWorkload(const SoftmaxQueueDescriptor& descriptor,
     m_Data.ValidateInputsOutputs("NeonSoftmaxFloatWorkload", 1, 1);
 
     // The ArmCompute softmax layer uses 2D input/output tensors, so flatten the first three dimensions.
-    arm_compute::ITensor& input  = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input  = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
     unsigned int aclAxis = ComputeSoftmaxAclAxis(m_Data.m_Parameters, info.m_InputTensorInfos[0]);
diff --git a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
index 15a7066861..05d93b963c 100644
--- a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
@@ -7,6 +7,7 @@
 #include "NeonWorkloadUtils.hpp"
 
 #include <aclCommon/ArmComputeUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 
 #include <arm_compute/runtime/NEON/functions/NESoftmaxLayer.h>
 
@@ -20,8 +21,8 @@ NeonSoftmaxUint8Workload::NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor& descriptor,
 {
     m_Data.ValidateInputsOutputs("NeonSoftmaxUint8Workload", 1, 1);
 
-    arm_compute::ITensor& input  = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input  = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     const auto outputQuantization = output.info()->quantization_info();
 
diff --git a/src/backends/neon/workloads/NeonSpaceToDepthWorkload.cpp b/src/backends/neon/workloads/NeonSpaceToDepthWorkload.cpp
index a4204b21e6..2982cd181d 100644
--- a/src/backends/neon/workloads/NeonSpaceToDepthWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSpaceToDepthWorkload.cpp
@@ -5,6 +5,8 @@
 
 #include "NeonSpaceToDepthWorkload.hpp"
 #include "NeonWorkloadUtils.hpp"
+
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <ResolveType.hpp>
 
 namespace armnn
@@ -33,12 +35,12 @@ NeonSpaceToDepthWorkload::NeonSpaceToDepthWorkload(const SpaceToDepthQueueDescriptor& descriptor,
 
     arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
 
-    arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
     input.info()->set_data_layout(aclDataLayout);
 
     int32_t blockSize = boost::numeric_cast<int32_t>(desc.m_Parameters.m_BlockSize);
 
-    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
     output.info()->set_data_layout(aclDataLayout);
 
     m_Layer.reset(new arm_compute::NESpaceToDepthLayer());
diff --git a/src/backends/neon/workloads/NeonSplitterWorkload.cpp b/src/backends/neon/workloads/NeonSplitterWorkload.cpp
index 224e97af2d..19fa7c6389 100644
--- a/src/backends/neon/workloads/NeonSplitterWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSplitterWorkload.cpp
@@ -9,6 +9,7 @@
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
 #include <aclCommon/ArmComputeUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 
@@ -74,7 +75,7 @@ NeonSplitterWorkload::NeonSplitterWorkload(const SplitterQueueDescriptor& descriptor,
         return;
     }
 
-    arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
 
     std::vector<arm_compute::ITensor*> aclOutputs;
     for (auto output : m_Data.m_Outputs)
diff --git a/src/backends/neon/workloads/NeonStridedSliceWorkload.cpp b/src/backends/neon/workloads/NeonStridedSliceWorkload.cpp
index 356c0aea83..282005c7cc 100644
--- a/src/backends/neon/workloads/NeonStridedSliceWorkload.cpp
+++ b/src/backends/neon/workloads/NeonStridedSliceWorkload.cpp
@@ -9,6 +9,7 @@
 #include <neon/NeonTensorHandle.hpp>
 #include <aclCommon/ArmComputeUtils.hpp>
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <backendsCommon/WorkloadUtils.hpp>
 
 namespace armnn
@@ -50,8 +51,8 @@ NeonStridedSliceWorkload::NeonStridedSliceWorkload(const StridedSliceQueueDescriptor& descriptor,
 {
     m_Data.ValidateInputsOutputs("NeonStridedSliceWorkload", 1, 1);
 
-    arm_compute::ITensor& input  = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input  = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     arm_compute::Coordinates starts;
     arm_compute::Coordinates ends;
diff --git a/src/backends/neon/workloads/NeonSubtractionWorkload.cpp b/src/backends/neon/workloads/NeonSubtractionWorkload.cpp
index f4b4707633..ccc2bfe58b 100644
--- a/src/backends/neon/workloads/NeonSubtractionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSubtractionWorkload.cpp
@@ -7,6 +7,7 @@
 #include "NeonWorkloadUtils.hpp"
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 
 #include <arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h>
@@ -34,9 +35,9 @@ NeonSubtractionWorkload::NeonSubtractionWorkload(const SubtractionQueueDescriptor& descriptor,
 {
     m_Data.ValidateInputsOutputs("NeonSubtractionWorkload", 2, 1);
 
-    arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& input2 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input1 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& input2 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     auto layer = std::make_unique<arm_compute::NEArithmeticSubtraction>();
     layer->configure(&input1, &input2, &output, arm_compute::ConvertPolicy::SATURATE);
diff --git a/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
index ffca2076fe..985f540e6a 100644
--- a/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
@@ -9,6 +9,7 @@
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
 #include <aclCommon/ArmComputeUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 
@@ -60,8 +61,8 @@ NeonTransposeConvolution2dWorkload::NeonTransposeConvolution2dWorkload(
 {
     m_Data.ValidateInputsOutputs("NeonTransposeConvolution2dWorkload", 1, 1);
 
-    arm_compute::ITensor& input  = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input  = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     input.info()->set_data_layout(aclDataLayout);
-- 
cgit v1.2.1
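
Note for readers unfamiliar with the utility this patch adopts: the patch never shows the definition of armnn::PolymorphicDowncast, only its call sites. The self-contained C++ below is a rough sketch of what such a checked downcast helper typically looks like — a cast that is verified with dynamic_cast in debug builds and collapses to a plain static_cast in release builds. It is an illustrative approximation under those assumptions, not the verbatim contents of armnn/utility/PolymorphicDowncast.hpp; the namespace "sketch" and the ITensorHandle/NeonTensorHandle stand-in types are hypothetical placeholders.

// checked_downcast_sketch.cpp -- illustrative only; not the ArmNN implementation.
#include <cassert>
#include <type_traits>

namespace sketch
{

// Downcast a base-class pointer to a derived-class pointer. In a debug build,
// assert that the object's dynamic type really matches the requested type;
// in a release build (NDEBUG) this is just a static_cast with zero overhead.
template <typename DestType, typename SourceType>
DestType PolymorphicDowncast(SourceType* value)
{
    static_assert(std::is_pointer<DestType>::value,
                  "PolymorphicDowncast only works with pointer types");

    // dynamic_cast returns nullptr (or a differing address) if the cast is
    // invalid, so comparing it against the static_cast result catches misuse.
    assert(dynamic_cast<DestType>(value) == static_cast<DestType>(value));
    return static_cast<DestType>(value);
}

} // namespace sketch

// Usage mirroring the call sites in the patch; these types are placeholders.
struct ITensorHandle { virtual ~ITensorHandle() = default; };
struct NeonTensorHandle : ITensorHandle { int payload = 42; };

int main()
{
    NeonTensorHandle concrete;
    ITensorHandle* base = &concrete;

    // Same shape as the patched code: cast the abstract handle down to the
    // backend-specific handle type before using backend-specific members.
    auto* neon = sketch::PolymorphicDowncast<NeonTensorHandle*>(base);
    return neon->payload == 42 ? 0 : 1;
}

The design motivation, as far as the commit message indicates, is dropping a Boost dependency: boost::polymorphic_downcast has essentially these semantics, so an in-house template lets the backends keep debug-time type checking without pulling in boost/polymorphic_cast.hpp.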