author    Jan Eilers <jan.eilers@arm.com>    2020-04-02 13:56:54 +0100
committer Jan Eilers <jan.eilers@arm.com>    2020-04-10 10:11:11 +0100
commit    bb446e576e120512d5752a5d6dc1ddc636f563ba (patch)
tree      147d0b5f2886af208199a24704afd845a4825bf8 /src/backends/neon/workloads
parent    e5d0b93b152a26faf93538eb719d03e5b477d670 (diff)
download  armnn-bb446e576e120512d5752a5d6dc1ddc636f563ba.tar.gz
IVGCVSW-4483 Remove boost::polymorphic_downcast
* exchange boost::polymorphic_downcast with armnn::PolymorphicDowncast
* remove unnecessary includes of boost::polymorphic_downcast

Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Change-Id: Ie603fb82860fe05fee547dc78073230cc62b2e1f
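For context: armnn::PolymorphicDowncast (from armnn/utility/PolymorphicDowncast.hpp, the header added throughout this diff) keeps the contract of boost::polymorphic_downcast, namely a downcast that is validated against dynamic_cast in debug builds and compiles down to a plain static_cast in release builds. A minimal sketch of that contract, assuming a plain assert-based check rather than ArmNN's actual internals:

    #include <cassert>
    #include <type_traits>

    // Sketch only; the real utility lives in armnn/utility/PolymorphicDowncast.hpp.
    template <typename DestType, typename SourceType>
    DestType PolymorphicDowncast(SourceType* value)
    {
        static_assert(std::is_pointer<DestType>::value, "DestType must be a pointer type");
        // Debug builds: confirm the unchecked cast agrees with dynamic_cast.
        assert(dynamic_cast<DestType>(value) == static_cast<DestType>(value));
        return static_cast<DestType>(value);
    }

Because the replacement is header-only and inside ArmNN, each workload can also drop its boost/cast.hpp or boost/polymorphic_cast.hpp include, which accounts for most of the deletions below.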
Diffstat (limited to 'src/backends/neon/workloads')
-rw-r--r--  src/backends/neon/workloads/NeonAbsWorkload.cpp | 7
-rw-r--r--  src/backends/neon/workloads/NeonActivationWorkload.cpp | 6
-rw-r--r--  src/backends/neon/workloads/NeonAdditionWorkload.cpp | 7
-rw-r--r--  src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp | 5
-rw-r--r--  src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp | 7
-rw-r--r--  src/backends/neon/workloads/NeonConstantWorkload.cpp | 5
-rw-r--r--  src/backends/neon/workloads/NeonConvolution2dWorkload.cpp | 7
-rw-r--r--  src/backends/neon/workloads/NeonDequantizeWorkload.cpp | 5
-rw-r--r--  src/backends/neon/workloads/NeonDetectionPostProcessWorkload.cpp | 5
-rw-r--r--  src/backends/neon/workloads/NeonDivisionWorkload.cpp | 8
-rw-r--r--  src/backends/neon/workloads/NeonFloorFloatWorkload.cpp | 8
-rw-r--r--  src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp | 5
-rw-r--r--  src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp | 5
-rw-r--r--  src/backends/neon/workloads/NeonMaximumWorkload.cpp | 7
-rw-r--r--  src/backends/neon/workloads/NeonMinimumWorkload.cpp | 8
-rw-r--r--  src/backends/neon/workloads/NeonMultiplicationWorkload.cpp | 8
-rw-r--r--  src/backends/neon/workloads/NeonNegWorkload.cpp | 7
-rw-r--r--  src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp | 5
-rw-r--r--  src/backends/neon/workloads/NeonPooling2dWorkload.cpp | 6
-rw-r--r--  src/backends/neon/workloads/NeonPreluWorkload.cpp | 8
-rw-r--r--  src/backends/neon/workloads/NeonReshapeWorkload.cpp | 8
-rw-r--r--  src/backends/neon/workloads/NeonResizeWorkload.cpp | 6
-rw-r--r--  src/backends/neon/workloads/NeonRsqrtWorkload.cpp | 6
-rw-r--r--  src/backends/neon/workloads/NeonSliceWorkload.cpp | 6
-rw-r--r--  src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp | 6
-rw-r--r--  src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp | 5
-rw-r--r--  src/backends/neon/workloads/NeonSpaceToDepthWorkload.cpp | 6
-rw-r--r--  src/backends/neon/workloads/NeonSplitterWorkload.cpp | 3
-rw-r--r--  src/backends/neon/workloads/NeonStridedSliceWorkload.cpp | 5
-rw-r--r--  src/backends/neon/workloads/NeonSubtractionWorkload.cpp | 7
-rw-r--r--  src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp | 5
31 files changed, 112 insertions, 80 deletions
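The per-file change is mechanical, as the hunks below show. For a self-contained illustration of the same call-site pattern outside a workload (Base and Derived here are hypothetical types for the example, not taken from the diff):

    #include <armnn/utility/PolymorphicDowncast.hpp>

    struct Base { virtual ~Base() = default; };
    struct Derived : Base { int tag = 7; };

    int main()
    {
        Derived d;
        Base* handle = &d;
        // Mirrors PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor()
        Derived* derived = armnn::PolymorphicDowncast<Derived*>(handle);
        return derived->tag == 7 ? 0 : 1;
    }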
diff --git a/src/backends/neon/workloads/NeonAbsWorkload.cpp b/src/backends/neon/workloads/NeonAbsWorkload.cpp
index 7f8ed5a006..ea14ac3897 100644
--- a/src/backends/neon/workloads/NeonAbsWorkload.cpp
+++ b/src/backends/neon/workloads/NeonAbsWorkload.cpp
@@ -9,8 +9,7 @@
#include <aclCommon/ArmComputeTensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
-
-#include <boost/cast.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
namespace armnn
{
@@ -28,8 +27,8 @@ NeonAbsWorkload::NeonAbsWorkload(const AbsQueueDescriptor& descriptor, const Wor
{
m_Data.ValidateInputsOutputs("NeonAbsWorkload", 1, 1);
- arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
m_AbsLayer.configure(&input, &output);
}
diff --git a/src/backends/neon/workloads/NeonActivationWorkload.cpp b/src/backends/neon/workloads/NeonActivationWorkload.cpp
index 916d67449c..4b2169a6ee 100644
--- a/src/backends/neon/workloads/NeonActivationWorkload.cpp
+++ b/src/backends/neon/workloads/NeonActivationWorkload.cpp
@@ -5,7 +5,9 @@
#include "NeonActivationWorkload.hpp"
#include "NeonWorkloadUtils.hpp"
+
#include <aclCommon/ArmComputeUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <arm_compute/runtime/NEON/functions/NEActivationLayer.h>
@@ -36,8 +38,8 @@ NeonActivationWorkload::NeonActivationWorkload(const ActivationQueueDescriptor&
const arm_compute::ActivationLayerInfo activationLayerInfo =
ConvertActivationDescriptorToAclActivationLayerInfo(m_Data.m_Parameters);
- arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
auto layer = std::make_unique<arm_compute::NEActivationLayer>();
layer->configure(&input, &output, activationLayerInfo);
diff --git a/src/backends/neon/workloads/NeonAdditionWorkload.cpp b/src/backends/neon/workloads/NeonAdditionWorkload.cpp
index a025c0b8f5..cb0c8a471f 100644
--- a/src/backends/neon/workloads/NeonAdditionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonAdditionWorkload.cpp
@@ -7,6 +7,7 @@
#include "NeonWorkloadUtils.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <arm_compute/runtime/NEON/functions/NEArithmeticAddition.h>
@@ -35,9 +36,9 @@ NeonAdditionWorkload::NeonAdditionWorkload(const AdditionQueueDescriptor& descri
{
m_Data.ValidateInputsOutputs("NeonAdditionWorkload", 2, 1);
- arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& input2 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input1 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& input2 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
auto layer = std::make_unique<arm_compute::NEArithmeticAddition>();
layer->configure(&input1, &input2, &output, arm_compute::ConvertPolicy::SATURATE);
diff --git a/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp b/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp
index 0fa9d43b15..0fb819db0b 100644
--- a/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp
+++ b/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp
@@ -10,6 +10,7 @@
#include <backendsCommon/CpuTensorHandle.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <armnnUtils/TensorUtils.hpp>
#include <arm_compute/runtime/NEON/functions/NEArgMinMaxLayer.h>
@@ -54,8 +55,8 @@ NeonArgMinMaxWorkload::NeonArgMinMaxWorkload(const ArgMinMaxQueueDescriptor& des
const WorkloadInfo& info)
: BaseWorkload<ArgMinMaxQueueDescriptor>(descriptor, info)
{
- arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
auto numDims = info.m_InputTensorInfos[0].GetNumDimensions();
auto unsignedAxis = armnnUtils::GetUnsignedAxis(numDims, m_Data.m_Parameters.m_Axis);
diff --git a/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp b/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp
index cd931e3797..ff777dbf9b 100644
--- a/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp
+++ b/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp
@@ -7,8 +7,9 @@
#include "NeonWorkloadUtils.hpp"
-#include <backendsCommon/CpuTensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
#include <arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h>
@@ -53,8 +54,8 @@ NeonBatchNormalizationWorkload::NeonBatchNormalizationWorkload(
{
m_Data.ValidateInputsOutputs("NeonBatchNormalizationWorkload", 1, 1);
- arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
input.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/neon/workloads/NeonConstantWorkload.cpp b/src/backends/neon/workloads/NeonConstantWorkload.cpp
index b9cb807779..1cffbe1448 100644
--- a/src/backends/neon/workloads/NeonConstantWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConstantWorkload.cpp
@@ -9,6 +9,7 @@
#include <BFloat16.hpp>
#include <Half.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <neon/NeonTensorHandle.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/Workload.hpp>
@@ -41,9 +42,9 @@ void NeonConstantWorkload::Execute() const
ARMNN_ASSERT(data.m_LayerOutput != nullptr);
arm_compute::ITensor& output =
- boost::polymorphic_downcast<NeonTensorHandle*>(data.m_Outputs[0])->GetTensor();
+ PolymorphicDowncast<NeonTensorHandle*>(data.m_Outputs[0])->GetTensor();
arm_compute::DataType computeDataType =
- boost::polymorphic_downcast<NeonTensorHandle*>(data.m_Outputs[0])->GetDataType();
+ PolymorphicDowncast<NeonTensorHandle*>(data.m_Outputs[0])->GetDataType();
switch (computeDataType)
{
diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
index 5d45642eef..144baec0ca 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
@@ -5,8 +5,9 @@
#include "NeonConvolution2dWorkload.hpp"
-#include <backendsCommon/CpuTensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
#include <neon/workloads/NeonWorkloadUtils.hpp>
#include <arm_compute/runtime/NEON/functions/NEConvolutionLayer.h>
@@ -65,8 +66,8 @@ NeonConvolution2dWorkload::NeonConvolution2dWorkload(
// todo: check tensor shapes match.
- arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
input.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/neon/workloads/NeonDequantizeWorkload.cpp b/src/backends/neon/workloads/NeonDequantizeWorkload.cpp
index 8b229a1cda..9ae82ff79f 100644
--- a/src/backends/neon/workloads/NeonDequantizeWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDequantizeWorkload.cpp
@@ -10,6 +10,7 @@
#include <arm_compute/runtime/NEON/functions/NEDequantizationLayer.h>
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <neon/NeonTensorHandle.hpp>
@@ -32,8 +33,8 @@ NeonDequantizeWorkload::NeonDequantizeWorkload(const DequantizeQueueDescriptor&
{
m_Data.ValidateInputsOutputs("NeonDequantizeWorkload", 1, 1);
- arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
std::unique_ptr<arm_compute::NEDequantizationLayer> layer(new arm_compute::NEDequantizationLayer());
layer->configure(&input, &output);
diff --git a/src/backends/neon/workloads/NeonDetectionPostProcessWorkload.cpp b/src/backends/neon/workloads/NeonDetectionPostProcessWorkload.cpp
index 2ed47e4463..36f1cd98de 100644
--- a/src/backends/neon/workloads/NeonDetectionPostProcessWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDetectionPostProcessWorkload.cpp
@@ -9,8 +9,7 @@
#include <aclCommon/ArmComputeTensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
-
-#include <boost/cast.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
namespace armnn
{
@@ -85,7 +84,7 @@ NeonDetectionPostProcessWorkload::NeonDetectionPostProcessWorkload(
auto AclTensorRef = [](ITensorHandle* tensor) -> arm_compute::ITensor&
{
- return boost::polymorphic_downcast<IAclTensorHandle*>(tensor)->GetTensor();
+ return PolymorphicDowncast<IAclTensorHandle*>(tensor)->GetTensor();
};
arm_compute::ITensor& boxEncodings = AclTensorRef(m_Data.m_Inputs[0]);
diff --git a/src/backends/neon/workloads/NeonDivisionWorkload.cpp b/src/backends/neon/workloads/NeonDivisionWorkload.cpp
index 6fdb455f25..fc353f136d 100644
--- a/src/backends/neon/workloads/NeonDivisionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDivisionWorkload.cpp
@@ -4,7 +4,9 @@
//
#include "NeonDivisionWorkload.hpp"
+
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
namespace armnn
@@ -29,9 +31,9 @@ NeonDivisionWorkload::NeonDivisionWorkload(const DivisionQueueDescriptor& descri
{
m_Data.ValidateInputsOutputs("NeonDivisionWorkload", 2, 1);
- arm_compute::ITensor& input0 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input0 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& input1 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
m_DivLayer.configure(&input0, &input1, &output);
}
diff --git a/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp b/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp
index 5b4e9094fd..c49df33a54 100644
--- a/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp
@@ -7,9 +7,9 @@
#include "NeonWorkloadUtils.hpp"
-#include <arm_compute/runtime/NEON/functions/NEFloor.h>
+#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <boost/polymorphic_cast.hpp>
+#include <arm_compute/runtime/NEON/functions/NEFloor.h>
namespace armnn
{
@@ -19,8 +19,8 @@ NeonFloorFloatWorkload::NeonFloorFloatWorkload(const FloorQueueDescriptor& descr
{
m_Data.ValidateInputsOutputs("NeonFloorFloatWorkload", 1, 1);
- arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
auto layer = std::make_unique<arm_compute::NEFloor>();
layer->configure(&input, &output);
diff --git a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
index 338c7eb1f6..e808c60c0c 100644
--- a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
@@ -8,6 +8,7 @@
#include "NeonWorkloadUtils.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h>
@@ -51,8 +52,8 @@ NeonFullyConnectedWorkload::NeonFullyConnectedWorkload(const FullyConnectedQueue
{
m_Data.ValidateInputsOutputs("NeonFullyConnectedWorkload", 1, 1);
- arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
m_WeightsTensor = std::make_unique<arm_compute::Tensor>();
BuildArmComputeTensor(*m_WeightsTensor, m_Data.m_Weight->GetTensorInfo());
diff --git a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
index 9de6c82702..d54607d31e 100644
--- a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
@@ -8,6 +8,7 @@
#include "NeonWorkloadUtils.hpp"
#include <aclCommon/ArmComputeUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <arm_compute/runtime/NEON/functions/NEL2NormalizeLayer.h>
@@ -33,8 +34,8 @@ NeonL2NormalizationFloatWorkload::NeonL2NormalizationFloatWorkload(const L2Norma
{
m_Data.ValidateInputsOutputs("NeonL2NormalizationFloatWorkload", 1, 1);
- arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
input.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/neon/workloads/NeonMaximumWorkload.cpp b/src/backends/neon/workloads/NeonMaximumWorkload.cpp
index c433d81973..46d500bfdc 100644
--- a/src/backends/neon/workloads/NeonMaximumWorkload.cpp
+++ b/src/backends/neon/workloads/NeonMaximumWorkload.cpp
@@ -5,6 +5,7 @@
#include "NeonMaximumWorkload.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
namespace armnn
@@ -29,9 +30,9 @@ NeonMaximumWorkload::NeonMaximumWorkload(const MaximumQueueDescriptor& descripto
{
m_Data.ValidateInputsOutputs("NeonMaximumWorkload", 2, 1);
- arm_compute::ITensor& input0 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input0 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& input1 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
m_MaxLayer.configure(&input0, &input1, &output);
}
diff --git a/src/backends/neon/workloads/NeonMinimumWorkload.cpp b/src/backends/neon/workloads/NeonMinimumWorkload.cpp
index 2867a8079f..53e483a182 100644
--- a/src/backends/neon/workloads/NeonMinimumWorkload.cpp
+++ b/src/backends/neon/workloads/NeonMinimumWorkload.cpp
@@ -4,7 +4,9 @@
//
#include "NeonMinimumWorkload.hpp"
+
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
namespace armnn
@@ -29,9 +31,9 @@ NeonMinimumWorkload::NeonMinimumWorkload(const MinimumQueueDescriptor& descripto
{
m_Data.ValidateInputsOutputs("NeonMinimumWorkload", 2, 1);
- arm_compute::ITensor& input0 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input0 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& input1 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
m_MinLayer.configure(&input0, &input1, &output);
}
diff --git a/src/backends/neon/workloads/NeonMultiplicationWorkload.cpp b/src/backends/neon/workloads/NeonMultiplicationWorkload.cpp
index 66fbedfa63..d813970901 100644
--- a/src/backends/neon/workloads/NeonMultiplicationWorkload.cpp
+++ b/src/backends/neon/workloads/NeonMultiplicationWorkload.cpp
@@ -7,6 +7,8 @@
#include "NeonWorkloadUtils.hpp"
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
#include <arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h>
namespace armnn
@@ -37,9 +39,9 @@ NeonMultiplicationWorkload::NeonMultiplicationWorkload(const MultiplicationQueue
{
m_Data.ValidateInputsOutputs("NeonMultiplicationWorkload", 2, 1);
- arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& input2 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input1 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& input2 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
// At the time of writing, configure() will fail if a rounding policy other than TO_ZERO is supplied to it,
// when providing a scale of 1.0 for F32 tensors, even though the provided rounding policy appears to be
diff --git a/src/backends/neon/workloads/NeonNegWorkload.cpp b/src/backends/neon/workloads/NeonNegWorkload.cpp
index afe05583fd..06c146754c 100644
--- a/src/backends/neon/workloads/NeonNegWorkload.cpp
+++ b/src/backends/neon/workloads/NeonNegWorkload.cpp
@@ -9,8 +9,7 @@
#include <aclCommon/ArmComputeTensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
-
-#include <boost/cast.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
namespace armnn
{
@@ -28,8 +27,8 @@ NeonNegWorkload::NeonNegWorkload(const ElementwiseUnaryQueueDescriptor& descript
{
m_Data.ValidateInputsOutputs("NeonNegWorkload", 1, 1);
- arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
m_NegLayer.configure(&input, &output);
}
diff --git a/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
index 8cb4ec975d..77fc429b95 100644
--- a/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
@@ -8,6 +8,7 @@
#include "NeonWorkloadUtils.hpp"
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <arm_compute/runtime/NEON/functions/NENormalizationLayer.h>
@@ -77,8 +78,8 @@ NeonNormalizationFloatWorkload::NeonNormalizationFloatWorkload(const Normalizati
throw InvalidArgumentException("Normalization requires input and output tensors to have equal dimensionality.");
}
- arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
input.info()->set_data_layout(aclDataLayout);
output.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/neon/workloads/NeonPooling2dWorkload.cpp b/src/backends/neon/workloads/NeonPooling2dWorkload.cpp
index 9934c29a41..968d5ce02d 100644
--- a/src/backends/neon/workloads/NeonPooling2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonPooling2dWorkload.cpp
@@ -7,6 +7,8 @@
#include "NeonWorkloadUtils.hpp"
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
#include <neon/NeonTensorHandle.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
@@ -37,8 +39,8 @@ NeonPooling2dWorkload::NeonPooling2dWorkload(
{
m_Data.ValidateInputsOutputs("NeonPooling2dWorkload", 1, 1);
- arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
input.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/neon/workloads/NeonPreluWorkload.cpp b/src/backends/neon/workloads/NeonPreluWorkload.cpp
index 107090e704..8e6ea301de 100644
--- a/src/backends/neon/workloads/NeonPreluWorkload.cpp
+++ b/src/backends/neon/workloads/NeonPreluWorkload.cpp
@@ -5,7 +5,9 @@
#include "NeonPreluWorkload.hpp"
#include "NeonWorkloadUtils.hpp"
+
#include <aclCommon/ArmComputeUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <arm_compute/runtime/NEON/functions/NEPReluLayer.h>
@@ -31,9 +33,9 @@ NeonPreluWorkload::NeonPreluWorkload(const PreluQueueDescriptor& descriptor,
{
m_Data.ValidateInputsOutputs("NeonPreluWorkload", 1, 1);
- arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& alpha = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& alpha = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
auto layer = std::make_unique<arm_compute::NEPReluLayer>();
layer->configure(&input, &alpha, &output);
diff --git a/src/backends/neon/workloads/NeonReshapeWorkload.cpp b/src/backends/neon/workloads/NeonReshapeWorkload.cpp
index 659bb94723..8b11da7253 100644
--- a/src/backends/neon/workloads/NeonReshapeWorkload.cpp
+++ b/src/backends/neon/workloads/NeonReshapeWorkload.cpp
@@ -7,9 +7,9 @@
#include "NeonWorkloadUtils.hpp"
-#include <arm_compute/runtime/NEON/functions/NEReshapeLayer.h>
+#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <boost/polymorphic_cast.hpp>
+#include <arm_compute/runtime/NEON/functions/NEReshapeLayer.h>
namespace armnn
{
@@ -29,8 +29,8 @@ NeonReshapeWorkload::NeonReshapeWorkload(const ReshapeQueueDescriptor& descripto
{
m_Data.ValidateInputsOutputs("NeonReshapeWorkload", 1, 1);
- arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
auto layer = std::make_unique<arm_compute::NEReshapeLayer>();
layer->configure(&input, &output);
diff --git a/src/backends/neon/workloads/NeonResizeWorkload.cpp b/src/backends/neon/workloads/NeonResizeWorkload.cpp
index e936ab7446..9e3be2655c 100644
--- a/src/backends/neon/workloads/NeonResizeWorkload.cpp
+++ b/src/backends/neon/workloads/NeonResizeWorkload.cpp
@@ -9,7 +9,9 @@
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
+
#include <neon/NeonTensorHandle.hpp>
using namespace armnn::armcomputetensorutils;
@@ -45,8 +47,8 @@ NeonResizeWorkload::NeonResizeWorkload(const ResizeQueueDescriptor& descriptor,
{
m_Data.ValidateInputsOutputs("NeonResizeWorkload", 1, 1);
- arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
input.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/neon/workloads/NeonRsqrtWorkload.cpp b/src/backends/neon/workloads/NeonRsqrtWorkload.cpp
index b6292833dd..44980df996 100644
--- a/src/backends/neon/workloads/NeonRsqrtWorkload.cpp
+++ b/src/backends/neon/workloads/NeonRsqrtWorkload.cpp
@@ -9,8 +9,8 @@
#include <aclCommon/ArmComputeTensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <boost/cast.hpp>
namespace armnn
{
@@ -28,8 +28,8 @@ NeonRsqrtWorkload::NeonRsqrtWorkload(const RsqrtQueueDescriptor& descriptor, con
{
m_Data.ValidateInputsOutputs("NeonRsqrtWorkload", 1, 1);
- arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
m_RsqrtLayer.configure(&input, &output);
}
diff --git a/src/backends/neon/workloads/NeonSliceWorkload.cpp b/src/backends/neon/workloads/NeonSliceWorkload.cpp
index 171edc6c59..32cc042eab 100644
--- a/src/backends/neon/workloads/NeonSliceWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSliceWorkload.cpp
@@ -7,6 +7,8 @@
#include "NeonWorkloadUtils.hpp"
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <neon/NeonTensorHandle.hpp>
@@ -37,8 +39,8 @@ NeonSliceWorkload::NeonSliceWorkload(const SliceQueueDescriptor& descriptor,
{
m_Data.ValidateInputsOutputs("NeonSliceWorkload", 1, 1);
- arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
arm_compute::Coordinates starts;
arm_compute::Coordinates ends;
diff --git a/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp b/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
index 152d19cc04..a4690a7985 100644
--- a/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
@@ -8,6 +8,8 @@
#include "NeonWorkloadUtils.hpp"
#include <aclCommon/ArmComputeUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
#include <arm_compute/runtime/NEON/functions/NESoftmaxLayer.h>
namespace armnn
@@ -20,8 +22,8 @@ NeonSoftmaxFloatWorkload::NeonSoftmaxFloatWorkload(const SoftmaxQueueDescriptor&
m_Data.ValidateInputsOutputs("NeonSoftmaxFloatWorkload", 1, 1);
// The ArmCompute softmax layer uses 2D input/output tensors, so flatten the first three dimensions.
- arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
unsigned int aclAxis = ComputeSoftmaxAclAxis(m_Data.m_Parameters, info.m_InputTensorInfos[0]);
diff --git a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
index 15a7066861..05d93b963c 100644
--- a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
@@ -7,6 +7,7 @@
#include "NeonWorkloadUtils.hpp"
#include <aclCommon/ArmComputeUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <arm_compute/runtime/NEON/functions/NESoftmaxLayer.h>
@@ -20,8 +21,8 @@ NeonSoftmaxUint8Workload::NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor&
{
m_Data.ValidateInputsOutputs("NeonSoftmaxUint8Workload", 1, 1);
- arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
const auto outputQuantization = output.info()->quantization_info();
diff --git a/src/backends/neon/workloads/NeonSpaceToDepthWorkload.cpp b/src/backends/neon/workloads/NeonSpaceToDepthWorkload.cpp
index a4204b21e6..2982cd181d 100644
--- a/src/backends/neon/workloads/NeonSpaceToDepthWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSpaceToDepthWorkload.cpp
@@ -5,6 +5,8 @@
#include "NeonSpaceToDepthWorkload.hpp"
#include "NeonWorkloadUtils.hpp"
+
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <ResolveType.hpp>
namespace armnn
@@ -33,12 +35,12 @@ NeonSpaceToDepthWorkload::NeonSpaceToDepthWorkload(const SpaceToDepthQueueDescri
arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
- arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
input.info()->set_data_layout(aclDataLayout);
int32_t blockSize = boost::numeric_cast<int32_t>(desc.m_Parameters.m_BlockSize);
- arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
output.info()->set_data_layout(aclDataLayout);
m_Layer.reset(new arm_compute::NESpaceToDepthLayer());
diff --git a/src/backends/neon/workloads/NeonSplitterWorkload.cpp b/src/backends/neon/workloads/NeonSplitterWorkload.cpp
index 224e97af2d..19fa7c6389 100644
--- a/src/backends/neon/workloads/NeonSplitterWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSplitterWorkload.cpp
@@ -9,6 +9,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <neon/NeonTensorHandle.hpp>
@@ -74,7 +75,7 @@ NeonSplitterWorkload::NeonSplitterWorkload(const SplitterQueueDescriptor& descri
return;
}
- arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
std::vector<arm_compute::ITensor *> aclOutputs;
for (auto output : m_Data.m_Outputs)
diff --git a/src/backends/neon/workloads/NeonStridedSliceWorkload.cpp b/src/backends/neon/workloads/NeonStridedSliceWorkload.cpp
index 356c0aea83..282005c7cc 100644
--- a/src/backends/neon/workloads/NeonStridedSliceWorkload.cpp
+++ b/src/backends/neon/workloads/NeonStridedSliceWorkload.cpp
@@ -9,6 +9,7 @@
#include <neon/NeonTensorHandle.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/WorkloadUtils.hpp>
namespace armnn
@@ -50,8 +51,8 @@ NeonStridedSliceWorkload::NeonStridedSliceWorkload(const StridedSliceQueueDescri
{
m_Data.ValidateInputsOutputs("NeonStridedSliceWorkload", 1, 1);
- arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
arm_compute::Coordinates starts;
arm_compute::Coordinates ends;
diff --git a/src/backends/neon/workloads/NeonSubtractionWorkload.cpp b/src/backends/neon/workloads/NeonSubtractionWorkload.cpp
index f4b4707633..ccc2bfe58b 100644
--- a/src/backends/neon/workloads/NeonSubtractionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSubtractionWorkload.cpp
@@ -7,6 +7,7 @@
#include "NeonWorkloadUtils.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h>
@@ -34,9 +35,9 @@ NeonSubtractionWorkload::NeonSubtractionWorkload(const SubtractionQueueDescripto
{
m_Data.ValidateInputsOutputs("NeonSubtractionWorkload", 2, 1);
- arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& input2 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input1 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& input2 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
auto layer = std::make_unique<arm_compute::NEArithmeticSubtraction>();
layer->configure(&input1, &input2, &output, arm_compute::ConvertPolicy::SATURATE);
diff --git a/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
index ffca2076fe..985f540e6a 100644
--- a/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
@@ -9,6 +9,7 @@
#include <Profiling.hpp>
#include <armnn/Types.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
@@ -60,8 +61,8 @@ NeonTransposeConvolution2dWorkload::NeonTransposeConvolution2dWorkload(
{
m_Data.ValidateInputsOutputs("NeonTransposeConvolution2dWorkload", 1, 1);
- arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
input.info()->set_data_layout(aclDataLayout);