path: root/src/backends/neon/workloads/NeonReshapeWorkload.cpp
author     Jan Eilers <jan.eilers@arm.com>    2020-04-02 13:56:54 +0100
committer  Jan Eilers <jan.eilers@arm.com>    2020-04-10 10:11:11 +0100
commit     bb446e576e120512d5752a5d6dc1ddc636f563ba (patch)
tree       147d0b5f2886af208199a24704afd845a4825bf8 /src/backends/neon/workloads/NeonReshapeWorkload.cpp
parent     e5d0b93b152a26faf93538eb719d03e5b477d670 (diff)
download   armnn-bb446e576e120512d5752a5d6dc1ddc636f563ba.tar.gz
IVGCVSW-4483 Remove boost::polymorphic_downcast
* exchange boost::polymorphic_downcast with armnn::PolymorphicDowncast
* remove unnecessary includes of boost::polymorphic_downcast

Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Change-Id: Ie603fb82860fe05fee547dc78073230cc62b2e1f
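For reference, downcast helpers of this kind typically pair a cheap static_cast with a dynamic_cast check that is only active in debug builds. The sketch below illustrates that pattern only; the helper name CheckedDowncast and the assert-based check are assumptions for illustration, not the actual armnn::PolymorphicDowncast implementation.

#include <cassert>
#include <type_traits>

// Hypothetical debug-checked downcast, sketched for illustration.
// In release builds the assert compiles away, leaving only the static_cast.
template <typename DestType, typename SourceType>
DestType CheckedDowncast(SourceType* value)
{
    static_assert(std::is_pointer<DestType>::value,
                  "CheckedDowncast: DestType must be a pointer type");
    // Verify in debug builds that the static downcast agrees with dynamic_cast.
    assert(dynamic_cast<DestType>(value) == static_cast<DestType>(value));
    return static_cast<DestType>(value);
}

// Usage in the spirit of the change in this diff:
//     arm_compute::ITensor& input =
//         CheckedDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();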
Diffstat (limited to 'src/backends/neon/workloads/NeonReshapeWorkload.cpp')
-rw-r--r--    src/backends/neon/workloads/NeonReshapeWorkload.cpp    8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/src/backends/neon/workloads/NeonReshapeWorkload.cpp b/src/backends/neon/workloads/NeonReshapeWorkload.cpp
index 659bb94723..8b11da7253 100644
--- a/src/backends/neon/workloads/NeonReshapeWorkload.cpp
+++ b/src/backends/neon/workloads/NeonReshapeWorkload.cpp
@@ -7,9 +7,9 @@
 
 #include "NeonWorkloadUtils.hpp"
 
-#include <arm_compute/runtime/NEON/functions/NEReshapeLayer.h>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 
-#include <boost/polymorphic_cast.hpp>
+#include <arm_compute/runtime/NEON/functions/NEReshapeLayer.h>
 
 namespace armnn
 {
@@ -29,8 +29,8 @@ NeonReshapeWorkload::NeonReshapeWorkload(const ReshapeQueueDescriptor& descripto
 {
     m_Data.ValidateInputsOutputs("NeonReshapeWorkload", 1, 1);
 
-    arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     auto layer = std::make_unique<arm_compute::NEReshapeLayer>();
     layer->configure(&input, &output);