author     Jan Eilers <jan.eilers@arm.com>    2020-04-10 13:00:44 +0100
committer  Jan Eilers <jan.eilers@arm.com>    2020-04-14 09:24:26 +0100
commit     3c9e04563b9fb7d7aadc61834909a9ffc6b1769c (patch)
tree       367af149439d56a01eba1b522b7486aca5d56012 /src/backends/neon/workloads/NeonQuantizeWorkload.cpp
parent     76bc728bc1681ed216ffe6f7720f3f57b5137fab (diff)
download   armnn-3c9e04563b9fb7d7aadc61834909a9ffc6b1769c.tar.gz
IVGCVSW-4483 Removes boost::polymorphic_pointer_downcast
* Replace boost::polymorphic_pointer_downcast by PolymorphicPointerDowncast
* Replaced/removed includes

Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Change-Id: I0ef934a3804cf05e4c38dec6c4ec49c76111a302
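For context, a minimal sketch of the idea behind such a boost-free checked downcast is shown below. It is not the contents of armnn/utility/PolymorphicDowncast.hpp; the name PolymorphicPointerDowncastSketch and the exact checking strategy are illustrative assumptions. It keeps the call shape of boost::polymorphic_pointer_downcast<Derived>(ptr): a cheap static_cast, backed by a dynamic_cast consistency check in debug builds.

// Minimal sketch, assuming boost::polymorphic_pointer_downcast-style semantics;
// not the actual ArmNN implementation.
#include <cassert>

template <typename DestType, typename SourceType>
DestType* PolymorphicPointerDowncastSketch(SourceType* value)
{
    // Debug-only sanity check: the static downcast must agree with the
    // dynamic one, i.e. *value really is a DestType (or value is nullptr).
    assert(dynamic_cast<DestType*>(value) == static_cast<DestType*>(value));
    return static_cast<DestType*>(value);
}

// Hypothetical usage mirroring the call sites touched by this diff:
//   arm_compute::ITensor& input =
//       PolymorphicPointerDowncastSketch<IAclTensorHandle>(m_Data.m_Inputs[0])->GetTensor();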
Diffstat (limited to 'src/backends/neon/workloads/NeonQuantizeWorkload.cpp')
-rw-r--r--  src/backends/neon/workloads/NeonQuantizeWorkload.cpp | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/src/backends/neon/workloads/NeonQuantizeWorkload.cpp b/src/backends/neon/workloads/NeonQuantizeWorkload.cpp
index 4f3ea2c3c1..14fbdf3dd9 100644
--- a/src/backends/neon/workloads/NeonQuantizeWorkload.cpp
+++ b/src/backends/neon/workloads/NeonQuantizeWorkload.cpp
@@ -8,10 +8,9 @@
 
 #include <neon/NeonTensorHandle.hpp>
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <arm_compute/core/Types.h>
 
-#include <boost/polymorphic_pointer_cast.hpp>
-
 namespace armnn
 {
 using namespace armcomputetensorutils;
@@ -30,9 +29,9 @@ NeonQuantizeWorkload::NeonQuantizeWorkload(const QuantizeQueueDescriptor& descri
 {
     m_Data.ValidateInputsOutputs("NeonQuantizeWorkload", 1, 1);
 
-    arm_compute::ITensor& input = boost::polymorphic_pointer_downcast<IAclTensorHandle>(
+    arm_compute::ITensor& input = PolymorphicPointerDowncast<IAclTensorHandle>(
         m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_pointer_downcast<IAclTensorHandle>(
+    arm_compute::ITensor& output = PolymorphicPointerDowncast<IAclTensorHandle>(
         m_Data.m_Outputs[0])->GetTensor();
 
     m_Layer.reset(new arm_compute::NEQuantizationLayer());