path: root/src/backends/cl/workloads
author     Jan Eilers <jan.eilers@arm.com>    2020-04-10 13:00:44 +0100
committer  Jan Eilers <jan.eilers@arm.com>    2020-04-14 09:24:26 +0100
commit     3c9e04563b9fb7d7aadc61834909a9ffc6b1769c (patch)
tree       367af149439d56a01eba1b522b7486aca5d56012 /src/backends/cl/workloads
parent     76bc728bc1681ed216ffe6f7720f3f57b5137fab (diff)
download   armnn-3c9e04563b9fb7d7aadc61834909a9ffc6b1769c.tar.gz
IVGCVSW-4483 Removes boost::polymorphic_pointer_downcast
* Replace boost::polymorphic_pointer_downcast with armnn::PolymorphicPointerDowncast
* Replace or remove the corresponding includes

Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Change-Id: I0ef934a3804cf05e4c38dec6c4ec49c76111a302
Diffstat (limited to 'src/backends/cl/workloads')
-rw-r--r--   src/backends/cl/workloads/ClConcatWorkload.cpp          10
-rw-r--r--   src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp     6
-rw-r--r--   src/backends/cl/workloads/ClDequantizeWorkload.cpp       7
-rw-r--r--   src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp   7
-rw-r--r--   src/backends/cl/workloads/ClSplitterWorkload.cpp         5
-rw-r--r--   src/backends/cl/workloads/ClStackWorkload.cpp            6
6 files changed, 20 insertions(+), 21 deletions(-)
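
For context: armnn::PolymorphicPointerDowncast, pulled in via <armnn/utility/PolymorphicDowncast.hpp>, is used in the hunks below as a drop-in replacement for boost::polymorphic_pointer_downcast, i.e. a pointer downcast that is validated in debug builds and compiles down to a plain static_cast otherwise. The snippet below is only a minimal sketch of that pattern to make the diff easier to follow; the sketch's name, assert macro, and exact signature are assumptions, not a copy of the real Arm NN implementation.

    #include <cassert>
    #include <type_traits>

    // Sketch of a debug-checked pointer downcast in the spirit of
    // armnn::PolymorphicPointerDowncast / boost::polymorphic_pointer_downcast.
    // DestType is given explicitly at the call site; SourceType is deduced.
    template <typename DestType, typename SourceType>
    DestType* PolymorphicPointerDowncastSketch(SourceType* value)
    {
        static_assert(std::is_base_of<SourceType, DestType>::value,
                      "DestType must derive from SourceType");
        // Debug builds verify the downcast is valid; release builds skip the check.
        assert(value == nullptr || dynamic_cast<DestType*>(value) != nullptr);
        return static_cast<DestType*>(value);
    }

With such a helper, the call sites in the workloads change only in the function being used, for example:

    arm_compute::ICLTensor& aclInput =
        armnn::PolymorphicPointerDowncast<IClTensorHandle>(input)->GetTensor();
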
diff --git a/src/backends/cl/workloads/ClConcatWorkload.cpp b/src/backends/cl/workloads/ClConcatWorkload.cpp
index 5370466163..e0aebd30cb 100644
--- a/src/backends/cl/workloads/ClConcatWorkload.cpp
+++ b/src/backends/cl/workloads/ClConcatWorkload.cpp
@@ -5,6 +5,7 @@
#include "ClConcatWorkload.hpp"
#include "ClWorkloadUtils.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <cl/ClTensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
@@ -12,8 +13,6 @@
#include <arm_compute/core/Types.h>
#include <arm_compute/runtime/CL/functions/CLConcatenateLayer.h>
-#include <boost/polymorphic_pointer_cast.hpp>
-
namespace armnn
{
using namespace armcomputetensorutils;
@@ -72,11 +71,12 @@ ClConcatWorkload::ClConcatWorkload(const ConcatQueueDescriptor& descriptor, cons
std::vector<arm_compute::ICLTensor *> aclInputs;
for (auto input : m_Data.m_Inputs)
{
- arm_compute::ICLTensor& aclInput = boost::polymorphic_pointer_downcast<IClTensorHandle>(input)->GetTensor();
+ arm_compute::ICLTensor& aclInput = armnn::PolymorphicPointerDowncast<IClTensorHandle>(input)->GetTensor();
aclInputs.emplace_back(&aclInput);
}
- arm_compute::ICLTensor& output = boost::polymorphic_pointer_downcast<IClTensorHandle>(
- m_Data.m_Outputs[0])->GetTensor();
+
+ arm_compute::ICLTensor& output =
+ armnn::PolymorphicPointerDowncast<IClTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
// Create the layer function
auto layer = std::make_unique<arm_compute::CLConcatenateLayer>();
diff --git a/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp b/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp
index 800a98409e..04885b18aa 100644
--- a/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp
@@ -8,11 +8,11 @@
#include "ClWorkloadUtils.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <cl/ClTensorHandle.hpp>
#include <boost/numeric/conversion/cast.hpp>
-#include <boost/polymorphic_pointer_cast.hpp>
namespace armnn
{
@@ -45,13 +45,13 @@ ClDepthToSpaceWorkload::ClDepthToSpaceWorkload(const DepthToSpaceQueueDescriptor
arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
arm_compute::ICLTensor& input =
- boost::polymorphic_pointer_downcast<IClTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
+ PolymorphicPointerDowncast<IClTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
input.info()->set_data_layout(aclDataLayout);
int32_t blockSize = boost::numeric_cast<int32_t>(desc.m_Parameters.m_BlockSize);
arm_compute::ICLTensor& output =
- boost::polymorphic_pointer_downcast<IClTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
+ PolymorphicPointerDowncast<IClTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
output.info()->set_data_layout(aclDataLayout);
m_Layer.configure(&input, &output, blockSize);
diff --git a/src/backends/cl/workloads/ClDequantizeWorkload.cpp b/src/backends/cl/workloads/ClDequantizeWorkload.cpp
index eca795de7e..eb63900380 100644
--- a/src/backends/cl/workloads/ClDequantizeWorkload.cpp
+++ b/src/backends/cl/workloads/ClDequantizeWorkload.cpp
@@ -7,6 +7,7 @@
#include "ClWorkloadUtils.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <arm_compute/core/Types.h>
@@ -14,8 +15,6 @@
#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>
-#include <boost/polymorphic_pointer_cast.hpp>
-
namespace armnn
{
using namespace armcomputetensorutils;
@@ -34,10 +33,10 @@ ClDequantizeWorkload::ClDequantizeWorkload(const DequantizeQueueDescriptor& desc
{
m_Data.ValidateInputsOutputs("ClDequantizeWorkload", 1, 1);
- arm_compute::ICLTensor& input = boost::polymorphic_pointer_downcast<IClTensorHandle>(
+ arm_compute::ICLTensor& input = armnn::PolymorphicPointerDowncast<IClTensorHandle>(
m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& output = boost::polymorphic_pointer_downcast<IClTensorHandle>(
+ arm_compute::ICLTensor& output = armnn::PolymorphicPointerDowncast<IClTensorHandle>(
m_Data.m_Outputs[0])->GetTensor();
m_Layer.reset(new arm_compute::CLDequantizationLayer());
diff --git a/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp b/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp
index 64da92c815..b87658b3f9 100644
--- a/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp
+++ b/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp
@@ -9,13 +9,12 @@
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
-#include <boost/polymorphic_pointer_cast.hpp>
-
namespace armnn
{
using namespace armcomputetensorutils;
@@ -51,9 +50,9 @@ ClSpaceToBatchNdWorkload::ClSpaceToBatchNdWorkload(
m_Data.ValidateInputsOutputs("ClSpaceToBatchNdWorkload", 1, 1);
arm_compute::ICLTensor& input =
- boost::polymorphic_pointer_downcast<IClTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
+ armnn::PolymorphicPointerDowncast<IClTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ICLTensor& output =
- boost::polymorphic_pointer_downcast<IClTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
+ armnn::PolymorphicPointerDowncast<IClTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
// ArmNN blockShape is [H, W] Cl asks for W, H
int32_t blockHeight = boost::numeric_cast<int32_t>(m_Data.m_Parameters.m_BlockShape[0]);
diff --git a/src/backends/cl/workloads/ClSplitterWorkload.cpp b/src/backends/cl/workloads/ClSplitterWorkload.cpp
index 296e0a3dde..045fbb7595 100644
--- a/src/backends/cl/workloads/ClSplitterWorkload.cpp
+++ b/src/backends/cl/workloads/ClSplitterWorkload.cpp
@@ -10,6 +10,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <arm_compute/runtime/CL/functions/CLSplit.h>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <cl/ClTensorHandle.hpp>
@@ -74,13 +75,13 @@ ClSplitterWorkload::ClSplitterWorkload(const SplitterQueueDescriptor& descriptor
return;
}
- arm_compute::ICLTensor& input = boost::polymorphic_pointer_downcast<IClTensorHandle>(
+ arm_compute::ICLTensor& input = armnn::PolymorphicPointerDowncast<IClTensorHandle>(
m_Data.m_Inputs[0])->GetTensor();
std::vector<arm_compute::ICLTensor *> aclOutputs;
for (auto output : m_Data.m_Outputs)
{
- arm_compute::ICLTensor& aclOutput = boost::polymorphic_pointer_downcast<IClTensorHandle>(output)->GetTensor();
+ arm_compute::ICLTensor& aclOutput = armnn::PolymorphicPointerDowncast<IClTensorHandle>(output)->GetTensor();
aclOutputs.emplace_back(&aclOutput);
}
diff --git a/src/backends/cl/workloads/ClStackWorkload.cpp b/src/backends/cl/workloads/ClStackWorkload.cpp
index 3ba698ec4d..e434f9897f 100644
--- a/src/backends/cl/workloads/ClStackWorkload.cpp
+++ b/src/backends/cl/workloads/ClStackWorkload.cpp
@@ -5,6 +5,7 @@
#include "ClStackWorkload.hpp"
#include "ClWorkloadUtils.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <cl/ClTensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
@@ -12,7 +13,6 @@
#include <arm_compute/core/Types.h>
#include <boost/numeric/conversion/cast.hpp>
-#include <boost/polymorphic_pointer_cast.hpp>
namespace armnn
{
@@ -51,10 +51,10 @@ ClStackWorkload::ClStackWorkload(const StackQueueDescriptor& descriptor, const W
std::vector<arm_compute::ICLTensor*> aclInputs;
for (auto input : m_Data.m_Inputs)
{
- arm_compute::ICLTensor& aclInput = boost::polymorphic_pointer_downcast<IClTensorHandle>(input)->GetTensor();
+ arm_compute::ICLTensor& aclInput = armnn::PolymorphicPointerDowncast<IClTensorHandle>(input)->GetTensor();
aclInputs.emplace_back(&aclInput);
}
- arm_compute::ICLTensor& output = boost::polymorphic_pointer_downcast<IClTensorHandle>(
+ arm_compute::ICLTensor& output = armnn::PolymorphicPointerDowncast<IClTensorHandle>(
m_Data.m_Outputs[0])->GetTensor();
m_Layer.reset(new arm_compute::CLStackLayer());