Diffstat (limited to 'src/backends/cl')
-rw-r--r--  src/backends/cl/ClBackendContext.cpp                  |   5
-rw-r--r--  src/backends/cl/ClTensorHandleFactory.cpp             |   6
-rw-r--r--  src/backends/cl/ClWorkloadFactory.cpp                 |   4
-rw-r--r--  src/backends/cl/test/ClCreateWorkloadTests.cpp        | 114
-rw-r--r--  src/backends/cl/workloads/ClAbsWorkload.cpp           |   6
-rw-r--r--  src/backends/cl/workloads/ClNegWorkload.cpp           |   5
-rw-r--r--  src/backends/cl/workloads/ClRsqrtWorkload.cpp         |   5
-rw-r--r--  src/backends/cl/workloads/ClSliceWorkload.cpp         |   5
-rw-r--r--  src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp  |   1
9 files changed, 77 insertions, 74 deletions
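
Note: the diff below replaces boost::polymorphic_downcast (and the boost/polymorphic_pointer_cast.hpp include) with the project's own PolymorphicDowncast from armnn/utility/PolymorphicDowncast.hpp, reducing Boost usage in the CL backend. As a rough illustration only (an assumed sketch, not the actual contents of the armnn header), a checked downcast of this kind is typically a static_cast that is verified against dynamic_cast when assertions are enabled:

#include <cassert>
#include <type_traits>

// Illustrative sketch of a checked polymorphic downcast (an assumption, not armnn's real code).
// Release builds pay only for a static_cast; debug builds assert that dynamic_cast agrees,
// catching casts to the wrong derived type.
template <typename DestType, typename SourceType>
DestType PolymorphicDowncastSketch(SourceType* value)
{
    static_assert(std::is_pointer<DestType>::value, "DestType must be a pointer type");
    assert(dynamic_cast<DestType>(value) == static_cast<DestType>(value));
    return static_cast<DestType>(value);
}

// Usage mirrors the call sites in this diff, e.g.:
//   auto inputHandle = PolymorphicDowncastSketch<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
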
diff --git a/src/backends/cl/ClBackendContext.cpp b/src/backends/cl/ClBackendContext.cpp
index f612c3743d..bfe93bdc01 100644
--- a/src/backends/cl/ClBackendContext.cpp
+++ b/src/backends/cl/ClBackendContext.cpp
@@ -8,14 +8,13 @@
#include <armnn/Logging.hpp>
#include <armnn/utility/Assert.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <arm_compute/core/CL/OpenCL.h>
#include <arm_compute/core/CL/CLKernelLibrary.h>
#include <arm_compute/runtime/CL/CLScheduler.h>
#include <arm_compute/runtime/CL/CLTunerTypes.h>
-#include <boost/polymorphic_cast.hpp>
-
namespace armnn
{
@@ -161,7 +160,7 @@ ClBackendContext::ClBackendContext(const IRuntime::CreationOptions& options)
bool useLegacyTunerAPI = options.m_GpuAccTunedParameters.get() != nullptr;
if (useLegacyTunerAPI)
{
- auto clTunerParams = boost::polymorphic_downcast<ClTunedParameters*>(
+ auto clTunerParams = PolymorphicDowncast<ClTunedParameters*>(
options.m_GpuAccTunedParameters.get());
tuner = &clTunerParams->m_Tuner;
diff --git a/src/backends/cl/ClTensorHandleFactory.cpp b/src/backends/cl/ClTensorHandleFactory.cpp
index 9df3f1a4a6..8af97f41e2 100644
--- a/src/backends/cl/ClTensorHandleFactory.cpp
+++ b/src/backends/cl/ClTensorHandleFactory.cpp
@@ -7,12 +7,12 @@
#include "ClTensorHandleFactory.hpp"
#include "ClTensorHandle.hpp"
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
#include <arm_compute/runtime/CL/CLTensor.h>
#include <arm_compute/core/Coordinates.h>
#include <arm_compute/runtime/CL/CLSubTensor.h>
-#include <boost/polymorphic_cast.hpp>
-
namespace armnn
{
@@ -42,7 +42,7 @@ std::unique_ptr<ITensorHandle> ClTensorHandleFactory::CreateSubTensorHandle(ITen
}
return std::make_unique<ClSubTensorHandle>(
- boost::polymorphic_downcast<IClTensorHandle *>(&parent), shape, coords);
+ PolymorphicDowncast<IClTensorHandle *>(&parent), shape, coords);
}
std::unique_ptr<ITensorHandle> ClTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo) const
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index b1bd46c4d7..b0d2fdf835 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -10,6 +10,7 @@
#include <armnn/Exceptions.hpp>
#include <armnn/Utils.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/MakeWorkloadHelper.hpp>
@@ -24,7 +25,6 @@
#include <arm_compute/runtime/CL/CLBufferAllocator.h>
#include <arm_compute/runtime/CL/CLScheduler.h>
-#include <boost/polymorphic_cast.hpp>
#include <boost/format.hpp>
namespace armnn
@@ -125,7 +125,7 @@ std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateSubTensorHandle(ITensorH
}
return std::make_unique<ClSubTensorHandle>(
- boost::polymorphic_downcast<IClTensorHandle*>(&parent), shape, coords);
+ PolymorphicDowncast<IClTensorHandle*>(&parent), shape, coords);
}
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index 92e771760f..b09b26f9b3 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -6,6 +6,7 @@
#include "ClContextControlFixture.hpp"
#include "ClWorkloadFactoryHelper.hpp"
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/MemCopyWorkload.hpp>
#include <aclCommon/test/CreateWorkloadClNeon.hpp>
@@ -35,8 +36,8 @@ static void ClCreateActivationWorkloadTest()
// Checks that inputs/outputs are as we expect them (see definition of CreateActivationWorkloadTest).
ActivationQueueDescriptor queueDescriptor = workload->GetData();
- auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
- auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {1, 1}));
BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1, 1}));
@@ -66,9 +67,9 @@ static void ClCreateElementwiseWorkloadTest()
// Checks that inputs/outputs are as we expect them (see definition of CreateElementwiseWorkloadTest).
DescriptorType queueDescriptor = workload->GetData();
- auto inputHandle1 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
- auto inputHandle2 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[1]);
- auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ auto inputHandle1 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto inputHandle2 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[1]);
+ auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
BOOST_TEST(CompareIClTensorHandleShape(inputHandle1, {2, 3}));
BOOST_TEST(CompareIClTensorHandleShape(inputHandle2, {2, 3}));
BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3}));
@@ -159,8 +160,8 @@ static void ClCreateElementwiseUnaryWorkloadTest(armnn::UnaryOperation op)
DescriptorType queueDescriptor = workload->GetData();
- auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
- auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {2, 3}));
BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3}));
@@ -184,8 +185,8 @@ static void ClCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
// Checks that inputs/outputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest).
BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData();
- auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
- auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
switch (dataLayout)
{
@@ -232,8 +233,8 @@ BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Workload)
auto workload = CreateConvertFp16ToFp32WorkloadTest<ClConvertFp16ToFp32Workload>(factory, graph);
ConvertFp16ToFp32QueueDescriptor queueDescriptor = workload->GetData();
- auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
- auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {1, 3, 2, 3}));
BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1, 3, 2, 3}));
@@ -250,8 +251,8 @@ BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Workload)
auto workload = CreateConvertFp32ToFp16WorkloadTest<ClConvertFp32ToFp16Workload>(factory, graph);
ConvertFp32ToFp16QueueDescriptor queueDescriptor = workload->GetData();
- auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
- auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {1, 3, 2, 3}));
BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1, 3, 2, 3}));
@@ -277,8 +278,8 @@ static void ClConvolution2dWorkloadTest(DataLayout dataLayout)
// Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest).
Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
- auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
- auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
BOOST_TEST((inputHandle->GetShape() == inputShape));
BOOST_TEST((outputHandle->GetShape() == outputShape));
}
@@ -315,8 +316,8 @@ static void ClDepthwiseConvolutionWorkloadTest(DataLayout dataLayout)
// Checks that inputs/outputs are as we expect them (see definition of CreateDepthwiseConvolution2dWorkloadTest).
DepthwiseConvolution2dQueueDescriptor queueDescriptor = workload->GetData();
- auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
- auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
: std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
@@ -343,8 +344,8 @@ static void ClDirectConvolution2dWorkloadTest()
// Checks that outputs and inputs are as we expect them (see definition of CreateDirectConvolution2dWorkloadTest).
Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
- auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
- auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {2, 3, 6, 6}));
BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 2, 6, 6}));
}
@@ -376,8 +377,8 @@ static void ClCreateFullyConnectedWorkloadTest()
// Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
- auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
- auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 1, 4, 5}));
BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 7}));
}
@@ -404,8 +405,8 @@ static void ClNormalizationWorkloadTest(DataLayout dataLayout)
// Checks that inputs/outputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
NormalizationQueueDescriptor queueDescriptor = workload->GetData();
- auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
- auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({3, 5, 5, 1})
: std::initializer_list<unsigned int>({3, 1, 5, 5});
@@ -452,8 +453,8 @@ static void ClPooling2dWorkloadTest(DataLayout dataLayout)
// Check that inputs/outputs are as we expect them (see definition of CreatePooling2dWorkloadTest).
Pooling2dQueueDescriptor queueDescriptor = workload->GetData();
- auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
- auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
BOOST_TEST((inputHandle->GetShape() == inputShape));
BOOST_TEST((outputHandle->GetShape() == outputShape));
@@ -497,9 +498,9 @@ static void ClCreatePreluWorkloadTest(const armnn::TensorShape& inputShape,
// Checks that outputs and inputs are as we expect them (see definition of CreatePreluWorkloadTest).
PreluQueueDescriptor queueDescriptor = workload->GetData();
- auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
- auto alphaHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[1]);
- auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto alphaHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[1]);
+ auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
BOOST_TEST((inputHandle->GetShape() == inputShape));
BOOST_TEST((alphaHandle->GetShape() == alphaShape));
@@ -532,8 +533,8 @@ static void ClCreateReshapeWorkloadTest()
// Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest).
ReshapeQueueDescriptor queueDescriptor = workload->GetData();
- auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
- auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1}));
BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1, 4}));
@@ -565,8 +566,8 @@ static void ClSoftmaxWorkloadTest()
// Checks that inputs/outputs are as we expect them (see definition of ClSoftmaxFloatWorkload).
SoftmaxQueueDescriptor queueDescriptor = workload->GetData();
- auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
- auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1}));
BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {4, 1}));
@@ -594,16 +595,16 @@ static void ClSplitterWorkloadTest()
// Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
SplitterQueueDescriptor queueDescriptor = workload->GetData();
- auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {5, 7, 7}));
- auto outputHandle1 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[1]);
+ auto outputHandle1 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[1]);
BOOST_TEST(CompareIClTensorHandleShape(outputHandle1, {2, 7, 7}));
- auto outputHandle2 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[2]);
+ auto outputHandle2 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[2]);
BOOST_TEST(CompareIClTensorHandleShape(outputHandle2, {2, 7, 7}));
- auto outputHandle0 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ auto outputHandle0 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
BOOST_TEST(CompareIClTensorHandleShape(outputHandle0, {1, 7, 7}));
}
@@ -738,8 +739,8 @@ static void ClL2NormalizationWorkloadTest(DataLayout dataLayout)
// Checks that inputs/outputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
L2NormalizationQueueDescriptor queueDescriptor = workload->GetData();
- auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
- auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({ 5, 20, 50, 67 })
: std::initializer_list<unsigned int>({ 5, 50, 67, 20 });
@@ -780,8 +781,8 @@ static void ClCreateLstmWorkloadTest()
auto workload = CreateLstmWorkloadTest<LstmWorkloadType>(factory, graph);
LstmQueueDescriptor queueDescriptor = workload->GetData();
- auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
- auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[1]);
+ auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[1]);
BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 2 }));
BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 4 }));
}
@@ -802,8 +803,8 @@ static void ClResizeWorkloadTest(DataLayout dataLayout)
auto queueDescriptor = workload->GetData();
- auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
- auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
switch (dataLayout)
{
@@ -859,8 +860,8 @@ static void ClMeanWorkloadTest()
// Checks that inputs/outputs are as we expect them (see definition of CreateMeanWorkloadTest).
MeanQueueDescriptor queueDescriptor = workload->GetData();
- auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
- auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
// The first dimension (batch size) in both input and output is singular thus it has been reduced by ACL.
BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 1, 3, 7, 4 }));
@@ -893,9 +894,9 @@ static void ClCreateConcatWorkloadTest(std::initializer_list<unsigned int> outpu
auto workload = CreateConcatWorkloadTest<ConcatWorkloadType, DataType>(factory, graph, outputShape, concatAxis);
ConcatQueueDescriptor queueDescriptor = workload->GetData();
- auto inputHandle0 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
- auto inputHandle1 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[1]);
- auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ auto inputHandle0 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto inputHandle1 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[1]);
+ auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
BOOST_TEST(CompareIClTensorHandleShape(inputHandle0, { 2, 3, 2, 5 }));
BOOST_TEST(CompareIClTensorHandleShape(inputHandle1, { 2, 3, 2, 5 }));
@@ -942,8 +943,8 @@ static void ClSpaceToDepthWorkloadTest()
auto workload = CreateSpaceToDepthWorkloadTest<SpaceToDepthWorkloadType, DataType>(factory, graph);
SpaceToDepthQueueDescriptor queueDescriptor = workload->GetData();
- auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
- auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 1, 2, 2, 1 }));
BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 1, 1, 1, 4 }));
@@ -990,10 +991,10 @@ static void ClCreateStackWorkloadTest(const std::initializer_list<unsigned int>&
StackQueueDescriptor queueDescriptor = workload->GetData();
for (unsigned int i = 0; i < numInputs; ++i)
{
- auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[i]);
+ auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[i]);
BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape));
}
- auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape));
}
@@ -1016,7 +1017,6 @@ template <typename QuantizedLstmWorkloadType>
static void ClCreateQuantizedLstmWorkloadTest()
{
using namespace armnn::armcomputetensorutils;
- using boost::polymorphic_downcast;
Graph graph;
ClWorkloadFactory factory =
@@ -1026,23 +1026,23 @@ static void ClCreateQuantizedLstmWorkloadTest()
QuantizedLstmQueueDescriptor queueDescriptor = workload->GetData();
- IAclTensorHandle* inputHandle = polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ IAclTensorHandle* inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
BOOST_TEST((inputHandle->GetShape() == TensorShape({2, 2})));
BOOST_TEST((inputHandle->GetDataType() == arm_compute::DataType::QASYMM8));
- IAclTensorHandle* cellStateInHandle = polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
+ IAclTensorHandle* cellStateInHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
BOOST_TEST((cellStateInHandle->GetShape() == TensorShape({2, 4})));
BOOST_TEST((cellStateInHandle->GetDataType() == arm_compute::DataType::QSYMM16));
- IAclTensorHandle* outputStateInHandle = polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[2]);
+ IAclTensorHandle* outputStateInHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[2]);
BOOST_TEST((outputStateInHandle->GetShape() == TensorShape({2, 4})));
BOOST_TEST((outputStateInHandle->GetDataType() == arm_compute::DataType::QASYMM8));
- IAclTensorHandle* cellStateOutHandle = polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ IAclTensorHandle* cellStateOutHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
BOOST_TEST((cellStateOutHandle->GetShape() == TensorShape({2, 4})));
BOOST_TEST((cellStateOutHandle->GetDataType() == arm_compute::DataType::QSYMM16));
- IAclTensorHandle* outputStateOutHandle = polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
+ IAclTensorHandle* outputStateOutHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
BOOST_TEST((outputStateOutHandle->GetShape() == TensorShape({2, 4})));
BOOST_TEST((outputStateOutHandle->GetDataType() == arm_compute::DataType::QASYMM8));
}
diff --git a/src/backends/cl/workloads/ClAbsWorkload.cpp b/src/backends/cl/workloads/ClAbsWorkload.cpp
index 058c453c6b..d020eeb344 100644
--- a/src/backends/cl/workloads/ClAbsWorkload.cpp
+++ b/src/backends/cl/workloads/ClAbsWorkload.cpp
@@ -7,6 +7,8 @@
#include "ClWorkloadUtils.hpp"
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <cl/ClTensorHandle.hpp>
@@ -29,8 +31,8 @@ ClAbsWorkload::ClAbsWorkload(const AbsQueueDescriptor& descriptor, const Workloa
{
m_Data.ValidateInputsOutputs("ClAbsWorkload", 1, 1);
- arm_compute::ICLTensor& input = boost::polymorphic_downcast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& output = boost::polymorphic_downcast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ICLTensor& input = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
m_AbsLayer.configure(&input, &output);
}
diff --git a/src/backends/cl/workloads/ClNegWorkload.cpp b/src/backends/cl/workloads/ClNegWorkload.cpp
index cc6333fff9..9f83cd32c3 100644
--- a/src/backends/cl/workloads/ClNegWorkload.cpp
+++ b/src/backends/cl/workloads/ClNegWorkload.cpp
@@ -8,6 +8,7 @@
#include "ClWorkloadUtils.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <cl/ClTensorHandle.hpp>
@@ -29,8 +30,8 @@ ClNegWorkload::ClNegWorkload(const ElementwiseUnaryQueueDescriptor& descriptor,
{
m_Data.ValidateInputsOutputs("ClNegWorkload", 1, 1);
- arm_compute::ICLTensor& input = boost::polymorphic_downcast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& output = boost::polymorphic_downcast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ICLTensor& input = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
m_NegLayer.configure(&input, &output);
}
diff --git a/src/backends/cl/workloads/ClRsqrtWorkload.cpp b/src/backends/cl/workloads/ClRsqrtWorkload.cpp
index be687595f7..a305a4a919 100644
--- a/src/backends/cl/workloads/ClRsqrtWorkload.cpp
+++ b/src/backends/cl/workloads/ClRsqrtWorkload.cpp
@@ -8,6 +8,7 @@
#include "ClWorkloadUtils.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <cl/ClTensorHandle.hpp>
@@ -29,8 +30,8 @@ ClRsqrtWorkload::ClRsqrtWorkload(const RsqrtQueueDescriptor& descriptor, const W
{
m_Data.ValidateInputsOutputs("ClRsqrtWorkload", 1, 1);
- arm_compute::ICLTensor& input = boost::polymorphic_downcast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& output = boost::polymorphic_downcast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ICLTensor& input = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
m_RsqrtLayer.configure(&input, &output);
}
diff --git a/src/backends/cl/workloads/ClSliceWorkload.cpp b/src/backends/cl/workloads/ClSliceWorkload.cpp
index fa99e7f54d..5ea4c4cefd 100644
--- a/src/backends/cl/workloads/ClSliceWorkload.cpp
+++ b/src/backends/cl/workloads/ClSliceWorkload.cpp
@@ -8,6 +8,7 @@
#include "ClWorkloadUtils.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <cl/ClTensorHandle.hpp>
@@ -36,8 +37,8 @@ ClSliceWorkload::ClSliceWorkload(const SliceQueueDescriptor& descriptor, const W
{
m_Data.ValidateInputsOutputs("ClSliceWorkload", 1, 1);
- arm_compute::ICLTensor& input = boost::polymorphic_downcast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& output = boost::polymorphic_downcast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ICLTensor& input = PolymorphicDowncast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ICLTensor& output = PolymorphicDowncast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
arm_compute::Coordinates starts;
arm_compute::Coordinates ends;
diff --git a/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp b/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp
index d541e4ec52..1acb5c64e6 100644
--- a/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp
+++ b/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp
@@ -10,7 +10,6 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <cl/ClTensorHandle.hpp>
-#include <boost/polymorphic_pointer_cast.hpp>
namespace armnn
{