author     Matthew Sloyan <matthew.sloyan@arm.com>    2020-09-09 09:07:37 +0100
committer  Jan Eilers <jan.eilers@arm.com>            2020-09-17 08:31:09 +0000
commit     171214c8ff275c90cd4f7fc23a34ec2c83b5ea39 (patch)
tree       23fd3ee288d631c8c94bede71f89f0f1e12da862 /src/backends/cl/workloads
parent     a25886e0966a6b9433cd23595688fadb88a161b2 (diff)
download   armnn-171214c8ff275c90cd4f7fc23a34ec2c83b5ea39.tar.gz
IVGCVSW-5300 Remove some boost::numeric_cast from armnn/backends
* Replaced with armnn/utility/NumericCast.hpp
* Some exclusions in reference backend
* Excluded as requires float implementation in NumericCast.hpp

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: I9e4e9cd502c865452128fa04415fd6f250baa855
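The replacement is intended as a drop-in: armnn::numeric_cast, like the Boost cast it replaces, performs a range-checked narrowing conversion. Below is a minimal sketch of the idea only; checked_cast is a hypothetical stand-in, not the in-tree implementation (which lives in armnn/utility/NumericCast.hpp and, per the note above, covered only integral types at this point).

#include <cstdint>
#include <stdexcept>

// Sketch of a range-checked integral cast. The real armnn::numeric_cast
// may differ in its checks and error handling.
template <typename Dest, typename Source>
Dest checked_cast(Source source)
{
    const Dest dest = static_cast<Dest>(source);
    // Reject values that change under the round trip or flip sign.
    if (static_cast<Source>(dest) != source ||
        (dest < Dest{}) != (source < Source{}))
    {
        throw std::out_of_range("checked_cast: value out of range");
    }
    return dest;
}

int main()
{
    unsigned int blockSize = 4u;
    int32_t ok = checked_cast<int32_t>(blockSize);     // fine: 4
    // checked_cast<int32_t>(3000000000u);             // would throw
    return ok == 4 ? 0 : 1;
}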
Diffstat (limited to 'src/backends/cl/workloads')
-rw-r--r--  src/backends/cl/workloads/ClArgMinMaxWorkload.cpp        5
-rw-r--r--  src/backends/cl/workloads/ClBatchToSpaceNdWorkload.cpp  10
-rw-r--r--  src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp     8
-rw-r--r--  src/backends/cl/workloads/ClLstmFloatWorkload.cpp        6
-rw-r--r--  src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp   9
-rw-r--r--  src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp     6
-rw-r--r--  src/backends/cl/workloads/ClStackWorkload.cpp            7
-rw-r--r--  src/backends/cl/workloads/ClStridedSliceWorkload.cpp     7
8 files changed, 33 insertions, 25 deletions
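Every hunk below applies the same mechanical swap: drop the boost/numeric/conversion/cast.hpp include, pull in armnn/utility/NumericCast.hpp, and change the namespace at each call site. The pattern in isolation (assuming the ArmNN headers are on the include path; the field type mirrors the descriptors touched below):

#include <armnn/utility/NumericCast.hpp>
#include <cstdint>

int main()
{
    unsigned int blockSize = 4u;   // typical unsigned descriptor field
    // Was: boost::numeric_cast<int32_t>(blockSize)
    int32_t aclBlockSize = armnn::numeric_cast<int32_t>(blockSize);
    return aclBlockSize == 4 ? 0 : 1;
}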
diff --git a/src/backends/cl/workloads/ClArgMinMaxWorkload.cpp b/src/backends/cl/workloads/ClArgMinMaxWorkload.cpp
index a79a7b286d..5910080859 100644
--- a/src/backends/cl/workloads/ClArgMinMaxWorkload.cpp
+++ b/src/backends/cl/workloads/ClArgMinMaxWorkload.cpp
@@ -11,6 +11,7 @@
#include <backendsCommon/CpuTensorHandle.hpp>
#include <armnnUtils/TensorUtils.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <cl/ClTensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
@@ -36,7 +37,7 @@ arm_compute::Status ClArgMinMaxWorkloadValidate(const TensorInfo& input,
auto numDims = input.GetNumDimensions();
auto unsignedAxis = armnnUtils::GetUnsignedAxis(numDims, descriptor.m_Axis);
- int aclAxis = boost::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));
+ int aclAxis = armnn::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));
if (descriptor.m_Function == ArgMinMaxFunction::Max)
{
@@ -60,7 +61,7 @@ ClArgMinMaxWorkload::ClArgMinMaxWorkload(const ArgMinMaxQueueDescriptor& descrip
auto numDims = info.m_InputTensorInfos[0].GetNumDimensions();
auto unsignedAxis = armnnUtils::GetUnsignedAxis(numDims, m_Data.m_Parameters.m_Axis);
- int aclAxis = boost::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));
+ int aclAxis = armnn::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));
if (m_Data.m_Parameters.m_Function == ArgMinMaxFunction::Max)
{
diff --git a/src/backends/cl/workloads/ClBatchToSpaceNdWorkload.cpp b/src/backends/cl/workloads/ClBatchToSpaceNdWorkload.cpp
index a714e031e4..1a7a8dca81 100644
--- a/src/backends/cl/workloads/ClBatchToSpaceNdWorkload.cpp
+++ b/src/backends/cl/workloads/ClBatchToSpaceNdWorkload.cpp
@@ -9,6 +9,8 @@
#include <backendsCommon/CpuTensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/NumericCast.hpp>
+
#include "ClWorkloadUtils.hpp"
namespace armnn
@@ -27,8 +29,8 @@ ClBatchToSpaceNdWorkload::ClBatchToSpaceNdWorkload(const BatchToSpaceNdQueueDesc
input.info()->set_data_layout(aclDataLayout);
// ArmNN blockShape is [H, W] Cl asks for W, H
- int32_t blockHeight = boost::numeric_cast<int32_t>(desc.m_Parameters.m_BlockShape[0]);
- int32_t blockWidth = boost::numeric_cast<int32_t>(desc.m_Parameters.m_BlockShape[1]);
+ int32_t blockHeight = armnn::numeric_cast<int32_t>(desc.m_Parameters.m_BlockShape[0]);
+ int32_t blockWidth = armnn::numeric_cast<int32_t>(desc.m_Parameters.m_BlockShape[1]);
arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
output.info()->set_data_layout(aclDataLayout);
@@ -49,8 +51,8 @@ arm_compute::Status ClBatchToSpaceNdWorkloadValidate(const TensorInfo& input,
const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, dataLayout);
// ArmNN blockShape is [H, W] Cl asks for W, H
- int32_t blockHeight = boost::numeric_cast<int32_t>(desc.m_BlockShape[0]);
- int32_t blockWidth = boost::numeric_cast<int32_t>(desc.m_BlockShape[1]);
+ int32_t blockHeight = armnn::numeric_cast<int32_t>(desc.m_BlockShape[0]);
+ int32_t blockWidth = armnn::numeric_cast<int32_t>(desc.m_BlockShape[1]);
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, dataLayout);
diff --git a/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp b/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp
index 04885b18aa..43c81dc209 100644
--- a/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp
@@ -8,12 +8,12 @@
#include "ClWorkloadUtils.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <cl/ClTensorHandle.hpp>
-#include <boost/numeric/conversion/cast.hpp>
-
namespace armnn
{
@@ -26,7 +26,7 @@ arm_compute::Status ClDepthToSpaceWorkloadValidate(const TensorInfo& input,
DataLayout dataLayout = desc.m_DataLayout;
const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, dataLayout);
- int32_t blockSize = boost::numeric_cast<int32_t>(desc.m_BlockSize);
+ int32_t blockSize = armnn::numeric_cast<int32_t>(desc.m_BlockSize);
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, dataLayout);
@@ -48,7 +48,7 @@ ClDepthToSpaceWorkload::ClDepthToSpaceWorkload(const DepthToSpaceQueueDescriptor
PolymorphicPointerDowncast<IClTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
input.info()->set_data_layout(aclDataLayout);
- int32_t blockSize = boost::numeric_cast<int32_t>(desc.m_Parameters.m_BlockSize);
+ int32_t blockSize = armnn::numeric_cast<int32_t>(desc.m_Parameters.m_BlockSize);
arm_compute::ICLTensor& output =
PolymorphicPointerDowncast<IClTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
diff --git a/src/backends/cl/workloads/ClLstmFloatWorkload.cpp b/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
index 9d06428902..fe9b45e054 100644
--- a/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
@@ -9,6 +9,8 @@
#include <cl/ClLayerSupport.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/NumericCast.hpp>
+
#include <arm_compute/runtime/CL/functions/CLLSTMLayer.h>
#include "ClWorkloadUtils.hpp"
@@ -132,8 +134,8 @@ ClLstmFloatWorkload::ClLstmFloatWorkload(const LstmQueueDescriptor &descriptor,
// Get the batch_size and the num_units from the cellStateIn dimensions
const TensorInfo& inputTensorInfo = info.m_InputTensorInfos[2];
- const unsigned int batch_size = boost::numeric_cast<unsigned int>(inputTensorInfo.GetShape()[0]);
- const unsigned int num_units = boost::numeric_cast<unsigned int>(inputTensorInfo.GetShape()[1]);
+ const unsigned int batch_size = armnn::numeric_cast<unsigned int>(inputTensorInfo.GetShape()[0]);
+ const unsigned int num_units = armnn::numeric_cast<unsigned int>(inputTensorInfo.GetShape()[1]);
m_ScratchBuffer = std::make_unique<arm_compute::CLTensor>();
if (m_Data.m_Parameters.m_CifgEnabled)
diff --git a/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp b/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp
index b87658b3f9..443c56b7b5 100644
--- a/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp
+++ b/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp
@@ -9,6 +9,7 @@
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
@@ -27,8 +28,8 @@ arm_compute::Status ClSpaceToBatchNdWorkloadValidate(const TensorInfo& input,
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
// ArmNN blockShape is [H, W] Cl asks for W, H
- int32_t blockHeight = boost::numeric_cast<int32_t>(descriptor.m_BlockShape[0]);
- int32_t blockWidth = boost::numeric_cast<int32_t>(descriptor.m_BlockShape[1]);
+ int32_t blockHeight = armnn::numeric_cast<int32_t>(descriptor.m_BlockShape[0]);
+ int32_t blockWidth = armnn::numeric_cast<int32_t>(descriptor.m_BlockShape[1]);
arm_compute::Size2D paddingLeftTop = BuildArmComputeSize2D(
descriptor.m_PadList[1].first, descriptor.m_PadList[0].first);
@@ -55,8 +56,8 @@ ClSpaceToBatchNdWorkload::ClSpaceToBatchNdWorkload(
armnn::PolymorphicPointerDowncast<IClTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
// ArmNN blockShape is [H, W] Cl asks for W, H
- int32_t blockHeight = boost::numeric_cast<int32_t>(m_Data.m_Parameters.m_BlockShape[0]);
- int32_t blockWidth = boost::numeric_cast<int32_t>(m_Data.m_Parameters.m_BlockShape[1]);
+ int32_t blockHeight = armnn::numeric_cast<int32_t>(m_Data.m_Parameters.m_BlockShape[0]);
+ int32_t blockWidth = armnn::numeric_cast<int32_t>(m_Data.m_Parameters.m_BlockShape[1]);
arm_compute::Size2D paddingLeftTop = BuildArmComputeSize2D(
m_Data.m_Parameters.m_PadList[1].first, m_Data.m_Parameters.m_PadList[0].first);
diff --git a/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp b/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp
index 1acb5c64e6..f35fe0e3c9 100644
--- a/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp
+++ b/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp
@@ -11,6 +11,8 @@
#include <backendsCommon/CpuTensorHandle.hpp>
#include <cl/ClTensorHandle.hpp>
+#include <armnn/utility/NumericCast.hpp>
+
namespace armnn
{
using namespace armcomputetensorutils;
@@ -26,7 +28,7 @@ ClSpaceToDepthWorkload::ClSpaceToDepthWorkload(const SpaceToDepthQueueDescriptor
arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
input.info()->set_data_layout(aclDataLayout);
- int32_t blockSize = boost::numeric_cast<int32_t>(desc.m_Parameters.m_BlockSize);
+ int32_t blockSize = armnn::numeric_cast<int32_t>(desc.m_Parameters.m_BlockSize);
arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
output.info()->set_data_layout(aclDataLayout);
@@ -47,7 +49,7 @@ arm_compute::Status ClSpaceToDepthWorkloadValidate(const TensorInfo& input,
DataLayout dataLayout = desc.m_DataLayout;
const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, dataLayout);
- int32_t blockSize = boost::numeric_cast<int32_t>(desc.m_BlockSize);
+ int32_t blockSize = armnn::numeric_cast<int32_t>(desc.m_BlockSize);
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, dataLayout);
diff --git a/src/backends/cl/workloads/ClStackWorkload.cpp b/src/backends/cl/workloads/ClStackWorkload.cpp
index e434f9897f..c0b88b1193 100644
--- a/src/backends/cl/workloads/ClStackWorkload.cpp
+++ b/src/backends/cl/workloads/ClStackWorkload.cpp
@@ -5,6 +5,7 @@
#include "ClStackWorkload.hpp"
#include "ClWorkloadUtils.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <cl/ClTensorHandle.hpp>
@@ -12,8 +13,6 @@
#include <arm_compute/core/Types.h>
-#include <boost/numeric/conversion/cast.hpp>
-
namespace armnn
{
using namespace armcomputetensorutils;
@@ -22,8 +21,8 @@ namespace
{
int CalcAxis(const unsigned int axis, const unsigned int inputDimensions)
{
- const int intAxis = boost::numeric_cast<int>(axis);
- return boost::numeric_cast<int>(inputDimensions) - intAxis;
+ const int intAxis = armnn::numeric_cast<int>(axis);
+ return armnn::numeric_cast<int>(inputDimensions) - intAxis;
}
} //namespace
diff --git a/src/backends/cl/workloads/ClStridedSliceWorkload.cpp b/src/backends/cl/workloads/ClStridedSliceWorkload.cpp
index 6b0a34d90e..b094a910f4 100644
--- a/src/backends/cl/workloads/ClStridedSliceWorkload.cpp
+++ b/src/backends/cl/workloads/ClStridedSliceWorkload.cpp
@@ -13,7 +13,8 @@
#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadUtils.hpp>
-#include <boost/numeric/conversion/cast.hpp>
+#include <armnn/utility/NumericCast.hpp>
+
#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
@@ -36,7 +37,7 @@ arm_compute::Status ClStridedSliceWorkloadValidate(const TensorInfo& input,
std::tie(starts, ends, strides) = SetClStridedSliceData(descriptor.m_Begin, descriptor.m_End, descriptor.m_Stride);
- auto numDimensions = boost::numeric_cast<int>(input.GetNumDimensions());
+ auto numDimensions = armnn::numeric_cast<int>(input.GetNumDimensions());
int32_t begin_mask = ConvertMaskToACLFormat(descriptor.m_BeginMask, numDimensions);
int32_t end_mask = ConvertMaskToACLFormat(descriptor.m_EndMask, numDimensions);
int32_t shrink_axis_mask = ConvertMaskToACLFormat(descriptor.m_ShrinkAxisMask, numDimensions);
@@ -68,7 +69,7 @@ ClStridedSliceWorkload::ClStridedSliceWorkload(const StridedSliceQueueDescriptor
m_Data.m_Parameters.m_End,
m_Data.m_Parameters.m_Stride);
- auto numDimensions = boost::numeric_cast<int>(info.m_InputTensorInfos[0].GetNumDimensions());
+ auto numDimensions = armnn::numeric_cast<int>(info.m_InputTensorInfos[0].GetNumDimensions());
int32_t begin_mask = ConvertMaskToACLFormat(m_Data.m_Parameters.m_BeginMask, numDimensions);
int32_t end_mask = ConvertMaskToACLFormat(m_Data.m_Parameters.m_EndMask, numDimensions);
int32_t shrink_axis_mask = ConvertMaskToACLFormat(m_Data.m_Parameters.m_ShrinkAxisMask, numDimensions);
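A note on the recurring "ArmNN blockShape is [H, W] Cl asks for W, H" comment in the hunks above: ArmNN stores the block shape as [height, width] while the CL functions take (width, height), so each workload swaps the indices while checked-narrowing each element. A minimal sketch of that swap, where BlockDesc is a hypothetical stand-in for the BatchToSpaceNd/SpaceToBatchNd descriptors:

#include <armnn/utility/NumericCast.hpp>
#include <cstdint>
#include <utility>

// Hypothetical stand-in for the descriptors seen above.
struct BlockDesc { unsigned int m_BlockShape[2]; };   // stored as [H, W]

std::pair<int32_t, int32_t> ToAclBlockWH(const BlockDesc& d)
{
    // CL asks for (W, H): index 1 then index 0, each range-checked.
    return { armnn::numeric_cast<int32_t>(d.m_BlockShape[1]),
             armnn::numeric_cast<int32_t>(d.m_BlockShape[0]) };
}

int main()
{
    BlockDesc d{{2u, 3u}};                            // H = 2, W = 3
    auto [w, h] = ToAclBlockWH(d);
    return (w == 3 && h == 2) ? 0 : 1;
}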