author     Matthew Sloyan <matthew.sloyan@arm.com>    2020-09-09 09:07:37 +0100
committer  Jan Eilers <jan.eilers@arm.com>            2020-09-17 08:31:09 +0000
commit     171214c8ff275c90cd4f7fc23a34ec2c83b5ea39 (patch)
tree       23fd3ee288d631c8c94bede71f89f0f1e12da862 /src/backends/neon
parent     a25886e0966a6b9433cd23595688fadb88a161b2 (diff)
IVGCVSW-5300 Remove some boost::numeric_cast from armnn/backends
* Replaced with armnn/utility/NumericCast.hpp
* Some exclusions in reference backend
* Excluded as requires float implementation in NumericCast.hpp

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: I9e4e9cd502c865452128fa04415fd6f250baa855
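For context, armnn::numeric_cast is used throughout this patch as a drop-in replacement for boost::numeric_cast. Below is a minimal sketch of the kind of range-checked conversion NumericCast.hpp provides, restricted to the unsigned-to-signed narrowing seen at the call sites here; it is a simplification, not the actual implementation:

    #include <limits>
    #include <stdexcept>
    #include <type_traits>

    // Sketch only: throws if the unsigned source value does not fit in the
    // signed destination type. The real armnn::numeric_cast covers more
    // conversions and, per the commit message above, does not yet handle float.
    template <typename Dest, typename Source>
    Dest checked_cast(Source source)
    {
        static_assert(std::is_unsigned<Source>::value && std::is_signed<Dest>::value,
                      "sketch covers unsigned -> signed only");
        using UDest = typename std::make_unsigned<Dest>::type;
        if (source > static_cast<UDest>(std::numeric_limits<Dest>::max()))
        {
            throw std::out_of_range("checked_cast: value out of range");
        }
        return static_cast<Dest>(source);
    }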
Diffstat (limited to 'src/backends/neon')
-rw-r--r--  src/backends/neon/NeonTensorHandleFactory.cpp               |  3
-rw-r--r--  src/backends/neon/NeonWorkloadFactory.cpp                   |  3
-rw-r--r--  src/backends/neon/test/NeonTensorHandleTests.cpp            |  5
-rw-r--r--  src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp       |  5
-rw-r--r--  src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp  | 10
-rw-r--r--  src/backends/neon/workloads/NeonDepthToSpaceWorkload.cpp    |  7
-rw-r--r--  src/backends/neon/workloads/NeonLstmFloatWorkload.cpp       |  6
-rw-r--r--  src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.cpp  | 10
-rw-r--r--  src/backends/neon/workloads/NeonSpaceToDepthWorkload.cpp    |  6
-rw-r--r--  src/backends/neon/workloads/NeonStackWorkload.cpp           |  7
-rw-r--r--  src/backends/neon/workloads/NeonStridedSliceWorkload.cpp    |  5
11 files changed, 39 insertions(+), 28 deletions(-)
diff --git a/src/backends/neon/NeonTensorHandleFactory.cpp b/src/backends/neon/NeonTensorHandleFactory.cpp
index 1dd83950cd..ce3ce5c0d7 100644
--- a/src/backends/neon/NeonTensorHandleFactory.cpp
+++ b/src/backends/neon/NeonTensorHandleFactory.cpp
@@ -9,6 +9,7 @@
#include "Layer.hpp"
#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
namespace armnn
@@ -29,7 +30,7 @@ std::unique_ptr<ITensorHandle> NeonTensorHandleFactory::CreateSubTensorHandle(IT
{
// Arm compute indexes tensor coords in reverse order.
unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
- coords.set(i, boost::numeric_cast<int>(subTensorOrigin[revertedIndex]));
+ coords.set(i, armnn::numeric_cast<int>(subTensorOrigin[revertedIndex]));
}
const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
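The same coordinate loop appears again in NeonWorkloadFactory.cpp below. As a standalone sketch with made-up shapes (assumed values, not from the patch), it shows why the origin indices are reverted: arm_compute::Coordinates are indexed innermost-first, while ArmNN shapes are outermost-first:

    // Hypothetical 4-D sub-tensor: ArmNN dimension order is [N, C, H, W].
    armnn::TensorShape subTensorShape({1, 16, 8, 8});
    unsigned int subTensorOrigin[] = {0, 0, 4, 4}; // {n, c, h, w}
    arm_compute::Coordinates coords;
    for (unsigned int i = 0; i < subTensorShape.GetNumDimensions(); ++i)
    {
        // ACL coordinate i takes ArmNN dimension (numDims - i - 1).
        unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
        coords.set(i, armnn::numeric_cast<int>(subTensorOrigin[revertedIndex]));
    }
    // coords now holds {4, 4, 0, 0}: W, H, C, N (innermost first).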
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 928989b1e4..709dd93e9b 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -12,6 +12,7 @@
#include <armnn/Utils.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
@@ -73,7 +74,7 @@ std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateSubTensorHandle(ITenso
{
// Arm compute indexes tensor coords in reverse order.
unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
- coords.set(i, boost::numeric_cast<int>(subTensorOrigin[revertedIndex]));
+ coords.set(i, armnn::numeric_cast<int>(subTensorOrigin[revertedIndex]));
}
const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
diff --git a/src/backends/neon/test/NeonTensorHandleTests.cpp b/src/backends/neon/test/NeonTensorHandleTests.cpp
index 3cea29323a..e6d740280d 100644
--- a/src/backends/neon/test/NeonTensorHandleTests.cpp
+++ b/src/backends/neon/test/NeonTensorHandleTests.cpp
@@ -8,6 +8,7 @@
#include <neon/NeonTensorHandle.hpp>
#include <neon/NeonTensorHandleFactory.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <test/GraphUtils.hpp>
@@ -366,7 +367,7 @@ BOOST_AUTO_TEST_CASE(SplitteronXorYNoPaddingRequiredTest)
for (unsigned int i = 0; i < outputShapes.size(); ++i)
{
TensorInfo outputTensorInfo(outputShapes[i], armnn::DataType::Float32, qScale, qOffset);
- IConnectableLayer* output = net->AddOutputLayer(boost::numeric_cast<LayerBindingId>(i));
+ IConnectableLayer* output = net->AddOutputLayer(armnn::numeric_cast<LayerBindingId>(i));
Connect(pooling2dLayers[i], output, outputTensorInfo, 0, 0);
}
@@ -541,7 +542,7 @@ BOOST_AUTO_TEST_CASE(SplitteronXorYPaddingRequiredTest)
for (unsigned int i = 0; i < outputShapes.size(); ++i)
{
TensorInfo outputTensorInfo(outputShapes[i], armnn::DataType::Float32, qScale, qOffset);
- IConnectableLayer* output = net->AddOutputLayer(boost::numeric_cast<LayerBindingId>(i));
+ IConnectableLayer* output = net->AddOutputLayer(armnn::numeric_cast<LayerBindingId>(i));
Connect(pooling2dLayers[i], output, outputTensorInfo, 0, 0);
}
diff --git a/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp b/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp
index 0fb819db0b..6290ecce17 100644
--- a/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp
+++ b/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp
@@ -10,6 +10,7 @@
#include <backendsCommon/CpuTensorHandle.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <armnnUtils/TensorUtils.hpp>
@@ -36,7 +37,7 @@ arm_compute::Status NeonArgMinMaxWorkloadValidate(const TensorInfo& input,
auto numDims = input.GetNumDimensions();
auto unsignedAxis = armnnUtils::GetUnsignedAxis(numDims, descriptor.m_Axis);
- int aclAxis = boost::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));
+ int aclAxis = armnn::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));
if (descriptor.m_Function == ArgMinMaxFunction::Max)
{
@@ -60,7 +61,7 @@ NeonArgMinMaxWorkload::NeonArgMinMaxWorkload(const ArgMinMaxQueueDescriptor& des
auto numDims = info.m_InputTensorInfos[0].GetNumDimensions();
auto unsignedAxis = armnnUtils::GetUnsignedAxis(numDims, m_Data.m_Parameters.m_Axis);
- int aclAxis = boost::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));
+ int aclAxis = armnn::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));
auto layer = std::make_unique<arm_compute::NEArgMinMaxLayer>();
diff --git a/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp b/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp
index d2f538745c..3d479ff80d 100644
--- a/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp
+++ b/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp
@@ -7,7 +7,9 @@
#include "NeonWorkloadUtils.hpp"
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
+
#include <ResolveType.hpp>
namespace armnn
@@ -23,8 +25,8 @@ arm_compute::Status NeonBatchToSpaceNdWorkloadValidate(const TensorInfo& input,
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, desc.m_DataLayout);
// ArmNN blockShape is [H, W] Cl asks for W, H
- int32_t blockHeight = boost::numeric_cast<int32_t>(desc.m_BlockShape[0]);
- int32_t blockWidth = boost::numeric_cast<int32_t>(desc.m_BlockShape[1]);
+ int32_t blockHeight = armnn::numeric_cast<int32_t>(desc.m_BlockShape[0]);
+ int32_t blockWidth = armnn::numeric_cast<int32_t>(desc.m_BlockShape[1]);
const arm_compute::Status aclStatus = arm_compute::NEBatchToSpaceLayer::validate(&aclInputInfo,
blockWidth,
@@ -49,8 +51,8 @@ NeonBatchToSpaceNdWorkload::NeonBatchToSpaceNdWorkload(const BatchToSpaceNdQueue
output.info()->set_data_layout(aclDataLayout);
// ArmNN blockShape is [H, W] Cl asks for W, H
- int32_t blockHeight = boost::numeric_cast<int32_t>(desc.m_Parameters.m_BlockShape[0]);
- int32_t blockWidth = boost::numeric_cast<int32_t>(desc.m_Parameters.m_BlockShape[1]);
+ int32_t blockHeight = armnn::numeric_cast<int32_t>(desc.m_Parameters.m_BlockShape[0]);
+ int32_t blockWidth = armnn::numeric_cast<int32_t>(desc.m_Parameters.m_BlockShape[1]);
m_Layer.reset(new arm_compute::NEBatchToSpaceLayer());
m_Layer->configure(&input, blockWidth, blockHeight, &output);
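Both hunks in this file make the same swap. A small illustration with assumed descriptor values of how the [H, W] ordering in the ArmNN descriptor meets arm_compute's (width, height) argument order:

    // Hypothetical values: ArmNN stores blockShape as [H, W].
    std::vector<unsigned int> blockShape = {2 /*H*/, 4 /*W*/};
    int32_t blockHeight = armnn::numeric_cast<int32_t>(blockShape[0]); // 2
    int32_t blockWidth  = armnn::numeric_cast<int32_t>(blockShape[1]); // 4
    // NEBatchToSpaceLayer::configure takes width before height, so the
    // call site passes the arguments in the opposite order:
    // m_Layer->configure(&input, blockWidth, blockHeight, &output);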
diff --git a/src/backends/neon/workloads/NeonDepthToSpaceWorkload.cpp b/src/backends/neon/workloads/NeonDepthToSpaceWorkload.cpp
index 12e7d206bf..2c4a6517e7 100644
--- a/src/backends/neon/workloads/NeonDepthToSpaceWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthToSpaceWorkload.cpp
@@ -8,10 +8,9 @@
#include "NeonWorkloadUtils.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <boost/numeric/conversion/cast.hpp>
-
namespace armnn
{
@@ -25,7 +24,7 @@ arm_compute::Status NeonDepthToSpaceWorkloadValidate(const TensorInfo& input,
const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input, dataLayout);
const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output, dataLayout);
- int32_t blockSize = boost::numeric_cast<int32_t>(descriptor.m_BlockSize);
+ int32_t blockSize = armnn::numeric_cast<int32_t>(descriptor.m_BlockSize);
return arm_compute::NEDepthToSpaceLayer::validate(&aclInput, &aclOutput, blockSize);
}
@@ -42,7 +41,7 @@ NeonDepthToSpaceWorkload::NeonDepthToSpaceWorkload(const DepthToSpaceQueueDescri
PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
input.info()->set_data_layout(aclDataLayout);
- int32_t blockSize = boost::numeric_cast<int32_t>(desc.m_Parameters.m_BlockSize);
+ int32_t blockSize = armnn::numeric_cast<int32_t>(desc.m_Parameters.m_BlockSize);
arm_compute::ITensor& output =
PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
diff --git a/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp b/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp
index d5f3c5de34..175e908817 100644
--- a/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp
@@ -8,6 +8,8 @@
#include "aclCommon/ArmComputeTensorUtils.hpp"
+#include <armnn/utility/NumericCast.hpp>
+
#include "neon/NeonTensorHandle.hpp"
namespace armnn
@@ -131,8 +133,8 @@ NeonLstmFloatWorkload::NeonLstmFloatWorkload(const LstmQueueDescriptor &descript
// Get the batch_size and the num_units from the cellStateIn dimensions
const TensorInfo& inputTensorInfo = info.m_InputTensorInfos[2];
- const unsigned int batch_size = boost::numeric_cast<unsigned int>(inputTensorInfo.GetShape()[0]);
- const unsigned int num_units = boost::numeric_cast<unsigned int>(inputTensorInfo.GetShape()[1]);
+ const unsigned int batch_size = armnn::numeric_cast<unsigned int>(inputTensorInfo.GetShape()[0]);
+ const unsigned int num_units = armnn::numeric_cast<unsigned int>(inputTensorInfo.GetShape()[1]);
m_ScratchBuffer = std::make_unique<arm_compute::Tensor>();
if (m_Data.m_Parameters.m_CifgEnabled)
diff --git a/src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.cpp b/src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.cpp
index d68ab4c4ac..42dd49cdc1 100644
--- a/src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.cpp
@@ -7,7 +7,9 @@
#include "NeonWorkloadUtils.hpp"
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
+
#include <ResolveType.hpp>
namespace armnn
@@ -23,8 +25,8 @@ arm_compute::Status NeonSpaceToBatchNdWorkloadValidate(const TensorInfo& input,
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
// ArmNN blockShape is [H, W] Cl asks for W, H
- int32_t blockHeight = boost::numeric_cast<int32_t>(descriptor.m_BlockShape[0]);
- int32_t blockWidth = boost::numeric_cast<int32_t>(descriptor.m_BlockShape[1]);
+ int32_t blockHeight = armnn::numeric_cast<int32_t>(descriptor.m_BlockShape[0]);
+ int32_t blockWidth = armnn::numeric_cast<int32_t>(descriptor.m_BlockShape[1]);
arm_compute::Size2D paddingLeftTop = BuildArmComputeSize2D(
descriptor.m_PadList[1].first, descriptor.m_PadList[0].first);
@@ -51,8 +53,8 @@ NeonSpaceToBatchNdWorkload::NeonSpaceToBatchNdWorkload(const SpaceToBatchNdQueue
PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
// ArmNN blockShape is [H, W] Cl asks for W, H
- int32_t blockHeight = boost::numeric_cast<int32_t>(m_Data.m_Parameters.m_BlockShape[0]);
- int32_t blockWidth = boost::numeric_cast<int32_t>(m_Data.m_Parameters.m_BlockShape[1]);
+ int32_t blockHeight = armnn::numeric_cast<int32_t>(m_Data.m_Parameters.m_BlockShape[0]);
+ int32_t blockWidth = armnn::numeric_cast<int32_t>(m_Data.m_Parameters.m_BlockShape[1]);
arm_compute::Size2D paddingLeftTop = BuildArmComputeSize2D(
m_Data.m_Parameters.m_PadList[1].first, m_Data.m_Parameters.m_PadList[0].first);
diff --git a/src/backends/neon/workloads/NeonSpaceToDepthWorkload.cpp b/src/backends/neon/workloads/NeonSpaceToDepthWorkload.cpp
index 2982cd181d..43c991cfb2 100644
--- a/src/backends/neon/workloads/NeonSpaceToDepthWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSpaceToDepthWorkload.cpp
@@ -6,7 +6,9 @@
#include "NeonSpaceToDepthWorkload.hpp"
#include "NeonWorkloadUtils.hpp"
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
+
#include <ResolveType.hpp>
namespace armnn
@@ -22,7 +24,7 @@ arm_compute::Status NeonSpaceToDepthWorkloadValidate(const TensorInfo& input,
const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input, dataLayout);
const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output, dataLayout);
- int32_t blockSize = boost::numeric_cast<int32_t>(descriptor.m_BlockSize);
+ int32_t blockSize = armnn::numeric_cast<int32_t>(descriptor.m_BlockSize);
return arm_compute::NESpaceToDepthLayer::validate(&aclInput, &aclOutput, blockSize);
}
@@ -38,7 +40,7 @@ NeonSpaceToDepthWorkload::NeonSpaceToDepthWorkload(const SpaceToDepthQueueDescri
arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
input.info()->set_data_layout(aclDataLayout);
- int32_t blockSize = boost::numeric_cast<int32_t>(desc.m_Parameters.m_BlockSize);
+ int32_t blockSize = armnn::numeric_cast<int32_t>(desc.m_Parameters.m_BlockSize);
arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
output.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/neon/workloads/NeonStackWorkload.cpp b/src/backends/neon/workloads/NeonStackWorkload.cpp
index a3ba8d888d..696de65620 100644
--- a/src/backends/neon/workloads/NeonStackWorkload.cpp
+++ b/src/backends/neon/workloads/NeonStackWorkload.cpp
@@ -6,12 +6,11 @@
#include "NeonWorkloadUtils.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <neon/NeonTensorHandle.hpp>
-#include <boost/numeric/conversion/cast.hpp>
-
namespace armnn
{
using namespace armcomputetensorutils;
@@ -20,8 +19,8 @@ namespace
{
int CalcAxis(const unsigned int axis, const unsigned int inputDimensions)
{
- const int intAxis = boost::numeric_cast<int>(axis);
- return boost::numeric_cast<int>(inputDimensions) - intAxis;
+ const int intAxis = armnn::numeric_cast<int>(axis);
+ return armnn::numeric_cast<int>(inputDimensions) - intAxis;
}
} //namespace
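A quick worked check of CalcAxis with assumed values: arm_compute counts dimensions innermost-first, so the ArmNN (outermost-first) stack axis is mirrored against the number of input dimensions:

    #include <cassert>

    // For inputs with 3 dimensions:
    // ArmNN axis 0 (outermost) -> ACL axis 3 - 0 = 3
    // ArmNN axis 2             -> ACL axis 3 - 2 = 1
    assert(CalcAxis(0, 3) == 3);
    assert(CalcAxis(2, 3) == 1);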
diff --git a/src/backends/neon/workloads/NeonStridedSliceWorkload.cpp b/src/backends/neon/workloads/NeonStridedSliceWorkload.cpp
index 282005c7cc..d0aee07f9b 100644
--- a/src/backends/neon/workloads/NeonStridedSliceWorkload.cpp
+++ b/src/backends/neon/workloads/NeonStridedSliceWorkload.cpp
@@ -9,6 +9,7 @@
#include <neon/NeonTensorHandle.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/WorkloadUtils.hpp>
@@ -30,7 +31,7 @@ arm_compute::Status NeonStridedSliceWorkloadValidate(const TensorInfo& input,
descriptor.m_End,
descriptor.m_Stride);
- auto numDimensions = boost::numeric_cast<int>(input.GetNumDimensions());
+ auto numDimensions = armnn::numeric_cast<int>(input.GetNumDimensions());
int32_t begin_mask = ConvertMaskToACLFormat(descriptor.m_BeginMask, numDimensions);
int32_t end_mask = ConvertMaskToACLFormat(descriptor.m_EndMask, numDimensions);
int32_t shrink_axis_mask = ConvertMaskToACLFormat(descriptor.m_ShrinkAxisMask, numDimensions);
@@ -62,7 +63,7 @@ NeonStridedSliceWorkload::NeonStridedSliceWorkload(const StridedSliceQueueDescri
m_Data.m_Parameters.m_End,
m_Data.m_Parameters.m_Stride);
- auto numDimensions = boost::numeric_cast<int>(info.m_InputTensorInfos[0].GetNumDimensions());
+ auto numDimensions = armnn::numeric_cast<int>(info.m_InputTensorInfos[0].GetNumDimensions());
int32_t begin_mask = ConvertMaskToACLFormat(m_Data.m_Parameters.m_BeginMask, numDimensions);
int32_t end_mask = ConvertMaskToACLFormat(m_Data.m_Parameters.m_EndMask, numDimensions);
int32_t shrink_axis_mask = ConvertMaskToACLFormat(m_Data.m_Parameters.m_ShrinkAxisMask, numDimensions);