author    Matthew Sloyan <matthew.sloyan@arm.com>    2020-09-09 09:07:37 +0100
committer Jan Eilers <jan.eilers@arm.com>            2020-09-17 08:31:09 +0000
commit    171214c8ff275c90cd4f7fc23a34ec2c83b5ea39 (patch)
tree      23fd3ee288d631c8c94bede71f89f0f1e12da862 /src/backends
parent    a25886e0966a6b9433cd23595688fadb88a161b2 (diff)
IVGCVSW-5300 Remove some boost::numeric_cast from armnn/backends
* Replaced with armnn/utility/NumericCast.hpp
* Some exclusions in reference backend
* Excluded as requires float implementation in NumericCast.hpp

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: I9e4e9cd502c865452128fa04415fd6f250baa855
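The substitution is mechanical: each checked narrowing conversion that previously went through Boost now uses the header-only Arm NN utility, with an identical call shape. A minimal before/after sketch (illustrative only; `shapelike` is borrowed from the ArmComputeTensorUtils hunk below, and the checking behaviour is whatever NumericCast.hpp implements):

    // Before: Boost pulled in solely for the range-checked cast.
    #include <boost/numeric/conversion/cast.hpp>
    unsigned int dims = boost::numeric_cast<unsigned int>(shapelike.num_dimensions());

    // After: identical call site, now using the Arm NN header instead.
    #include <armnn/utility/NumericCast.hpp>
    unsigned int dims = armnn::numeric_cast<unsigned int>(shapelike.num_dimensions());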
Diffstat (limited to 'src/backends')
-rw-r--r--  src/backends/aclCommon/ArmComputeTensorUtils.hpp | 10
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp | 6
-rw-r--r--  src/backends/backendsCommon/WorkloadUtils.cpp | 7
-rw-r--r--  src/backends/backendsCommon/test/ActivationFixture.hpp | 11
-rw-r--r--  src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp | 4
-rw-r--r--  src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp | 4
-rw-r--r--  src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp | 4
-rw-r--r--  src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp | 8
-rw-r--r--  src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp | 4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp | 10
-rw-r--r--  src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp | 101
-rw-r--r--  src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp | 19
-rw-r--r--  src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp | 10
-rw-r--r--  src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp | 17
-rw-r--r--  src/backends/cl/ClTensorHandleFactory.cpp | 3
-rw-r--r--  src/backends/cl/ClWorkloadFactory.cpp | 3
-rw-r--r--  src/backends/cl/workloads/ClArgMinMaxWorkload.cpp | 5
-rw-r--r--  src/backends/cl/workloads/ClBatchToSpaceNdWorkload.cpp | 10
-rw-r--r--  src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp | 8
-rw-r--r--  src/backends/cl/workloads/ClLstmFloatWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp | 9
-rw-r--r--  src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClStackWorkload.cpp | 7
-rw-r--r--  src/backends/cl/workloads/ClStridedSliceWorkload.cpp | 7
-rw-r--r--  src/backends/neon/NeonTensorHandleFactory.cpp | 3
-rw-r--r--  src/backends/neon/NeonWorkloadFactory.cpp | 3
-rw-r--r--  src/backends/neon/test/NeonTensorHandleTests.cpp | 5
-rw-r--r--  src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp | 5
-rw-r--r--  src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp | 10
-rw-r--r--  src/backends/neon/workloads/NeonDepthToSpaceWorkload.cpp | 7
-rw-r--r--  src/backends/neon/workloads/NeonLstmFloatWorkload.cpp | 6
-rw-r--r--  src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.cpp | 10
-rw-r--r--  src/backends/neon/workloads/NeonSpaceToDepthWorkload.cpp | 6
-rw-r--r--  src/backends/neon/workloads/NeonStackWorkload.cpp | 7
-rw-r--r--  src/backends/neon/workloads/NeonStridedSliceWorkload.cpp | 5
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp | 7
-rw-r--r--  src/backends/reference/workloads/ArgMinMax.cpp | 4
-rw-r--r--  src/backends/reference/workloads/DetectionPostProcess.cpp | 7
-rw-r--r--  src/backends/reference/workloads/Gather.cpp | 5
-rw-r--r--  src/backends/reference/workloads/LogSoftmax.cpp | 9
-rw-r--r--  src/backends/reference/workloads/Mean.cpp | 4
-rw-r--r--  src/backends/reference/workloads/Pooling2d.cpp | 53
-rw-r--r--  src/backends/reference/workloads/RefL2NormalizationWorkload.cpp | 17
-rw-r--r--  src/backends/reference/workloads/RefNormalizationWorkload.cpp | 25
-rw-r--r--  src/backends/reference/workloads/StridedSlice.cpp | 13
45 files changed, 262 insertions, 228 deletions
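For context on what these one-line substitutions rely on: a numeric_cast is a static_cast that first validates the value fits the destination type. The sketch below (C++17) is a hypothetical illustration of such a range-checked integral cast; it is not the contents of armnn/utility/NumericCast.hpp, whose real implementation differs and, per the commit message, does not yet cover floating-point sources (hence the reference-backend exclusions).

    // Hypothetical helper, shown only to illustrate the idea of a
    // range-checked narrowing cast. Not the real armnn::numeric_cast.
    #include <cstdint>
    #include <limits>
    #include <stdexcept>
    #include <type_traits>

    template <typename Dest, typename Source>
    Dest CheckedNumericCast(Source source)
    {
        static_assert(std::is_integral_v<Dest> && std::is_integral_v<Source>,
                      "sketch covers integral conversions only");

        constexpr auto destMax = std::numeric_limits<Dest>::max();
        constexpr auto destMin = std::numeric_limits<Dest>::min();

        if constexpr (std::is_signed_v<Source>)
        {
            if (source < 0)
            {
                // Negative values only fit if Dest is signed and wide enough.
                if (!std::is_signed_v<Dest> ||
                    static_cast<std::intmax_t>(source) < static_cast<std::intmax_t>(destMin))
                {
                    throw std::out_of_range("numeric_cast: value below destination range");
                }
                return static_cast<Dest>(source);
            }
        }
        // Here source is non-negative, so an unsigned comparison is safe.
        if (static_cast<std::uintmax_t>(source) > static_cast<std::uintmax_t>(destMax))
        {
            throw std::out_of_range("numeric_cast: value above destination range");
        }
        return static_cast<Dest>(source);
    }

With a helper of this shape, a call such as CheckedNumericCast<unsigned int>(-1) fails loudly instead of silently wrapping, which is the failure mode every cast in this diff guards against.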
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.hpp b/src/backends/aclCommon/ArmComputeTensorUtils.hpp
index 67676784ef..011f44dc69 100644
--- a/src/backends/aclCommon/ArmComputeTensorUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeTensorUtils.hpp
@@ -7,6 +7,8 @@
#include <armnn/Tensor.hpp>
#include <armnn/DescriptorsFwd.hpp>
+#include <armnn/utility/NumericCast.hpp>
+
#include <arm_compute/core/ITensor.h>
#include <arm_compute/core/TensorInfo.h>
#include <arm_compute/core/Types.h>
@@ -14,8 +16,6 @@
#include <Half.hpp>
-#include <boost/cast.hpp>
-
namespace armnn
{
class ITensorHandle;
@@ -126,7 +126,7 @@ inline size_t GetTensorOffset(const arm_compute::ITensorInfo& info,
coords.set(2, static_cast<int>(channelIndex));
coords.set(1, static_cast<int>(y));
coords.set(0, static_cast<int>(x));
- return boost::numeric_cast<size_t>(info.offset_element_in_bytes(coords));
+ return armnn::numeric_cast<size_t>(info.offset_element_in_bytes(coords));
}
// Helper function to obtain element offset into data buffer representing tensor data (assuming no strides).
@@ -229,9 +229,9 @@ TensorShape GetTensorShape(const ArmComputeType& shapelike, T initial)
std::vector<unsigned int> s(MaxNumOfTensorDimensions, initial);
for (unsigned int i=0; i < shapelike.num_dimensions(); ++i)
{
- s[(shapelike.num_dimensions()-1)-i] = boost::numeric_cast<unsigned int>(shapelike[i]);
+ s[(shapelike.num_dimensions()-1)-i] = armnn::numeric_cast<unsigned int>(shapelike[i]);
}
- return TensorShape(boost::numeric_cast<unsigned int>(shapelike.num_dimensions()), s.data());
+ return TensorShape(armnn::numeric_cast<unsigned int>(shapelike.num_dimensions()), s.data());
};
/// Get the strides from an ACL strides object
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index c7650dc58d..07ce14b763 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -7,6 +7,7 @@
#include <backendsCommon/CpuTensorHandle.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/TensorUtils.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <algorithm>
#include <iomanip>
@@ -14,7 +15,6 @@
#include <sstream>
#include <boost/format.hpp>
-#include <boost/numeric/conversion/cast.hpp>
using namespace armnnUtils;
@@ -306,7 +306,7 @@ void ValidateBroadcastTensorShapesMatch(const TensorInfo& first,
}
outputDims[i] = std::max(first.GetShape()[i], second.GetShape()[i]);
}
- TensorShape broadcastShape = TensorShape(boost::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
+ TensorShape broadcastShape = TensorShape(armnn::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
if (broadcastShape != output.GetShape())
{
throw InvalidArgumentException(descName + ": The tensor shape resulting from adding "
@@ -2306,7 +2306,7 @@ void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
else
{
unsigned int outputDim =
- inputTensorInfo.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
+ inputTensorInfo.GetNumDimensions() - armnn::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
ValidateTensorNumDimensions(outputTensorInfo,
descriptorName,
outputDim > 0 ? outputDim : 1,
diff --git a/src/backends/backendsCommon/WorkloadUtils.cpp b/src/backends/backendsCommon/WorkloadUtils.cpp
index 37915cfc4d..5886630cd9 100644
--- a/src/backends/backendsCommon/WorkloadUtils.cpp
+++ b/src/backends/backendsCommon/WorkloadUtils.cpp
@@ -6,8 +6,7 @@
#include <backendsCommon/WorkloadUtils.hpp>
#include <armnn/Utils.hpp>
-
-#include <boost/numeric/conversion/cast.hpp>
+#include <armnn/utility/NumericCast.hpp>
namespace armnn
{
@@ -194,12 +193,12 @@ armnn::ConstTensor ConvertWeightTensorFromArmnnToAcl(const ConstCpuTensorHandle*
int32_t ConvertMaskToACLFormat(int32_t mask, int32_t numDim)
{
int32_t reversedMask = 0;
- for (unsigned int i = 0; i < boost::numeric_cast<unsigned int>(numDim); ++i)
+ for (unsigned int i = 0; i < armnn::numeric_cast<unsigned int>(numDim); ++i)
{
// Check if bit set in mask for each dimension
int32_t bit = (mask & 1 << i) != 0;
// Increment the new mask with the bits reversed
- reversedMask += (bit << std::max(numDim-(boost::numeric_cast<int>(i)+1), 0));
+ reversedMask += (bit << std::max(numDim-(armnn::numeric_cast<int>(i)+1), 0));
}
return reversedMask;
diff --git a/src/backends/backendsCommon/test/ActivationFixture.hpp b/src/backends/backendsCommon/test/ActivationFixture.hpp
index 8ff77f6c5d..d28174d6a6 100644
--- a/src/backends/backendsCommon/test/ActivationFixture.hpp
+++ b/src/backends/backendsCommon/test/ActivationFixture.hpp
@@ -7,9 +7,10 @@
#include "TensorCopyUtils.hpp"
#include "WorkloadTestUtils.hpp"
+#include <armnn/utility/NumericCast.hpp>
+
#include <test/TensorHelpers.hpp>
-#include <boost/numeric/conversion/cast.hpp>
#include <boost/multi_array.hpp>
struct ActivationFixture
@@ -17,10 +18,10 @@ struct ActivationFixture
ActivationFixture()
{
auto boostArrayExtents = boost::extents
- [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(batchSize)]
- [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(channels)]
- [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(height)]
- [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(width)];
+ [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(batchSize)]
+ [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(channels)]
+ [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(height)]
+ [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(width)];
output.resize(boostArrayExtents);
outputExpected.resize(boostArrayExtents);
input.resize(boostArrayExtents);
diff --git a/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp
index dc53b7b246..c705f87b85 100644
--- a/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp
@@ -10,6 +10,8 @@
#include <armnn/INetwork.hpp>
+#include <armnn/utility/NumericCast.hpp>
+
#include <boost/test/unit_test.hpp>
#include <vector>
@@ -34,7 +36,7 @@ INetworkPtr CreateComparisonNetwork(const std::vector<TensorShape>& inputShapes,
for (unsigned int i = 0; i < inputShapes.size(); ++i)
{
TensorInfo inputTensorInfo(inputShapes[i], ArmnnTypeInput, qScale, qOffset);
- IConnectableLayer* input = net->AddInputLayer(boost::numeric_cast<LayerBindingId>(i));
+ IConnectableLayer* input = net->AddInputLayer(armnn::numeric_cast<LayerBindingId>(i));
Connect(input, comparisonLayer, inputTensorInfo, 0, i);
}
diff --git a/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp
index ded3857282..58a1f39a78 100644
--- a/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp
@@ -10,6 +10,8 @@
#include <armnn/INetwork.hpp>
+#include <armnn/utility/NumericCast.hpp>
+
#include <boost/test/unit_test.hpp>
#include <vector>
@@ -38,7 +40,7 @@ INetworkPtr CreateConcatNetwork(const std::vector<TensorShape>& inputShapes,
for (unsigned int i = 0; i < inputShapes.size(); ++i)
{
TensorInfo inputTensorInfo(inputShapes[i], DataType, qScale, qOffset);
- IConnectableLayer* input = net->AddInputLayer(boost::numeric_cast<LayerBindingId>(i));
+ IConnectableLayer* input = net->AddInputLayer(armnn::numeric_cast<LayerBindingId>(i));
Connect(input, concat, inputTensorInfo, 0, i);
}
diff --git a/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
index 4c93735bc8..5fedaa2171 100644
--- a/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
@@ -10,6 +10,8 @@
#include <armnn/INetwork.hpp>
+#include <armnn/utility/NumericCast.hpp>
+
#include <boost/test/unit_test.hpp>
#include <vector>
@@ -32,7 +34,7 @@ INetworkPtr CreateElementwiseUnaryNetwork(const TensorShape& inputShape,
IConnectableLayer* elementwiseUnaryLayer = net->AddElementwiseUnaryLayer(descriptor, "elementwiseUnary");
TensorInfo inputTensorInfo(inputShape, ArmnnTypeInput, qScale, qOffset);
- IConnectableLayer* input = net->AddInputLayer(boost::numeric_cast<LayerBindingId>(0));
+ IConnectableLayer* input = net->AddInputLayer(armnn::numeric_cast<LayerBindingId>(0));
Connect(input, elementwiseUnaryLayer, inputTensorInfo, 0, 0);
TensorInfo outputTensorInfo(outputShape, ArmnnTypeInput, qScale, qOffset);
diff --git a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
index b06b30c935..404a412ca0 100644
--- a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
@@ -13,6 +13,8 @@
#include <armnn/INetwork.hpp>
#include <armnn/QuantizedLstmParams.hpp>
+#include <armnn/utility/NumericCast.hpp>
+
#include <test/TensorHelpers.hpp>
#include <boost/test/unit_test.hpp>
@@ -27,9 +29,9 @@ using MultiArray = const boost::multi_array<uint8_t, 2>&;
armnn::INetworkPtr CreateQuantizedLstmNetwork(MultiArray input,
MultiArray expectedOutput)
{
- auto batchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);
- auto inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
- auto outputSize = boost::numeric_cast<unsigned int>(expectedOutput.shape()[1]);
+ auto batchSize = armnn::numeric_cast<unsigned int>(input.shape()[0]);
+ auto inputSize = armnn::numeric_cast<unsigned int>(input.shape()[1]);
+ auto outputSize = armnn::numeric_cast<unsigned int>(expectedOutput.shape()[1]);
float inputOutputScale = 0.0078125f;
int32_t inputOutputOffset = 128;
diff --git a/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp
index 6c4c1772b6..257a81b50b 100644
--- a/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp
@@ -8,6 +8,8 @@
#include <armnn/INetwork.hpp>
+#include <armnn/utility/NumericCast.hpp>
+
#include <backendsCommon/test/CommonTestUtils.hpp>
#include <boost/test/unit_test.hpp>
@@ -63,7 +65,7 @@ INetworkPtr CreateSplitterNetwork(const TensorShape& inputShape,
for (unsigned int i = 0; i < outputShapes.size(); ++i)
{
TensorInfo outputTensorInfo(outputShapes[i], DataType, qScale, qOffset);
- IConnectableLayer* output = net->AddOutputLayer(boost::numeric_cast<LayerBindingId>(i));
+ IConnectableLayer* output = net->AddOutputLayer(armnn::numeric_cast<LayerBindingId>(i));
Connect(splitter, output, outputTensorInfo, i, 0);
}
diff --git a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
index 543ea7716a..6d83b1ca99 100644
--- a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
@@ -13,6 +13,8 @@
#include <backendsCommon/test/WorkloadTestUtils.hpp>
#include <reference/test/RefWorkloadFactoryHelper.hpp>
+#include <armnn/utility/NumericCast.hpp>
+
#include <test/TensorHelpers.hpp>
#include <boost/multi_array.hpp>
@@ -1261,10 +1263,10 @@ LayerTestResult<T,4> CompareActivationTestImpl(
LayerTestResult<T,4> ret(outputTensorInfo);
auto boostArrayExtents = boost::extents
- [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(batchSize)]
- [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(channels)]
- [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(height)]
- [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(width)];
+ [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(batchSize)]
+ [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(channels)]
+ [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(height)]
+ [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(width)];
ret.output.resize(boostArrayExtents);
ret.outputExpected.resize(boostArrayExtents);
diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
index e99a26e81e..690d1cd66f 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
@@ -9,6 +9,7 @@
#include <armnnUtils/TensorUtils.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Permute.hpp>
@@ -219,20 +220,20 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
uint32_t dilationY = 1)
{
armnn::IgnoreUnused(memoryManager);
- unsigned int inputHeight = boost::numeric_cast<unsigned int>(originalInput.shape()[2]);
- unsigned int inputWidth = boost::numeric_cast<unsigned int>(originalInput.shape()[3]);
- unsigned int inputChannels = boost::numeric_cast<unsigned int>(originalInput.shape()[1]);
- unsigned int inputNum = boost::numeric_cast<unsigned int>(originalInput.shape()[0]);
+ unsigned int inputHeight = armnn::numeric_cast<unsigned int>(originalInput.shape()[2]);
+ unsigned int inputWidth = armnn::numeric_cast<unsigned int>(originalInput.shape()[3]);
+ unsigned int inputChannels = armnn::numeric_cast<unsigned int>(originalInput.shape()[1]);
+ unsigned int inputNum = armnn::numeric_cast<unsigned int>(originalInput.shape()[0]);
- unsigned int outputHeight = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[2]);
- unsigned int outputWidth = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[3]);
- unsigned int outputChannels = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[1]);
- unsigned int outputNum = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[0]);
+ unsigned int outputHeight = armnn::numeric_cast<unsigned int>(originalOutputExpected.shape()[2]);
+ unsigned int outputWidth = armnn::numeric_cast<unsigned int>(originalOutputExpected.shape()[3]);
+ unsigned int outputChannels = armnn::numeric_cast<unsigned int>(originalOutputExpected.shape()[1]);
+ unsigned int outputNum = armnn::numeric_cast<unsigned int>(originalOutputExpected.shape()[0]);
- unsigned int kernelHeight = boost::numeric_cast<unsigned int>(originalKernel.shape()[2]);
- unsigned int kernelWidth = boost::numeric_cast<unsigned int>(originalKernel.shape()[3]);
- unsigned int kernelChannels = boost::numeric_cast<unsigned int>(originalKernel.shape()[1]);
- unsigned int kernelDepthMul = boost::numeric_cast<unsigned int>(originalKernel.shape()[0]);
+ unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(originalKernel.shape()[2]);
+ unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(originalKernel.shape()[3]);
+ unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(originalKernel.shape()[1]);
+ unsigned int kernelDepthMul = armnn::numeric_cast<unsigned int>(originalKernel.shape()[0]);
bool biasEnabled = bias.size() > 0;
@@ -385,20 +386,20 @@ LayerTestResult<O, 4> SimpleConvolution2dNhwcTestImpl(
uint32_t strideY = 1)
{
armnn::IgnoreUnused(qScale, qOffset);
- unsigned int inputNum = boost::numeric_cast<unsigned int>(input.shape()[0]);
- unsigned int inputChannels = boost::numeric_cast<unsigned int>(input.shape()[3]);
- unsigned int inputHeight = boost::numeric_cast<unsigned int>(input.shape()[1]);
- unsigned int inputWidth = boost::numeric_cast<unsigned int>(input.shape()[2]);
+ unsigned int inputNum = armnn::numeric_cast<unsigned int>(input.shape()[0]);
+ unsigned int inputChannels = armnn::numeric_cast<unsigned int>(input.shape()[3]);
+ unsigned int inputHeight = armnn::numeric_cast<unsigned int>(input.shape()[1]);
+ unsigned int inputWidth = armnn::numeric_cast<unsigned int>(input.shape()[2]);
- unsigned int kernelChanMul = boost::numeric_cast<unsigned int>(kernel.shape()[0]);
- unsigned int kernelChannels = boost::numeric_cast<unsigned int>(kernel.shape()[3]);
- unsigned int kernelHeight = boost::numeric_cast<unsigned int>(kernel.shape()[1]);
- unsigned int kernelWidth = boost::numeric_cast<unsigned int>(kernel.shape()[2]);
+ unsigned int kernelChanMul = armnn::numeric_cast<unsigned int>(kernel.shape()[0]);
+ unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(kernel.shape()[3]);
+ unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(kernel.shape()[1]);
+ unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(kernel.shape()[2]);
- unsigned int outputNum = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
- unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[3]);
- unsigned int outputHeight = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
- unsigned int outputWidth = boost::numeric_cast<unsigned int>(outputExpected.shape()[2]);
+ unsigned int outputNum = armnn::numeric_cast<unsigned int>(outputExpected.shape()[0]);
+ unsigned int outputChannels = armnn::numeric_cast<unsigned int>(outputExpected.shape()[3]);
+ unsigned int outputHeight = armnn::numeric_cast<unsigned int>(outputExpected.shape()[1]);
+ unsigned int outputWidth = armnn::numeric_cast<unsigned int>(outputExpected.shape()[2]);
bool biasEnabled = bias.size() > 0;
@@ -1643,18 +1644,18 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
uint32_t strideX = 1,
uint32_t strideY = 1)
{
- unsigned int inputNum = boost::numeric_cast<unsigned int>(input.shape()[0]);
- unsigned int inputChannels = boost::numeric_cast<unsigned int>(input.shape()[1]);
- unsigned int inputHeight = boost::numeric_cast<unsigned int>(input.shape()[2]);
- unsigned int inputWidth = boost::numeric_cast<unsigned int>(input.shape()[3]);
- unsigned int kernelChanMul = boost::numeric_cast<unsigned int>(kernel.shape()[0]);
- unsigned int kernelChannels = boost::numeric_cast<unsigned int>(kernel.shape()[1]);
- unsigned int kernelHeight = boost::numeric_cast<unsigned int>(kernel.shape()[2]);
- unsigned int kernelWidth = boost::numeric_cast<unsigned int>(kernel.shape()[3]);
- unsigned int outputNum = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
- unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
- unsigned int outputHeight = boost::numeric_cast<unsigned int>(outputExpected.shape()[2]);
- unsigned int outputWidth = boost::numeric_cast<unsigned int>(outputExpected.shape()[3]);
+ unsigned int inputNum = armnn::numeric_cast<unsigned int>(input.shape()[0]);
+ unsigned int inputChannels = armnn::numeric_cast<unsigned int>(input.shape()[1]);
+ unsigned int inputHeight = armnn::numeric_cast<unsigned int>(input.shape()[2]);
+ unsigned int inputWidth = armnn::numeric_cast<unsigned int>(input.shape()[3]);
+ unsigned int kernelChanMul = armnn::numeric_cast<unsigned int>(kernel.shape()[0]);
+ unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(kernel.shape()[1]);
+ unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(kernel.shape()[2]);
+ unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(kernel.shape()[3]);
+ unsigned int outputNum = armnn::numeric_cast<unsigned int>(outputExpected.shape()[0]);
+ unsigned int outputChannels = armnn::numeric_cast<unsigned int>(outputExpected.shape()[1]);
+ unsigned int outputHeight = armnn::numeric_cast<unsigned int>(outputExpected.shape()[2]);
+ unsigned int outputWidth = armnn::numeric_cast<unsigned int>(outputExpected.shape()[3]);
// If a bias is used, its size must equal the number of output channels.
bool biasEnabled = bias.size() > 0;
@@ -2151,20 +2152,20 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
uint32_t dilationX = 1,
uint32_t dilationY = 1)
{
- unsigned int inputHeight = boost::numeric_cast<unsigned int>(originalInput.shape()[2]);
- unsigned int inputWidth = boost::numeric_cast<unsigned int>(originalInput.shape()[3]);
- unsigned int inputChannels = boost::numeric_cast<unsigned int>(originalInput.shape()[1]);
- unsigned int inputNum = boost::numeric_cast<unsigned int>(originalInput.shape()[0]);
-
- unsigned int outputHeight = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[2]);
- unsigned int outputWidth = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[3]);
- unsigned int outputChannels = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[1]);
- unsigned int outputNum = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[0]);
-
- unsigned int kernelHeight = boost::numeric_cast<unsigned int>(originalKernel.shape()[2]);
- unsigned int kernelWidth = boost::numeric_cast<unsigned int>(originalKernel.shape()[3]);
- unsigned int kernelChannels = boost::numeric_cast<unsigned int>(originalKernel.shape()[1]);
- unsigned int kernelDepthMul = boost::numeric_cast<unsigned int>(originalKernel.shape()[0]);
+ unsigned int inputHeight = armnn::numeric_cast<unsigned int>(originalInput.shape()[2]);
+ unsigned int inputWidth = armnn::numeric_cast<unsigned int>(originalInput.shape()[3]);
+ unsigned int inputChannels = armnn::numeric_cast<unsigned int>(originalInput.shape()[1]);
+ unsigned int inputNum = armnn::numeric_cast<unsigned int>(originalInput.shape()[0]);
+
+ unsigned int outputHeight = armnn::numeric_cast<unsigned int>(originalOutputExpected.shape()[2]);
+ unsigned int outputWidth = armnn::numeric_cast<unsigned int>(originalOutputExpected.shape()[3]);
+ unsigned int outputChannels = armnn::numeric_cast<unsigned int>(originalOutputExpected.shape()[1]);
+ unsigned int outputNum = armnn::numeric_cast<unsigned int>(originalOutputExpected.shape()[0]);
+
+ unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(originalKernel.shape()[2]);
+ unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(originalKernel.shape()[3]);
+ unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(originalKernel.shape()[1]);
+ unsigned int kernelDepthMul = armnn::numeric_cast<unsigned int>(originalKernel.shape()[0]);
bool biasEnabled = bias.size() > 0;
diff --git a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
index 8f39f42452..088ca3b4c2 100644
--- a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
@@ -7,6 +7,7 @@
#include <QuantizeHelper.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
@@ -144,9 +145,9 @@ LstmNoCifgNoPeepholeNoProjectionTestImpl(
armnn::DataType constantDataType = armnn::DataType::Float32)
{
IgnoreUnused(memoryManager);
- unsigned int batchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);
- unsigned int inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
- unsigned int outputSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
+ unsigned int batchSize = armnn::numeric_cast<unsigned int>(input.shape()[0]);
+ unsigned int inputSize = armnn::numeric_cast<unsigned int>(input.shape()[1]);
+ unsigned int outputSize = armnn::numeric_cast<unsigned int>(outputExpected.shape()[1]);
// cellSize and outputSize have the same size when there is no projection.
unsigned numUnits = outputSize;
@@ -1069,10 +1070,10 @@ LayerTestResult<T, 2> LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
bool peepholeEnabled = true;
bool projectionEnabled = false;
// These are not the input and the output of Lstm yet
- unsigned int batchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);
- unsigned int inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
+ unsigned int batchSize = armnn::numeric_cast<unsigned int>(input.shape()[0]);
+ unsigned int inputSize = armnn::numeric_cast<unsigned int>(input.shape()[1]);
- unsigned int outputSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
+ unsigned int outputSize = armnn::numeric_cast<unsigned int>(outputExpected.shape()[1]);
const unsigned int cellSize = outputSize;
@@ -1560,9 +1561,9 @@ LayerTestResult<uint8_t, 2> QuantizedLstmTestImpl(
const boost::multi_array<uint8_t, 2>& outputExpected)
{
IgnoreUnused(memoryManager);
- auto numBatches = boost::numeric_cast<unsigned int>(input.shape()[0]);
- auto inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
- auto outputSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
+ auto numBatches = armnn::numeric_cast<unsigned int>(input.shape()[0]);
+ auto inputSize = armnn::numeric_cast<unsigned int>(input.shape()[1]);
+ auto outputSize = armnn::numeric_cast<unsigned int>(outputExpected.shape()[1]);
// Scale/Offset for input/output, cellState In/Out, weights, bias
float inputOutputScale = 0.0078125f;
diff --git a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
index b42b180dc9..2e8e16f0c2 100644
--- a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
@@ -8,6 +8,8 @@
#include <armnn/Exceptions.hpp>
#include <armnn/LayerSupport.hpp>
+#include <armnn/utility/NumericCast.hpp>
+
#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
@@ -102,7 +104,7 @@ LayerTestResult<float,4> SimpleNormalizationTestImpl(
// pow((kappa + (accumulatedScale * alpha)), beta)
// ...where accumulatedScale is the sum of every element squared.
float divisor[inputNum];
- for(int i = 0; i < boost::numeric_cast<int>(inputNum); i++)
+ for(int i = 0; i < armnn::numeric_cast<int>(inputNum); i++)
{
float accumulatedScale = input[i][0][0][0]*input[i][0][0][0] +
input[i][0][0][1]*input[i][0][0][1] +
@@ -129,11 +131,11 @@ LayerTestResult<float,4> SimpleNormalizationTestImpl(
// ...where adjacent channels means within half the normSize for the channel
// The test data has only one channel, so this is simplified below.
std::vector<float> outputVector;
- for (int n = 0; n < boost::numeric_cast<int>(inputNum); ++n)
+ for (int n = 0; n < armnn::numeric_cast<int>(inputNum); ++n)
{
- for (int h = 0; h < boost::numeric_cast<int>(inputHeight); ++h)
+ for (int h = 0; h < armnn::numeric_cast<int>(inputHeight); ++h)
{
- for (int w = 0; w < boost::numeric_cast<int>(inputWidth); ++w)
+ for (int w = 0; w < armnn::numeric_cast<int>(inputWidth); ++w)
{
float accumulatedScale = input[n][0][h][w]*input[n][0][h][w];
float scale = powf((kappa + accumulatedScale * alpha), -beta);
diff --git a/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
index a4f87ff3ed..70e2e61475 100644
--- a/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
@@ -15,6 +15,7 @@
#include <armnnUtils/Permute.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <backendsCommon/WorkloadInfo.hpp>
@@ -48,15 +49,15 @@ LayerTestResult<T, 4> SimplePooling2dTestImpl(
auto widthIndex = dimensionIndices.GetWidthIndex();
auto channelsIndex = dimensionIndices.GetChannelsIndex();
- unsigned int inputHeight = boost::numeric_cast<unsigned int>(input.shape()[heightIndex]);
- unsigned int inputWidth = boost::numeric_cast<unsigned int>(input.shape()[widthIndex]);
- unsigned int inputChannels = boost::numeric_cast<unsigned int>(input.shape()[channelsIndex]);
- unsigned int inputBatchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);
+ unsigned int inputHeight = armnn::numeric_cast<unsigned int>(input.shape()[heightIndex]);
+ unsigned int inputWidth = armnn::numeric_cast<unsigned int>(input.shape()[widthIndex]);
+ unsigned int inputChannels = armnn::numeric_cast<unsigned int>(input.shape()[channelsIndex]);
+ unsigned int inputBatchSize = armnn::numeric_cast<unsigned int>(input.shape()[0]);
- unsigned int outputHeight = boost::numeric_cast<unsigned int>(outputExpected.shape()[heightIndex]);
- unsigned int outputWidth = boost::numeric_cast<unsigned int>(outputExpected.shape()[widthIndex]);
- unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[channelsIndex]);
- unsigned int outputBatchSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
+ unsigned int outputHeight = armnn::numeric_cast<unsigned int>(outputExpected.shape()[heightIndex]);
+ unsigned int outputWidth = armnn::numeric_cast<unsigned int>(outputExpected.shape()[widthIndex]);
+ unsigned int outputChannels = armnn::numeric_cast<unsigned int>(outputExpected.shape()[channelsIndex]);
+ unsigned int outputBatchSize = armnn::numeric_cast<unsigned int>(outputExpected.shape()[0]);
armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(
inputBatchSize, inputChannels, inputHeight, inputWidth, dataLayout, ArmnnType);
diff --git a/src/backends/cl/ClTensorHandleFactory.cpp b/src/backends/cl/ClTensorHandleFactory.cpp
index e92913f196..33995f7b34 100644
--- a/src/backends/cl/ClTensorHandleFactory.cpp
+++ b/src/backends/cl/ClTensorHandleFactory.cpp
@@ -7,6 +7,7 @@
#include "ClTensorHandleFactory.hpp"
#include "ClTensorHandle.hpp"
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <arm_compute/runtime/CL/CLTensor.h>
@@ -31,7 +32,7 @@ std::unique_ptr<ITensorHandle> ClTensorHandleFactory::CreateSubTensorHandle(ITen
{
// Arm compute indexes tensor coords in reverse order.
unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
- coords.set(i, boost::numeric_cast<int>(subTensorOrigin[revertedIndex]));
+ coords.set(i, armnn::numeric_cast<int>(subTensorOrigin[revertedIndex]));
}
const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 4acfa570f2..f6650dc2d0 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -11,6 +11,7 @@
#include <armnn/Exceptions.hpp>
#include <armnn/Utils.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
@@ -130,7 +131,7 @@ std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateSubTensorHandle(ITensorH
{
// Arm compute indexes tensor coords in reverse order.
unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
- coords.set(i, boost::numeric_cast<int>(subTensorOrigin[revertedIndex]));
+ coords.set(i, armnn::numeric_cast<int>(subTensorOrigin[revertedIndex]));
}
const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
diff --git a/src/backends/cl/workloads/ClArgMinMaxWorkload.cpp b/src/backends/cl/workloads/ClArgMinMaxWorkload.cpp
index a79a7b286d..5910080859 100644
--- a/src/backends/cl/workloads/ClArgMinMaxWorkload.cpp
+++ b/src/backends/cl/workloads/ClArgMinMaxWorkload.cpp
@@ -11,6 +11,7 @@
#include <backendsCommon/CpuTensorHandle.hpp>
#include <armnnUtils/TensorUtils.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <cl/ClTensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
@@ -36,7 +37,7 @@ arm_compute::Status ClArgMinMaxWorkloadValidate(const TensorInfo& input,
auto numDims = input.GetNumDimensions();
auto unsignedAxis = armnnUtils::GetUnsignedAxis(numDims, descriptor.m_Axis);
- int aclAxis = boost::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));
+ int aclAxis = armnn::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));
if (descriptor.m_Function == ArgMinMaxFunction::Max)
{
@@ -60,7 +61,7 @@ ClArgMinMaxWorkload::ClArgMinMaxWorkload(const ArgMinMaxQueueDescriptor& descrip
auto numDims = info.m_InputTensorInfos[0].GetNumDimensions();
auto unsignedAxis = armnnUtils::GetUnsignedAxis(numDims, m_Data.m_Parameters.m_Axis);
- int aclAxis = boost::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));
+ int aclAxis = armnn::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));
if (m_Data.m_Parameters.m_Function == ArgMinMaxFunction::Max)
{
diff --git a/src/backends/cl/workloads/ClBatchToSpaceNdWorkload.cpp b/src/backends/cl/workloads/ClBatchToSpaceNdWorkload.cpp
index a714e031e4..1a7a8dca81 100644
--- a/src/backends/cl/workloads/ClBatchToSpaceNdWorkload.cpp
+++ b/src/backends/cl/workloads/ClBatchToSpaceNdWorkload.cpp
@@ -9,6 +9,8 @@
#include <backendsCommon/CpuTensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/NumericCast.hpp>
+
#include "ClWorkloadUtils.hpp"
namespace armnn
@@ -27,8 +29,8 @@ ClBatchToSpaceNdWorkload::ClBatchToSpaceNdWorkload(const BatchToSpaceNdQueueDesc
input.info()->set_data_layout(aclDataLayout);
// ArmNN blockShape is [H, W] Cl asks for W, H
- int32_t blockHeight = boost::numeric_cast<int32_t>(desc.m_Parameters.m_BlockShape[0]);
- int32_t blockWidth = boost::numeric_cast<int32_t>(desc.m_Parameters.m_BlockShape[1]);
+ int32_t blockHeight = armnn::numeric_cast<int32_t>(desc.m_Parameters.m_BlockShape[0]);
+ int32_t blockWidth = armnn::numeric_cast<int32_t>(desc.m_Parameters.m_BlockShape[1]);
arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
output.info()->set_data_layout(aclDataLayout);
@@ -49,8 +51,8 @@ arm_compute::Status ClBatchToSpaceNdWorkloadValidate(const TensorInfo& input,
const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, dataLayout);
// ArmNN blockShape is [H, W] Cl asks for W, H
- int32_t blockHeight = boost::numeric_cast<int32_t>(desc.m_BlockShape[0]);
- int32_t blockWidth = boost::numeric_cast<int32_t>(desc.m_BlockShape[1]);
+ int32_t blockHeight = armnn::numeric_cast<int32_t>(desc.m_BlockShape[0]);
+ int32_t blockWidth = armnn::numeric_cast<int32_t>(desc.m_BlockShape[1]);
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, dataLayout);
diff --git a/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp b/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp
index 04885b18aa..43c81dc209 100644
--- a/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp
@@ -8,12 +8,12 @@
#include "ClWorkloadUtils.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <cl/ClTensorHandle.hpp>
-#include <boost/numeric/conversion/cast.hpp>
-
namespace armnn
{
@@ -26,7 +26,7 @@ arm_compute::Status ClDepthToSpaceWorkloadValidate(const TensorInfo& input,
DataLayout dataLayout = desc.m_DataLayout;
const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, dataLayout);
- int32_t blockSize = boost::numeric_cast<int32_t>(desc.m_BlockSize);
+ int32_t blockSize = armnn::numeric_cast<int32_t>(desc.m_BlockSize);
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, dataLayout);
@@ -48,7 +48,7 @@ ClDepthToSpaceWorkload::ClDepthToSpaceWorkload(const DepthToSpaceQueueDescriptor
PolymorphicPointerDowncast<IClTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
input.info()->set_data_layout(aclDataLayout);
- int32_t blockSize = boost::numeric_cast<int32_t>(desc.m_Parameters.m_BlockSize);
+ int32_t blockSize = armnn::numeric_cast<int32_t>(desc.m_Parameters.m_BlockSize);
arm_compute::ICLTensor& output =
PolymorphicPointerDowncast<IClTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
diff --git a/src/backends/cl/workloads/ClLstmFloatWorkload.cpp b/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
index 9d06428902..fe9b45e054 100644
--- a/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
@@ -9,6 +9,8 @@
#include <cl/ClLayerSupport.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/NumericCast.hpp>
+
#include <arm_compute/runtime/CL/functions/CLLSTMLayer.h>
#include "ClWorkloadUtils.hpp"
@@ -132,8 +134,8 @@ ClLstmFloatWorkload::ClLstmFloatWorkload(const LstmQueueDescriptor &descriptor,
// Get the batch_size and the num_units from the cellStateIn dimensions
const TensorInfo& inputTensorInfo = info.m_InputTensorInfos[2];
- const unsigned int batch_size = boost::numeric_cast<unsigned int>(inputTensorInfo.GetShape()[0]);
- const unsigned int num_units = boost::numeric_cast<unsigned int>(inputTensorInfo.GetShape()[1]);
+ const unsigned int batch_size = armnn::numeric_cast<unsigned int>(inputTensorInfo.GetShape()[0]);
+ const unsigned int num_units = armnn::numeric_cast<unsigned int>(inputTensorInfo.GetShape()[1]);
m_ScratchBuffer = std::make_unique<arm_compute::CLTensor>();
if (m_Data.m_Parameters.m_CifgEnabled)
diff --git a/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp b/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp
index b87658b3f9..443c56b7b5 100644
--- a/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp
+++ b/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp
@@ -9,6 +9,7 @@
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
@@ -27,8 +28,8 @@ arm_compute::Status ClSpaceToBatchNdWorkloadValidate(const TensorInfo& input,
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
// ArmNN blockShape is [H, W] Cl asks for W, H
- int32_t blockHeight = boost::numeric_cast<int32_t>(descriptor.m_BlockShape[0]);
- int32_t blockWidth = boost::numeric_cast<int32_t>(descriptor.m_BlockShape[1]);
+ int32_t blockHeight = armnn::numeric_cast<int32_t>(descriptor.m_BlockShape[0]);
+ int32_t blockWidth = armnn::numeric_cast<int32_t>(descriptor.m_BlockShape[1]);
arm_compute::Size2D paddingLeftTop = BuildArmComputeSize2D(
descriptor.m_PadList[1].first, descriptor.m_PadList[0].first);
@@ -55,8 +56,8 @@ ClSpaceToBatchNdWorkload::ClSpaceToBatchNdWorkload(
armnn::PolymorphicPointerDowncast<IClTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
// ArmNN blockShape is [H, W] Cl asks for W, H
- int32_t blockHeight = boost::numeric_cast<int32_t>(m_Data.m_Parameters.m_BlockShape[0]);
- int32_t blockWidth = boost::numeric_cast<int32_t>(m_Data.m_Parameters.m_BlockShape[1]);
+ int32_t blockHeight = armnn::numeric_cast<int32_t>(m_Data.m_Parameters.m_BlockShape[0]);
+ int32_t blockWidth = armnn::numeric_cast<int32_t>(m_Data.m_Parameters.m_BlockShape[1]);
arm_compute::Size2D paddingLeftTop = BuildArmComputeSize2D(
m_Data.m_Parameters.m_PadList[1].first, m_Data.m_Parameters.m_PadList[0].first);
diff --git a/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp b/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp
index 1acb5c64e6..f35fe0e3c9 100644
--- a/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp
+++ b/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp
@@ -11,6 +11,8 @@
#include <backendsCommon/CpuTensorHandle.hpp>
#include <cl/ClTensorHandle.hpp>
+#include <armnn/utility/NumericCast.hpp>
+
namespace armnn
{
using namespace armcomputetensorutils;
@@ -26,7 +28,7 @@ ClSpaceToDepthWorkload::ClSpaceToDepthWorkload(const SpaceToDepthQueueDescriptor
arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
input.info()->set_data_layout(aclDataLayout);
- int32_t blockSize = boost::numeric_cast<int32_t>(desc.m_Parameters.m_BlockSize);
+ int32_t blockSize = armnn::numeric_cast<int32_t>(desc.m_Parameters.m_BlockSize);
arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
output.info()->set_data_layout(aclDataLayout);
@@ -47,7 +49,7 @@ arm_compute::Status ClSpaceToDepthWorkloadValidate(const TensorInfo& input,
DataLayout dataLayout = desc.m_DataLayout;
const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, dataLayout);
- int32_t blockSize = boost::numeric_cast<int32_t>(desc.m_BlockSize);
+ int32_t blockSize = armnn::numeric_cast<int32_t>(desc.m_BlockSize);
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, dataLayout);
diff --git a/src/backends/cl/workloads/ClStackWorkload.cpp b/src/backends/cl/workloads/ClStackWorkload.cpp
index e434f9897f..c0b88b1193 100644
--- a/src/backends/cl/workloads/ClStackWorkload.cpp
+++ b/src/backends/cl/workloads/ClStackWorkload.cpp
@@ -5,6 +5,7 @@
#include "ClStackWorkload.hpp"
#include "ClWorkloadUtils.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <cl/ClTensorHandle.hpp>
@@ -12,8 +13,6 @@
#include <arm_compute/core/Types.h>
-#include <boost/numeric/conversion/cast.hpp>
-
namespace armnn
{
using namespace armcomputetensorutils;
@@ -22,8 +21,8 @@ namespace
{
int CalcAxis(const unsigned int axis, const unsigned int inputDimensions)
{
- const int intAxis = boost::numeric_cast<int>(axis);
- return boost::numeric_cast<int>(inputDimensions) - intAxis;
+ const int intAxis = armnn::numeric_cast<int>(axis);
+ return armnn::numeric_cast<int>(inputDimensions) - intAxis;
}
} //namespace
diff --git a/src/backends/cl/workloads/ClStridedSliceWorkload.cpp b/src/backends/cl/workloads/ClStridedSliceWorkload.cpp
index 6b0a34d90e..b094a910f4 100644
--- a/src/backends/cl/workloads/ClStridedSliceWorkload.cpp
+++ b/src/backends/cl/workloads/ClStridedSliceWorkload.cpp
@@ -13,7 +13,8 @@
#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadUtils.hpp>
-#include <boost/numeric/conversion/cast.hpp>
+#include <armnn/utility/NumericCast.hpp>
+
#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
@@ -36,7 +37,7 @@ arm_compute::Status ClStridedSliceWorkloadValidate(const TensorInfo& input,
std::tie(starts, ends, strides) = SetClStridedSliceData(descriptor.m_Begin, descriptor.m_End, descriptor.m_Stride);
- auto numDimensions = boost::numeric_cast<int>(input.GetNumDimensions());
+ auto numDimensions = armnn::numeric_cast<int>(input.GetNumDimensions());
int32_t begin_mask = ConvertMaskToACLFormat(descriptor.m_BeginMask, numDimensions);
int32_t end_mask = ConvertMaskToACLFormat(descriptor.m_EndMask, numDimensions);
int32_t shrink_axis_mask = ConvertMaskToACLFormat(descriptor.m_ShrinkAxisMask, numDimensions);
@@ -68,7 +69,7 @@ ClStridedSliceWorkload::ClStridedSliceWorkload(const StridedSliceQueueDescriptor
m_Data.m_Parameters.m_End,
m_Data.m_Parameters.m_Stride);
- auto numDimensions = boost::numeric_cast<int>(info.m_InputTensorInfos[0].GetNumDimensions());
+ auto numDimensions = armnn::numeric_cast<int>(info.m_InputTensorInfos[0].GetNumDimensions());
int32_t begin_mask = ConvertMaskToACLFormat(m_Data.m_Parameters.m_BeginMask, numDimensions);
int32_t end_mask = ConvertMaskToACLFormat(m_Data.m_Parameters.m_EndMask, numDimensions);
int32_t shrink_axis_mask = ConvertMaskToACLFormat(m_Data.m_Parameters.m_ShrinkAxisMask, numDimensions);
diff --git a/src/backends/neon/NeonTensorHandleFactory.cpp b/src/backends/neon/NeonTensorHandleFactory.cpp
index 1dd83950cd..ce3ce5c0d7 100644
--- a/src/backends/neon/NeonTensorHandleFactory.cpp
+++ b/src/backends/neon/NeonTensorHandleFactory.cpp
@@ -9,6 +9,7 @@
#include "Layer.hpp"
#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
namespace armnn
@@ -29,7 +30,7 @@ std::unique_ptr<ITensorHandle> NeonTensorHandleFactory::CreateSubTensorHandle(IT
{
// Arm compute indexes tensor coords in reverse order.
unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
- coords.set(i, boost::numeric_cast<int>(subTensorOrigin[revertedIndex]));
+ coords.set(i, armnn::numeric_cast<int>(subTensorOrigin[revertedIndex]));
}
const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 928989b1e4..709dd93e9b 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -12,6 +12,7 @@
#include <armnn/Utils.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
@@ -73,7 +74,7 @@ std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateSubTensorHandle(ITenso
{
// Arm compute indexes tensor coords in reverse order.
unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
- coords.set(i, boost::numeric_cast<int>(subTensorOrigin[revertedIndex]));
+ coords.set(i, armnn::numeric_cast<int>(subTensorOrigin[revertedIndex]));
}
const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
diff --git a/src/backends/neon/test/NeonTensorHandleTests.cpp b/src/backends/neon/test/NeonTensorHandleTests.cpp
index 3cea29323a..e6d740280d 100644
--- a/src/backends/neon/test/NeonTensorHandleTests.cpp
+++ b/src/backends/neon/test/NeonTensorHandleTests.cpp
@@ -8,6 +8,7 @@
#include <neon/NeonTensorHandle.hpp>
#include <neon/NeonTensorHandleFactory.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <test/GraphUtils.hpp>
@@ -366,7 +367,7 @@ BOOST_AUTO_TEST_CASE(SplitteronXorYNoPaddingRequiredTest)
for (unsigned int i = 0; i < outputShapes.size(); ++i)
{
TensorInfo outputTensorInfo(outputShapes[i], armnn::DataType::Float32, qScale, qOffset);
- IConnectableLayer* output = net->AddOutputLayer(boost::numeric_cast<LayerBindingId>(i));
+ IConnectableLayer* output = net->AddOutputLayer(armnn::numeric_cast<LayerBindingId>(i));
Connect(pooling2dLayers[i], output, outputTensorInfo, 0, 0);
}
@@ -541,7 +542,7 @@ BOOST_AUTO_TEST_CASE(SplitteronXorYPaddingRequiredTest)
for (unsigned int i = 0; i < outputShapes.size(); ++i)
{
TensorInfo outputTensorInfo(outputShapes[i], armnn::DataType::Float32, qScale, qOffset);
- IConnectableLayer* output = net->AddOutputLayer(boost::numeric_cast<LayerBindingId>(i));
+ IConnectableLayer* output = net->AddOutputLayer(armnn::numeric_cast<LayerBindingId>(i));
Connect(pooling2dLayers[i], output, outputTensorInfo, 0, 0);
}
diff --git a/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp b/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp
index 0fb819db0b..6290ecce17 100644
--- a/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp
+++ b/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp
@@ -10,6 +10,7 @@
#include <backendsCommon/CpuTensorHandle.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <armnnUtils/TensorUtils.hpp>
@@ -36,7 +37,7 @@ arm_compute::Status NeonArgMinMaxWorkloadValidate(const TensorInfo& input,
auto numDims = input.GetNumDimensions();
auto unsignedAxis = armnnUtils::GetUnsignedAxis(numDims, descriptor.m_Axis);
- int aclAxis = boost::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));
+ int aclAxis = armnn::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));
if (descriptor.m_Function == ArgMinMaxFunction::Max)
{
@@ -60,7 +61,7 @@ NeonArgMinMaxWorkload::NeonArgMinMaxWorkload(const ArgMinMaxQueueDescriptor& des
auto numDims = info.m_InputTensorInfos[0].GetNumDimensions();
auto unsignedAxis = armnnUtils::GetUnsignedAxis(numDims, m_Data.m_Parameters.m_Axis);
- int aclAxis = boost::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));
+ int aclAxis = armnn::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));
auto layer = std::make_unique<arm_compute::NEArgMinMaxLayer>();
diff --git a/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp b/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp
index d2f538745c..3d479ff80d 100644
--- a/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp
+++ b/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp
@@ -7,7 +7,9 @@
#include "NeonWorkloadUtils.hpp"
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
+
#include <ResolveType.hpp>
namespace armnn
@@ -23,8 +25,8 @@ arm_compute::Status NeonBatchToSpaceNdWorkloadValidate(const TensorInfo& input,
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, desc.m_DataLayout);
// ArmNN blockShape is [H, W] Cl asks for W, H
- int32_t blockHeight = boost::numeric_cast<int32_t>(desc.m_BlockShape[0]);
- int32_t blockWidth = boost::numeric_cast<int32_t>(desc.m_BlockShape[1]);
+ int32_t blockHeight = armnn::numeric_cast<int32_t>(desc.m_BlockShape[0]);
+ int32_t blockWidth = armnn::numeric_cast<int32_t>(desc.m_BlockShape[1]);
const arm_compute::Status aclStatus = arm_compute::NEBatchToSpaceLayer::validate(&aclInputInfo,
blockWidth,
@@ -49,8 +51,8 @@ NeonBatchToSpaceNdWorkload::NeonBatchToSpaceNdWorkload(const BatchToSpaceNdQueue
output.info()->set_data_layout(aclDataLayout);
// ArmNN blockShape is [H, W] Cl asks for W, H
- int32_t blockHeight = boost::numeric_cast<int32_t>(desc.m_Parameters.m_BlockShape[0]);
- int32_t blockWidth = boost::numeric_cast<int32_t>(desc.m_Parameters.m_BlockShape[1]);
+ int32_t blockHeight = armnn::numeric_cast<int32_t>(desc.m_Parameters.m_BlockShape[0]);
+ int32_t blockWidth = armnn::numeric_cast<int32_t>(desc.m_Parameters.m_BlockShape[1]);
m_Layer.reset(new arm_compute::NEBatchToSpaceLayer());
m_Layer->configure(&input, blockWidth, blockHeight, &output);
diff --git a/src/backends/neon/workloads/NeonDepthToSpaceWorkload.cpp b/src/backends/neon/workloads/NeonDepthToSpaceWorkload.cpp
index 12e7d206bf..2c4a6517e7 100644
--- a/src/backends/neon/workloads/NeonDepthToSpaceWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthToSpaceWorkload.cpp
@@ -8,10 +8,9 @@
#include "NeonWorkloadUtils.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <boost/numeric/conversion/cast.hpp>
-
namespace armnn
{
@@ -25,7 +24,7 @@ arm_compute::Status NeonDepthToSpaceWorkloadValidate(const TensorInfo& input,
const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input, dataLayout);
const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output, dataLayout);
- int32_t blockSize = boost::numeric_cast<int32_t>(descriptor.m_BlockSize);
+ int32_t blockSize = armnn::numeric_cast<int32_t>(descriptor.m_BlockSize);
return arm_compute::NEDepthToSpaceLayer::validate(&aclInput, &aclOutput, blockSize);
}
@@ -42,7 +41,7 @@ NeonDepthToSpaceWorkload::NeonDepthToSpaceWorkload(const DepthToSpaceQueueDescri
PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
input.info()->set_data_layout(aclDataLayout);
- int32_t blockSize = boost::numeric_cast<int32_t>(desc.m_Parameters.m_BlockSize);
+ int32_t blockSize = armnn::numeric_cast<int32_t>(desc.m_Parameters.m_BlockSize);
arm_compute::ITensor& output =
PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
diff --git a/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp b/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp
index d5f3c5de34..175e908817 100644
--- a/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp
@@ -8,6 +8,8 @@
#include "aclCommon/ArmComputeTensorUtils.hpp"
+#include <armnn/utility/NumericCast.hpp>
+
#include "neon/NeonTensorHandle.hpp"
namespace armnn
@@ -131,8 +133,8 @@ NeonLstmFloatWorkload::NeonLstmFloatWorkload(const LstmQueueDescriptor &descript
// Get the batch_size and the num_units from the cellStateIn dimensions
const TensorInfo& inputTensorInfo = info.m_InputTensorInfos[2];
- const unsigned int batch_size = boost::numeric_cast<unsigned int>(inputTensorInfo.GetShape()[0]);
- const unsigned int num_units = boost::numeric_cast<unsigned int>(inputTensorInfo.GetShape()[1]);
+ const unsigned int batch_size = armnn::numeric_cast<unsigned int>(inputTensorInfo.GetShape()[0]);
+ const unsigned int num_units = armnn::numeric_cast<unsigned int>(inputTensorInfo.GetShape()[1]);
m_ScratchBuffer = std::make_unique<arm_compute::Tensor>();
if (m_Data.m_Parameters.m_CifgEnabled)
diff --git a/src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.cpp b/src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.cpp
index d68ab4c4ac..42dd49cdc1 100644
--- a/src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.cpp
@@ -7,7 +7,9 @@
#include "NeonWorkloadUtils.hpp"
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
+
#include <ResolveType.hpp>
namespace armnn
@@ -23,8 +25,8 @@ arm_compute::Status NeonSpaceToBatchNdWorkloadValidate(const TensorInfo& input,
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
// ArmNN blockShape is [H, W] Cl asks for W, H
- int32_t blockHeight = boost::numeric_cast<int32_t>(descriptor.m_BlockShape[0]);
- int32_t blockWidth = boost::numeric_cast<int32_t>(descriptor.m_BlockShape[1]);
+ int32_t blockHeight = armnn::numeric_cast<int32_t>(descriptor.m_BlockShape[0]);
+ int32_t blockWidth = armnn::numeric_cast<int32_t>(descriptor.m_BlockShape[1]);
arm_compute::Size2D paddingLeftTop = BuildArmComputeSize2D(
descriptor.m_PadList[1].first, descriptor.m_PadList[0].first);
@@ -51,8 +53,8 @@ NeonSpaceToBatchNdWorkload::NeonSpaceToBatchNdWorkload(const SpaceToBatchNdQueue
PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
// ArmNN blockShape is [H, W]; ACL asks for W, H
- int32_t blockHeight = boost::numeric_cast<int32_t>(m_Data.m_Parameters.m_BlockShape[0]);
- int32_t blockWidth = boost::numeric_cast<int32_t>(m_Data.m_Parameters.m_BlockShape[1]);
+ int32_t blockHeight = armnn::numeric_cast<int32_t>(m_Data.m_Parameters.m_BlockShape[0]);
+ int32_t blockWidth = armnn::numeric_cast<int32_t>(m_Data.m_Parameters.m_BlockShape[1]);
arm_compute::Size2D paddingLeftTop = BuildArmComputeSize2D(
m_Data.m_Parameters.m_PadList[1].first, m_Data.m_Parameters.m_PadList[0].first);
diff --git a/src/backends/neon/workloads/NeonSpaceToDepthWorkload.cpp b/src/backends/neon/workloads/NeonSpaceToDepthWorkload.cpp
index 2982cd181d..43c991cfb2 100644
--- a/src/backends/neon/workloads/NeonSpaceToDepthWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSpaceToDepthWorkload.cpp
@@ -6,7 +6,9 @@
#include "NeonSpaceToDepthWorkload.hpp"
#include "NeonWorkloadUtils.hpp"
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
+
#include <ResolveType.hpp>
namespace armnn
@@ -22,7 +24,7 @@ arm_compute::Status NeonSpaceToDepthWorkloadValidate(const TensorInfo& input,
const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input, dataLayout);
const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output, dataLayout);
- int32_t blockSize = boost::numeric_cast<int32_t>(descriptor.m_BlockSize);
+ int32_t blockSize = armnn::numeric_cast<int32_t>(descriptor.m_BlockSize);
return arm_compute::NESpaceToDepthLayer::validate(&aclInput, &aclOutput, blockSize);
}
@@ -38,7 +40,7 @@ NeonSpaceToDepthWorkload::NeonSpaceToDepthWorkload(const SpaceToDepthQueueDescri
arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
input.info()->set_data_layout(aclDataLayout);
- int32_t blockSize = boost::numeric_cast<int32_t>(desc.m_Parameters.m_BlockSize);
+ int32_t blockSize = armnn::numeric_cast<int32_t>(desc.m_Parameters.m_BlockSize);
arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
output.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/neon/workloads/NeonStackWorkload.cpp b/src/backends/neon/workloads/NeonStackWorkload.cpp
index a3ba8d888d..696de65620 100644
--- a/src/backends/neon/workloads/NeonStackWorkload.cpp
+++ b/src/backends/neon/workloads/NeonStackWorkload.cpp
@@ -6,12 +6,11 @@
#include "NeonWorkloadUtils.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <neon/NeonTensorHandle.hpp>
-#include <boost/numeric/conversion/cast.hpp>
-
namespace armnn
{
using namespace armcomputetensorutils;
@@ -20,8 +19,8 @@ namespace
{
int CalcAxis(const unsigned int axis, const unsigned int inputDimensions)
{
- const int intAxis = boost::numeric_cast<int>(axis);
- return boost::numeric_cast<int>(inputDimensions) - intAxis;
+ const int intAxis = armnn::numeric_cast<int>(axis);
+ return armnn::numeric_cast<int>(inputDimensions) - intAxis;
}
} //namespace
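CalcAxis above turns an armnn axis into ACL's numbering, which counts dimensions from the opposite end. An illustrative use (values not from the diff):

    // For a 4D input, armnn axis 1 lands at ACL axis 4 - 1 = 3.
    int aclAxis = CalcAxis(1u, 4u); // aclAxis == 3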
diff --git a/src/backends/neon/workloads/NeonStridedSliceWorkload.cpp b/src/backends/neon/workloads/NeonStridedSliceWorkload.cpp
index 282005c7cc..d0aee07f9b 100644
--- a/src/backends/neon/workloads/NeonStridedSliceWorkload.cpp
+++ b/src/backends/neon/workloads/NeonStridedSliceWorkload.cpp
@@ -9,6 +9,7 @@
#include <neon/NeonTensorHandle.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/WorkloadUtils.hpp>
@@ -30,7 +31,7 @@ arm_compute::Status NeonStridedSliceWorkloadValidate(const TensorInfo& input,
descriptor.m_End,
descriptor.m_Stride);
- auto numDimensions = boost::numeric_cast<int>(input.GetNumDimensions());
+ auto numDimensions = armnn::numeric_cast<int>(input.GetNumDimensions());
int32_t begin_mask = ConvertMaskToACLFormat(descriptor.m_BeginMask, numDimensions);
int32_t end_mask = ConvertMaskToACLFormat(descriptor.m_EndMask, numDimensions);
int32_t shrink_axis_mask = ConvertMaskToACLFormat(descriptor.m_ShrinkAxisMask, numDimensions);
@@ -62,7 +63,7 @@ NeonStridedSliceWorkload::NeonStridedSliceWorkload(const StridedSliceQueueDescri
m_Data.m_Parameters.m_End,
m_Data.m_Parameters.m_Stride);
- auto numDimensions = boost::numeric_cast<int>(info.m_InputTensorInfos[0].GetNumDimensions());
+ auto numDimensions = armnn::numeric_cast<int>(info.m_InputTensorInfos[0].GetNumDimensions());
int32_t begin_mask = ConvertMaskToACLFormat(m_Data.m_Parameters.m_BeginMask, numDimensions);
int32_t end_mask = ConvertMaskToACLFormat(m_Data.m_Parameters.m_EndMask, numDimensions);
int32_t shrink_axis_mask = ConvertMaskToACLFormat(m_Data.m_Parameters.m_ShrinkAxisMask, numDimensions);
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 5e3c96d31d..52c079fae4 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -9,17 +9,14 @@
#include <armnn/Types.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <LayerSupportCommon.hpp>
#include <backendsCommon/LayerSupportRules.hpp>
-#include <boost/cast.hpp>
-
#include <vector>
#include <array>
-using namespace boost;
-
namespace armnn
{
@@ -1326,7 +1323,7 @@ bool RefLayerSupport::IsMeanSupported(const TensorInfo& input,
}
else
{
- auto outputDim = input.GetNumDimensions() - boost::numeric_cast<unsigned int>(descriptor.m_Axis.size());
+ auto outputDim = input.GetNumDimensions() - armnn::numeric_cast<unsigned int>(descriptor.m_Axis.size());
if (outputDim > 0)
{
diff --git a/src/backends/reference/workloads/ArgMinMax.cpp b/src/backends/reference/workloads/ArgMinMax.cpp
index 637aa17013..c455c52e5a 100644
--- a/src/backends/reference/workloads/ArgMinMax.cpp
+++ b/src/backends/reference/workloads/ArgMinMax.cpp
@@ -7,7 +7,7 @@
#include <armnnUtils/TensorUtils.hpp>
-#include <boost/numeric/conversion/cast.hpp>
+#include <armnn/utility/NumericCast.hpp>
namespace armnn
{
@@ -39,7 +39,7 @@ void ArgMinMax(Decoder<float>& in, int32_t* out, const TensorInfo& inputTensorIn
tmpIndex = i;
}
}
- out[outer * innerElements + inner] = boost::numeric_cast<int32_t>(tmpIndex);
+ out[outer * innerElements + inner] = armnn::numeric_cast<int32_t>(tmpIndex);
}
}
}
diff --git a/src/backends/reference/workloads/DetectionPostProcess.cpp b/src/backends/reference/workloads/DetectionPostProcess.cpp
index 61a504ec6b..ce07110da9 100644
--- a/src/backends/reference/workloads/DetectionPostProcess.cpp
+++ b/src/backends/reference/workloads/DetectionPostProcess.cpp
@@ -6,6 +6,7 @@
#include "DetectionPostProcess.hpp"
#include <armnn/utility/Assert.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <boost/numeric/conversion/cast.hpp>
@@ -67,7 +68,7 @@ std::vector<unsigned int> NonMaxSuppression(unsigned int numBoxes,
}
// Sort the indices based on scores.
- unsigned int numAboveThreshold = boost::numeric_cast<unsigned int>(scoresAboveThreshold.size());
+ unsigned int numAboveThreshold = armnn::numeric_cast<unsigned int>(scoresAboveThreshold.size());
std::vector<unsigned int> sortedIndices = GenerateRangeK(numAboveThreshold);
TopKSort(numAboveThreshold, sortedIndices.data(), scoresAboveThreshold.data(), numAboveThreshold);
@@ -267,7 +268,7 @@ void DetectionPostProcess(const TensorInfo& boxEncodingsInfo,
}
// Select max detection numbers of the highest score across all classes
- unsigned int numSelected = boost::numeric_cast<unsigned int>(selectedBoxesAfterNms.size());
+ unsigned int numSelected = armnn::numeric_cast<unsigned int>(selectedBoxesAfterNms.size());
unsigned int numOutput = std::min(desc.m_MaxDetections, numSelected);
// Sort the max scores among the selected indices.
@@ -311,7 +312,7 @@ void DetectionPostProcess(const TensorInfo& boxEncodingsInfo,
desc.m_MaxDetections,
desc.m_NmsIouThreshold);
- unsigned int numSelected = boost::numeric_cast<unsigned int>(selectedIndices.size());
+ unsigned int numSelected = armnn::numeric_cast<unsigned int>(selectedIndices.size());
unsigned int numOutput = std::min(desc.m_MaxDetections, numSelected);
AllocateOutputData(detectionBoxesInfo.GetShape()[1], numOutput, boxCorners, selectedIndices,
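The boost header is still included in this file; only the casts themselves move to armnn::numeric_cast. Each converted cast here has the same shape: a std::size_t container size narrowed to unsigned int. A small self-contained sketch of that shape, with illustrative values:

    #include <armnn/utility/NumericCast.hpp>
    #include <vector>

    // Illustrative: std::vector::size() returns std::size_t, which is
    // wider than unsigned int on LP64 platforms, so the element count is
    // range-checked rather than silently truncated.
    std::vector<unsigned int> selectedIndices = { 0u, 3u, 7u };
    unsigned int numSelected =
        armnn::numeric_cast<unsigned int>(selectedIndices.size());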
diff --git a/src/backends/reference/workloads/Gather.cpp b/src/backends/reference/workloads/Gather.cpp
index 3e2190c81b..03aa2458f5 100644
--- a/src/backends/reference/workloads/Gather.cpp
+++ b/src/backends/reference/workloads/Gather.cpp
@@ -9,8 +9,7 @@
#include <backendsCommon/WorkloadData.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
-
-#include <boost/numeric/conversion/cast.hpp>
+#include <armnn/utility/NumericCast.hpp>
namespace armnn
{
@@ -37,7 +36,7 @@ void Gather(const TensorInfo& paramsInfo,
unsigned int outIndex = 0;
for (unsigned int i = 0; i < indicesInfo.GetNumElements(); ++i)
{
- unsigned int indx = boost::numeric_cast<unsigned int>(indices[i]);
+ unsigned int indx = armnn::numeric_cast<unsigned int>(indices[i]);
ARMNN_ASSERT(indices[i] >= 0 && indx < paramsShape[0]);
diff --git a/src/backends/reference/workloads/LogSoftmax.cpp b/src/backends/reference/workloads/LogSoftmax.cpp
index 1998f50c87..2b6384913e 100644
--- a/src/backends/reference/workloads/LogSoftmax.cpp
+++ b/src/backends/reference/workloads/LogSoftmax.cpp
@@ -8,17 +8,16 @@
#include <armnnUtils/TensorUtils.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <cmath>
-#include <boost/numeric/conversion/cast.hpp>
-
namespace
{
inline bool ValidateAxis(int axis, unsigned int numDimensions)
{
- const int sNumDimensions = boost::numeric_cast<int>(numDimensions);
+ const int sNumDimensions = armnn::numeric_cast<int>(numDimensions);
return axis < sNumDimensions && axis >= -sNumDimensions;
}
@@ -40,8 +39,8 @@ void LogSoftmax(Decoder<float>& input,
IgnoreUnused(axisIsValid);
unsigned int uAxis = descriptor.m_Axis < 0 ?
- numDimensions - boost::numeric_cast<unsigned int>(std::abs(descriptor.m_Axis)) :
- boost::numeric_cast<unsigned int>(descriptor.m_Axis);
+ numDimensions - armnn::numeric_cast<unsigned int>(std::abs(descriptor.m_Axis)) :
+ armnn::numeric_cast<unsigned int>(descriptor.m_Axis);
const TensorShape& inputShape = inputInfo.GetShape();
const unsigned int outerSize = armnnUtils::GetNumElementsBetween(inputShape, 0, uAxis);
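The uAxis expression above normalises a possibly negative axis into an unsigned dimension index. Restated as a stand-alone sketch (the ResolveAxis name is illustrative, not from the diff):

    #include <armnn/utility/NumericCast.hpp>
    #include <cstdlib>

    // Illustrative restatement of the hunk above: a negative axis counts
    // back from the last dimension.
    unsigned int ResolveAxis(int axis, unsigned int numDimensions)
    {
        return axis < 0
            ? numDimensions - armnn::numeric_cast<unsigned int>(std::abs(axis))
            : armnn::numeric_cast<unsigned int>(axis);
    }
    // ResolveAxis(-1, 4) == 3; ResolveAxis(2, 4) == 2.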
diff --git a/src/backends/reference/workloads/Mean.cpp b/src/backends/reference/workloads/Mean.cpp
index 72080ef042..e43a4d5312 100644
--- a/src/backends/reference/workloads/Mean.cpp
+++ b/src/backends/reference/workloads/Mean.cpp
@@ -6,6 +6,8 @@
#include "Mean.hpp"
#include <backendsCommon/WorkloadData.hpp>
+#include <armnn/utility/NumericCast.hpp>
+
#include <boost/numeric/conversion/cast.hpp>
#include <cmath>
@@ -111,7 +113,7 @@ void Mean(const armnn::TensorInfo& inputInfo,
resolvedAxis.push_back(idx);
}
}
- auto numResolvedAxis = boost::numeric_cast<unsigned int>(resolvedAxis.size());
+ auto numResolvedAxis = armnn::numeric_cast<unsigned int>(resolvedAxis.size());
// Iterate through input_data and sum over the reduced axes.
for (bool hasNext = true; hasNext; hasNext = NextIndex(inputNumDims, inputDims, tempIndex))
diff --git a/src/backends/reference/workloads/Pooling2d.cpp b/src/backends/reference/workloads/Pooling2d.cpp
index 9b220619a4..435671ffad 100644
--- a/src/backends/reference/workloads/Pooling2d.cpp
+++ b/src/backends/reference/workloads/Pooling2d.cpp
@@ -9,6 +9,7 @@
#include <armnn/Types.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <boost/numeric/conversion/cast.hpp>
@@ -151,20 +152,20 @@ void Pooling2d(Decoder<float>& rInputDecoder,
auto heightIndex = dataLayout.GetHeightIndex();
auto widthIndex = dataLayout.GetWidthIndex();
- const int batchSize = boost::numeric_cast<int>(outputInfo.GetShape()[0]);
- const int channels = boost::numeric_cast<int>(outputInfo.GetShape()[channelsIndex]);
- const int heightOutput = boost::numeric_cast<int>(outputInfo.GetShape()[heightIndex]);
- const int widthOutput = boost::numeric_cast<int>(outputInfo.GetShape()[widthIndex]);
- const int heightInput = boost::numeric_cast<int>(inputInfo.GetShape()[heightIndex]);
- const int widthInput = boost::numeric_cast<int>(inputInfo.GetShape()[widthIndex]);
- const int padLeft = boost::numeric_cast<int>(params.m_PadLeft);
- const int padRight = boost::numeric_cast<int>(params.m_PadRight);
- const int padTop = boost::numeric_cast<int>(params.m_PadTop);
- const int padBottom = boost::numeric_cast<int>(params.m_PadBottom);
- const int strideX = boost::numeric_cast<int>(params.m_StrideX);
- const int strideY = boost::numeric_cast<int>(params.m_StrideY);
- const int poolHeight = boost::numeric_cast<int>(params.m_PoolHeight);
- const int poolWidth = boost::numeric_cast<int>(params.m_PoolWidth);
+ const int batchSize = armnn::numeric_cast<int>(outputInfo.GetShape()[0]);
+ const int channels = armnn::numeric_cast<int>(outputInfo.GetShape()[channelsIndex]);
+ const int heightOutput = armnn::numeric_cast<int>(outputInfo.GetShape()[heightIndex]);
+ const int widthOutput = armnn::numeric_cast<int>(outputInfo.GetShape()[widthIndex]);
+ const int heightInput = armnn::numeric_cast<int>(inputInfo.GetShape()[heightIndex]);
+ const int widthInput = armnn::numeric_cast<int>(inputInfo.GetShape()[widthIndex]);
+ const int padLeft = armnn::numeric_cast<int>(params.m_PadLeft);
+ const int padRight = armnn::numeric_cast<int>(params.m_PadRight);
+ const int padTop = armnn::numeric_cast<int>(params.m_PadTop);
+ const int padBottom = armnn::numeric_cast<int>(params.m_PadBottom);
+ const int strideX = armnn::numeric_cast<int>(params.m_StrideX);
+ const int strideY = armnn::numeric_cast<int>(params.m_StrideY);
+ const int poolHeight = armnn::numeric_cast<int>(params.m_PoolHeight);
+ const int poolWidth = armnn::numeric_cast<int>(params.m_PoolWidth);
float defaultInitializer = DefaultInitializer(params.m_PoolType);
@@ -221,10 +222,10 @@ void Pooling2d(Decoder<float>& rInputDecoder,
result = 0.0f;
unsigned int outputIndex = dataLayout.GetIndex(outputShape,
- boost::numeric_cast<unsigned int>(n),
- boost::numeric_cast<unsigned int>(c),
- boost::numeric_cast<unsigned int>(yOutput),
- boost::numeric_cast<unsigned int>(xOutput));
+ armnn::numeric_cast<unsigned int>(n),
+ armnn::numeric_cast<unsigned int>(c),
+ armnn::numeric_cast<unsigned int>(yOutput),
+ armnn::numeric_cast<unsigned int>(xOutput));
rOutputEncoder[outputIndex];
rOutputEncoder.Set(result);
continue;
@@ -244,10 +245,10 @@ void Pooling2d(Decoder<float>& rInputDecoder,
for (auto xInput = wstart; xInput < wend; xInput++)
{
unsigned int inputIndex = dataLayout.GetIndex(inputShape,
- boost::numeric_cast<unsigned int>(n),
- boost::numeric_cast<unsigned int>(c),
- boost::numeric_cast<unsigned int>(yInput),
- boost::numeric_cast<unsigned int>(xInput));
+ armnn::numeric_cast<unsigned int>(n),
+ armnn::numeric_cast<unsigned int>(c),
+ armnn::numeric_cast<unsigned int>(yInput),
+ armnn::numeric_cast<unsigned int>(xInput));
rInputDecoder[inputIndex];
float inval = rInputDecoder.Get();
@@ -259,10 +260,10 @@ void Pooling2d(Decoder<float>& rInputDecoder,
execute(result, poolAreaSize);
unsigned int outputIndex = dataLayout.GetIndex(outputShape,
- boost::numeric_cast<unsigned int>(n),
- boost::numeric_cast<unsigned int>(c),
- boost::numeric_cast<unsigned int>(yOutput),
- boost::numeric_cast<unsigned int>(xOutput));
+ armnn::numeric_cast<unsigned int>(n),
+ armnn::numeric_cast<unsigned int>(c),
+ armnn::numeric_cast<unsigned int>(yOutput),
+ armnn::numeric_cast<unsigned int>(xOutput));
rOutputEncoder[outputIndex];
rOutputEncoder.Set(result);
diff --git a/src/backends/reference/workloads/RefL2NormalizationWorkload.cpp b/src/backends/reference/workloads/RefL2NormalizationWorkload.cpp
index 6fec1abe6f..f80901edc9 100644
--- a/src/backends/reference/workloads/RefL2NormalizationWorkload.cpp
+++ b/src/backends/reference/workloads/RefL2NormalizationWorkload.cpp
@@ -11,8 +11,7 @@
#include <Profiling.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
-
-#include <boost/numeric/conversion/cast.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <cmath>
@@ -39,26 +38,26 @@ void RefL2NormalizationWorkload::Execute() const
const TensorShape& shape = inputInfo.GetShape();
unsigned int paddedShapeArray[4];
- const int idxShift = 4 - boost::numeric_cast<int>(shape.GetNumDimensions());
+ const int idxShift = 4 - armnn::numeric_cast<int>(shape.GetNumDimensions());
const unsigned int batches = (idxShift == 0) ? shape[0] : 1;
paddedShapeArray[0] = batches;
- const int channelsIdx = boost::numeric_cast<int>(dataLayout.GetChannelsIndex());
+ const int channelsIdx = armnn::numeric_cast<int>(dataLayout.GetChannelsIndex());
const unsigned int channels = (channelsIdx - idxShift >= 0)
- ? shape[boost::numeric_cast<unsigned int>(channelsIdx - idxShift)]
+ ? shape[armnn::numeric_cast<unsigned int>(channelsIdx - idxShift)]
: 1;
paddedShapeArray[channelsIdx] = channels;
- const int heightIdx = boost::numeric_cast<int>(dataLayout.GetHeightIndex());
+ const int heightIdx = armnn::numeric_cast<int>(dataLayout.GetHeightIndex());
const unsigned int height = (heightIdx - idxShift >= 0)
- ? shape[boost::numeric_cast<unsigned int>(heightIdx - idxShift)]
+ ? shape[armnn::numeric_cast<unsigned int>(heightIdx - idxShift)]
: 1;
paddedShapeArray[heightIdx] = height;
- const int widthIdx = boost::numeric_cast<int>(dataLayout.GetWidthIndex());
+ const int widthIdx = armnn::numeric_cast<int>(dataLayout.GetWidthIndex());
const unsigned int width = (widthIdx - idxShift >= 0)
- ? shape[boost::numeric_cast<unsigned int>(widthIdx - idxShift)]
+ ? shape[armnn::numeric_cast<unsigned int>(widthIdx - idxShift)]
: 1;
paddedShapeArray[widthIdx] = width;
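The idxShift arithmetic above pads a lower-rank shape up to 4D before normalising. A sketch of the same padding for the NCHW case, where channels/height/width sit at indices 1, 2 and 3 (the PadToNchw4d helper is illustrative, not from the diff):

    #include <armnn/utility/NumericCast.hpp>
    #include <array>

    // Illustrative: pad a 3D [C, H, W] shape to [1, C, H, W] the way the
    // hunk above does; idxShift == 4 - 3 == 1, so batches defaults to 1.
    std::array<unsigned int, 4> PadToNchw4d(const std::array<unsigned int, 3>& chw)
    {
        const int idxShift = 4 - armnn::numeric_cast<int>(chw.size());
        std::array<unsigned int, 4> padded = { 1u, 1u, 1u, 1u };
        for (int idx = 1; idx < 4; ++idx)
        {
            padded[armnn::numeric_cast<unsigned int>(idx)] =
                chw[armnn::numeric_cast<unsigned int>(idx - idxShift)];
        }
        return padded;
    }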
diff --git a/src/backends/reference/workloads/RefNormalizationWorkload.cpp b/src/backends/reference/workloads/RefNormalizationWorkload.cpp
index a41f68349a..d5d2104cba 100644
--- a/src/backends/reference/workloads/RefNormalizationWorkload.cpp
+++ b/src/backends/reference/workloads/RefNormalizationWorkload.cpp
@@ -8,11 +8,10 @@
#include <armnn/Logging.hpp>
#include <armnn/Tensor.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <Profiling.hpp>
-#include <boost/numeric/conversion/cast.hpp>
-
#include "RefWorkloadUtils.hpp"
#include "Decoders.hpp"
#include "Encoders.hpp"
@@ -37,7 +36,7 @@ void NormalizeWithinUingLbr(Decoder<float>& inputData,
const unsigned int rows = tensorShape[2];
const unsigned int cols = tensorShape[3];
- int radius = boost::numeric_cast<int>(norm_size / 2u); /* Strong Assumption on rounding Mode */
+ int radius = armnn::numeric_cast<int>(norm_size / 2u); /* Strong Assumption on rounding Mode */
for (unsigned int n = 0; n < batchSize; n++)
{
@@ -52,23 +51,23 @@ void NormalizeWithinUingLbr(Decoder<float>& inputData,
{
for (int x = -radius; x <= radius; x++)
{
- int i = boost::numeric_cast<int>(w) + x;
- int j = boost::numeric_cast<int>(h) + y;
+ int i = armnn::numeric_cast<int>(w) + x;
+ int j = armnn::numeric_cast<int>(h) + y;
- if ((i < 0) || (i >= boost::numeric_cast<int>(cols)))
+ if ((i < 0) || (i >= armnn::numeric_cast<int>(cols)))
{
continue;
}
- if ((j < 0) || (j >= boost::numeric_cast<int>(rows)))
+ if ((j < 0) || (j >= armnn::numeric_cast<int>(rows)))
{
continue;
}
unsigned int inputIndex = n * cols * rows * depth +
c * cols * rows +
- boost::numeric_cast<unsigned int>(j) * cols +
- boost::numeric_cast<unsigned int>(i);
+ armnn::numeric_cast<unsigned int>(j) * cols +
+ armnn::numeric_cast<unsigned int>(i);
inputData[inputIndex];
float inval = inputData.Get();
@@ -106,7 +105,7 @@ void NormalizeAcrossUingLbr(Decoder<float>& inputData,
const unsigned int rows = tensorShape[dataLayoutIndexed.GetHeightIndex()];
const unsigned int cols = tensorShape[dataLayoutIndexed.GetWidthIndex()];
- int radius = boost::numeric_cast<int>(norm_size / 2u); /* Strong Assumption on rounding Mode */
+ int radius = armnn::numeric_cast<int>(norm_size / 2u); /* Strong Assumption on rounding Mode */
for (unsigned int n = 0; n < batchSize; n++)
{
@@ -119,16 +118,16 @@ void NormalizeAcrossUingLbr(Decoder<float>& inputData,
float accumulated_scale = 0.0;
for (int z = -radius; z <= radius; z++)
{
- int k = boost::numeric_cast<int>(c) + z;
+ int k = armnn::numeric_cast<int>(c) + z;
- if ((k < 0) || (k >= boost::numeric_cast<int>(depth)))
+ if ((k < 0) || (k >= armnn::numeric_cast<int>(depth)))
{
continue;
}
unsigned inputIndex = dataLayoutIndexed.GetIndex(tensorShape,
n,
- boost::numeric_cast<unsigned int>(k),
+ armnn::numeric_cast<unsigned int>(k),
h,
w);
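The radius above comes from integer division, which is what the source comment about the rounding mode flags. A worked instance with illustrative numbers:

    // norm_size == 5 gives radius == 2, so the window spans offsets
    // -2..2 (five taps); an even norm_size of 4 also gives radius == 2,
    // which is the rounding assumption the comment warns about.
    unsigned int norm_size = 5u;
    int radius = armnn::numeric_cast<int>(norm_size / 2u); // radius == 2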
diff --git a/src/backends/reference/workloads/StridedSlice.cpp b/src/backends/reference/workloads/StridedSlice.cpp
index b00b049ff6..c5fb121cb3 100644
--- a/src/backends/reference/workloads/StridedSlice.cpp
+++ b/src/backends/reference/workloads/StridedSlice.cpp
@@ -8,8 +8,7 @@
#include <ResolveType.hpp>
#include <armnn/utility/Assert.hpp>
-
-#include <boost/numeric/conversion/cast.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <cstring>
@@ -24,7 +23,7 @@ void PadParams(StridedSliceDescriptor& p, unsigned int dimCount)
ARMNN_ASSERT_MSG(dimCount <= 4, "Expected input with at most 4 dimensions");
const unsigned int beginIndicesCount =
- boost::numeric_cast<unsigned int>(p.m_Begin.size());
+ armnn::numeric_cast<unsigned int>(p.m_Begin.size());
ARMNN_ASSERT(dimCount >= beginIndicesCount);
const unsigned int padCount = dimCount - beginIndicesCount;
@@ -116,7 +115,7 @@ void StridedSlice(const TensorInfo& inputInfo,
const int start3 = paddedParams.GetStartForAxis(inputShape, 3);
const int stop3 = paddedParams.GetStopForAxis (inputShape, 3, start3);
- const int step = boost::numeric_cast<int>(dataTypeSize);
+ const int step = armnn::numeric_cast<int>(dataTypeSize);
for (int in0 = start0;
!LoopCondition(in0, stop0, paddedParams.m_Stride[0]);
@@ -134,9 +133,9 @@ void StridedSlice(const TensorInfo& inputInfo,
!LoopCondition(in3, stop3, paddedParams.m_Stride[3]);
in3 += paddedParams.m_Stride[3])
{
- int dim1 = boost::numeric_cast<int>(inputShape[1]);
- int dim2 = boost::numeric_cast<int>(inputShape[2]);
- int dim3 = boost::numeric_cast<int>(inputShape[3]);
+ int dim1 = armnn::numeric_cast<int>(inputShape[1]);
+ int dim2 = armnn::numeric_cast<int>(inputShape[2]);
+ int dim3 = armnn::numeric_cast<int>(inputShape[3]);
int inputOffset = (((in0 * dim1 + in1) * dim2 + in2) * dim3 + in3) * step;
::memcpy(output, input + inputOffset, dataTypeSize);
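The inputOffset in this last hunk flattens a 4D coordinate into a byte offset in Horner form, (((in0*dim1 + in1)*dim2 + in2)*dim3 + in3)*step. A worked instance with illustrative numbers:

    // Shape [2, 3, 4, 5], element size 4 bytes, coordinate (1, 2, 3, 4):
    // (((1*3 + 2)*4 + 3)*5 + 4) * 4 = 119 * 4 = 476 bytes.
    int dim1 = 3, dim2 = 4, dim3 = 5, step = 4;
    int inputOffset = (((1 * dim1 + 2) * dim2 + 3) * dim3 + 4) * step; // 476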