From 9b088d9c275ed11ca39f8b035e02a68f681c5d45 Mon Sep 17 00:00:00 2001 From: Matthew Sloyan <matthew.sloyan@arm.com> Date: Mon, 14 Sep 2020 15:12:55 +0100 Subject: IVGCVSW-5304 Remove boost::numeric_cast from Android-nn-driver * Replaced with armnn/utility/NumericCast.hpp Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com> Change-Id: I6253261508e49ddeaa5ba3f3893a059486637467 --- ConversionUtils.hpp | 18 +++++++++--------- ConversionUtils_1_2.hpp | 14 ++++++++------ 2 files changed, 17 insertions(+), 15 deletions(-) diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp index 450b91f2..b9c4d411 100644 --- a/ConversionUtils.hpp +++ b/ConversionUtils.hpp @@ -12,6 +12,7 @@ #include #include #include +#include <armnn/utility/NumericCast.hpp> #include #include @@ -22,7 +23,6 @@ #include #include -#include <boost/numeric/conversion/cast.hpp> #include #include @@ -308,8 +308,8 @@ bool BroadcastTensor(LayerInputHandle& input0, // to the "smaller" tensor using a reshape, while keeping the order of the inputs. unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1); - unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) - - boost::numeric_cast<int>(inputDimensions1)); + unsigned int sizeDifference = std::abs(armnn::numeric_cast<int>(inputDimensions0) - + armnn::numeric_cast<int>(inputDimensions1)); bool input0IsSmaller = inputDimensions0 < inputDimensions1; LayerInputHandle& smallInputHandle = input0IsSmaller ? 
input0 : input1; @@ -323,7 +323,7 @@ bool BroadcastTensor(LayerInputHandle& input0, } armnn::TensorInfo reshapedInfo = smallInfo; - reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()), + reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()), reshapedDimensions.data() }); // RehsapeDescriptor that is ignored in the IsReshapeSupported function @@ -385,8 +385,8 @@ void CalcPadding(uint32_t input, int32_t padHead; int32_t padTail; calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail); - outPadHead = boost::numeric_cast<uint32_t>(padHead); - outPadTail = boost::numeric_cast<uint32_t>(padTail); + outPadHead = armnn::numeric_cast<uint32_t>(padHead); + outPadTail = armnn::numeric_cast<uint32_t>(padTail); } #if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3) @@ -397,8 +397,8 @@ void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dila int32_t padHead; int32_t padTail; calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail); - outPadHead = boost::numeric_cast<uint32_t>(padHead); - outPadTail = boost::numeric_cast<uint32_t>(padTail); + outPadHead = armnn::numeric_cast<uint32_t>(padHead); + outPadTail = armnn::numeric_cast<uint32_t>(padTail); } void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead, @@ -3933,7 +3933,7 @@ bool ConvertTranspose(const HalOperation& operation, const HalModel& model, Conv { for (unsigned int i = rank; i > 0; i--) { - perm[rank - i] = boost::numeric_cast<int> (i - 1); + perm[rank - i] = armnn::numeric_cast<int> (i - 1); } } else if (!GetTensorInt32Values(*permOperand, perm, model, data)) diff --git a/ConversionUtils_1_2.hpp b/ConversionUtils_1_2.hpp index 2f4b91bf..e4bc05f9 100644 --- a/ConversionUtils_1_2.hpp +++ b/ConversionUtils_1_2.hpp @@ -8,6 +8,8 @@ #include "Utils.hpp" #include "ConversionUtils.hpp" + +#include <armnn/utility/NumericCast.hpp> #include #include @@ -2769,12 +2771,12 @@ bool ConvertTransposeConv2d(const HalOperation& operation, const HalModel& 
model desc.m_OutputShapeEnabled = true; } - desc.m_StrideX = boost::numeric_cast<uint32_t>(strideX); - desc.m_StrideY = boost::numeric_cast<uint32_t>(strideY); - desc.m_PadLeft = boost::numeric_cast<uint32_t>(padLeft); - desc.m_PadRight = boost::numeric_cast<uint32_t>(padRight); - desc.m_PadTop = boost::numeric_cast<uint32_t>(padTop); - desc.m_PadBottom = boost::numeric_cast<uint32_t>(padBottom); + desc.m_StrideX = armnn::numeric_cast<uint32_t>(strideX); + desc.m_StrideY = armnn::numeric_cast<uint32_t>(strideY); + desc.m_PadLeft = armnn::numeric_cast<uint32_t>(padLeft); + desc.m_PadRight = armnn::numeric_cast<uint32_t>(padRight); + desc.m_PadTop = armnn::numeric_cast<uint32_t>(padTop); + desc.m_PadBottom = armnn::numeric_cast<uint32_t>(padBottom); } else if (operation.inputs.size() == 11) { -- cgit v1.2.1