From 9b088d9c275ed11ca39f8b035e02a68f681c5d45 Mon Sep 17 00:00:00 2001
From: Matthew Sloyan
Date: Mon, 14 Sep 2020 15:12:55 +0100
Subject: IVGCVSW-5304 Remove boost::numeric_cast from Android-nn-driver

* Replaced with armnn/utility/NumericCast.hpp

Signed-off-by: Matthew Sloyan
Change-Id: I6253261508e49ddeaa5ba3f3893a059486637467
---
 ConversionUtils.hpp | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

(limited to 'ConversionUtils.hpp')

diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 450b91f2..b9c4d411 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -12,6 +12,7 @@
 #include
 #include
 #include
+#include <armnn/utility/NumericCast.hpp>
 #include
 #include
@@ -22,7 +23,6 @@
 #include
 #include
 
-#include <boost/numeric/conversion/cast.hpp>
 #include
 #include
@@ -308,8 +308,8 @@ bool BroadcastTensor(LayerInputHandle& input0,
     // to the "smaller" tensor using a reshape, while keeping the order of the inputs.
     unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
 
-    unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
-                                           boost::numeric_cast<int>(inputDimensions1));
+    unsigned int sizeDifference = std::abs(armnn::numeric_cast<int>(inputDimensions0) -
+                                           armnn::numeric_cast<int>(inputDimensions1));
 
     bool input0IsSmaller = inputDimensions0 < inputDimensions1;
     LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
@@ -323,7 +323,7 @@ bool BroadcastTensor(LayerInputHandle& input0,
     }
 
     armnn::TensorInfo reshapedInfo = smallInfo;
-    reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
+    reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                               reshapedDimensions.data() });
 
     // RehsapeDescriptor that is ignored in the IsReshapeSupported function
@@ -385,8 +385,8 @@ void CalcPadding(uint32_t input,
     int32_t padHead;
     int32_t padTail;
     calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
-    outPadHead = boost::numeric_cast<uint32_t>(padHead);
-    outPadTail = boost::numeric_cast<uint32_t>(padTail);
+    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
+    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
 }
 
 #if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
@@ -397,8 +397,8 @@ void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dila
     int32_t padHead;
     int32_t padTail;
     calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
-    outPadHead = boost::numeric_cast<uint32_t>(padHead);
-    outPadTail = boost::numeric_cast<uint32_t>(padTail);
+    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
+    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
 }
 
 void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
@@ -3933,7 +3933,7 @@ bool ConvertTranspose(const HalOperation& operation, const HalModel& model, Conv
     {
         for (unsigned int i = rank; i > 0; i--)
         {
-            perm[rank - i] = boost::numeric_cast<int> (i - 1);
+            perm[rank - i] = armnn::numeric_cast<int> (i - 1);
         }
     }
     else if (!GetTensorInt32Values(*permOperand, perm, model, data))
-- 
cgit v1.2.1