From 1b46d132a3330692fcf9a603b21363a28f46ef03 Mon Sep 17 00:00:00 2001
From: Mike Kelly
Date: Wed, 3 Nov 2021 11:12:45 +0000
Subject: IVGCVSW-6430 Clear up coverity issues

 * Removed unreachable code
   * break after if else where both branches return
 * Removed unused operations
   * Result of dstPtr++ is unused
 * Fixed possible overflow
   * Axis dimensionSequence assignment can result in overflow where rank is not 4
 * Removed use of old-style casts
 * Fixed spelling mistakes in error messages

Signed-off-by: Mike Kelly
Change-Id: If2a7ab63fc1d200cb18b494d99a67bbddb42f0f8
---
 ConversionUtils.hpp     | 20 ++++++++++----------
 ConversionUtils_1_2.hpp |  2 +-
 2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 1d182fad..8f7d5b9b 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -1276,7 +1276,6 @@ LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                 Fail("%s: invalid operand tensor", __func__);
                 return LayerInputHandle();
             }
-            break;
         }
         default:
         {
@@ -2139,7 +2138,7 @@ bool ConvertConcatenation(const HalOperation& operation, const HalModel& model,

     if (inputShapes.size() != inputHandles.size())
     {
-        return Fail("%s: invalid model input shapes size doesn't match input handles sise: %i != %i", __func__,
+        return Fail("%s: invalid model input shapes size doesn't match input handles size: %i != %i", __func__,
                     inputShapes.size(), inputHandles.size());
     }

@@ -2258,13 +2257,13 @@ bool ConvertConcatenation(const HalOperation& operation, const HalModel& model,

     if (static_cast<std::size_t>(numInputSlots) != inputHandles.size())
     {
-        return Fail("%s: invalid model input slots size doesn't match input handles sise: %i != %i", __func__,
+        return Fail("%s: invalid model input slots size doesn't match input handles size: %i != %i", __func__,
                     static_cast<std::size_t>(numInputSlots), inputHandles.size());
     }
     for (int i = 0; i < numInputSlots; ++i)
     {
         // connect the input directly to the merge (concat) layer
-        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
+        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(static_cast<unsigned int>(i)));
     }

     // Transpose the output shape
@@ -3019,7 +3018,7 @@ DequantizeResult DequantizeIfRequired(size_t operand_index,
         {
             return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
         }
-        *dstPtr++ = quantizedBuffer[i] * quantizationScale;
+        *dstPtr = quantizedBuffer[i] * quantizationScale;
     }

     // Construct tensor info for dequantized ConstTensor
@@ -3812,13 +3811,13 @@ bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, Conver
     // if the operand index is out of bounds.
     const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);

-    const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
-
     std::vector<int32_t> axis;
     if (!axisOperand)
     {
-        axis.assign(dimensionSequence,
-                    dimensionSequence + rank);
+        for (unsigned int i = 0; i < rank; ++i)
+        {
+            axis.push_back(static_cast<int32_t>(i));
+        }
     }
     else if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
     {
@@ -4260,7 +4259,8 @@ bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model,
             return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
         }

-        paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
+        paddingList.emplace_back(static_cast<unsigned int>(paddingBeforeInput),
+                                 static_cast<unsigned int>(paddingAfterInput));
     }

    armnn::SpaceToBatchNdDescriptor descriptor;
diff --git a/ConversionUtils_1_2.hpp b/ConversionUtils_1_2.hpp
index acf787f3..155fdf40 100644
--- a/ConversionUtils_1_2.hpp
+++ b/ConversionUtils_1_2.hpp
@@ -2396,7 +2396,7 @@ bool ConvertSpaceToDepth(const HalOperation& operation, const HalModel& model, C
     IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
     if (!layer)
     {
-        return Fail("%s: Could not add the SpaceToDephLayer", __func__);
+        return Fail("%s: Could not add the SpaceToDepthLayer", __func__);
     }

     input.Connect(layer->GetInputSlot(0));
-- 
cgit v1.2.1
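
A note on the "Fixed possible overflow" bullet: the removed ConvertSqueeze code filled the default
axis list from a fixed four-element dimensionSequence array, so axis.assign(dimensionSequence,
dimensionSequence + rank) reads past the end of that array whenever rank exceeds 4, which is the
overflow Coverity reports. The replacement builds the sequence with a loop bounded by rank. The
sketch below shows the same pattern in isolation; the function name MakeDefaultAxes and its use in
main are illustrative only and do not appear in the driver.

#include <cstdint>
#include <iostream>
#include <vector>

// Build the default squeeze axes 0..rank-1 without assuming rank == 4.
std::vector<int32_t> MakeDefaultAxes(unsigned int rank)
{
    std::vector<int32_t> axis;
    axis.reserve(rank);
    for (unsigned int i = 0; i < rank; ++i)
    {
        axis.push_back(static_cast<int32_t>(i)); // safe for any rank, no fixed-size array to overrun
    }
    return axis;
}

int main()
{
    // A rank of 5 would have overrun the old dimensionSequence[4] array.
    for (int32_t a : MakeDefaultAxes(5))
    {
        std::cout << a << ' ';
    }
    std::cout << '\n'; // prints: 0 1 2 3 4
    return 0;
}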
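
On the "Removed use of old-style casts" bullet: a C-style cast such as (unsigned int) paddingBeforeInput
quietly picks whichever conversion makes the expression compile, including reinterpret_cast and
const_cast style conversions, whereas static_cast allows only well-defined value conversions and is
easy to search for. A small self-contained illustration follows; the variable names are hypothetical
and not taken from the driver.

#include <cstdint>

int main()
{
    const int32_t paddingBefore = 2; // hypothetical stand-in for paddingBeforeInput

    unsigned int a = (unsigned int) paddingBefore;             // old style: compiles regardless of intent
    unsigned int b = static_cast<unsigned int>(paddingBefore); // new style: value conversion only

    // If the expression were accidentally a pointer, the C-style cast would still compile
    // (it degrades to reinterpret_cast plus const_cast), but static_cast would be rejected:
    // unsigned int* p = static_cast<unsigned int*>(&paddingBefore); // does not compile

    return (a == b) ? 0 : 1;
}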