author     Mike Kelly <mike.kelly@arm.com>   2021-11-03 11:12:45 +0000
committer  mike.kelly <mike.kelly@arm.com>   2021-11-04 12:22:24 +0000
commit     1b46d132a3330692fcf9a603b21363a28f46ef03 (patch)
tree       c763d009cce0b13765b094c93741fb67d818defd
parent     f65b266b7bbabe29a694a1a753ae10b65301c1f1 (diff)
download   android-nn-driver-1b46d132a3330692fcf9a603b21363a28f46ef03.tar.gz
IVGCVSW-6430 Clear up coverity issues
* Removed unreachable code: a break after an if/else in which both branches return
* Removed an unused operation: the result of dstPtr++ was unused
* Fixed a possible overflow: the axis dimensionSequence assignment could overflow when the rank is not 4 (see the sketch after the diffstat below)
* Removed old-style casts (a minimal sketch follows this message)
* Fixed spelling mistakes in error messages

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: If2a7ab63fc1d200cb18b494d99a67bbddb42f0f8
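The cast clean-up amounts to spelling signed-to-unsigned conversions with static_cast rather than C-style casts, as the SpaceToBatchNd and concatenation hunks below do. A minimal standalone sketch of the pattern, assuming an illustrative helper name and types that are not taken from the driver:

#include <cstdint>
#include <utility>

// Convert a signed padding pair to the unsigned form a padding list expects.
// static_cast keeps the value conversion explicit and easy to grep for.
std::pair<unsigned int, unsigned int> ToUnsignedPadding(int32_t paddingBefore, int32_t paddingAfter)
{
    return { static_cast<unsigned int>(paddingBefore),
             static_cast<unsigned int>(paddingAfter) };
}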
-rw-r--r--  ConversionUtils.hpp      20
-rw-r--r--  ConversionUtils_1_2.hpp   2
2 files changed, 11 insertions, 11 deletions
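For the overflow fix, the ConvertSqueeze hunk below builds the default axis list from the tensor rank instead of assigning from a fixed four-element dimensionSequence array, so the list always matches the actual rank and the array can no longer be overrun. A minimal sketch of the resulting pattern, with a hypothetical function name that is not part of the driver:

#include <cstdint>
#include <vector>

// Build the default "squeeze every dimension" axis list for a tensor of the given rank.
std::vector<int32_t> DefaultSqueezeAxes(unsigned int rank)
{
    std::vector<int32_t> axes;
    axes.reserve(rank);
    for (unsigned int i = 0; i < rank; ++i)
    {
        axes.push_back(static_cast<int32_t>(i)); // one entry per dimension, whatever the rank
    }
    return axes;
}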
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 1d182fad..8f7d5b9b 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -1276,7 +1276,6 @@ LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
Fail("%s: invalid operand tensor", __func__);
return LayerInputHandle();
}
- break;
}
default:
{
@@ -2139,7 +2138,7 @@ bool ConvertConcatenation(const HalOperation& operation, const HalModel& model,
if (inputShapes.size() != inputHandles.size())
{
- return Fail("%s: invalid model input shapes size doesn't match input handles sise: %i != %i", __func__,
+ return Fail("%s: invalid model input shapes size doesn't match input handles size: %i != %i", __func__,
inputShapes.size(), inputHandles.size());
}
@@ -2258,13 +2257,13 @@ bool ConvertConcatenation(const HalOperation& operation, const HalModel& model,
if (static_cast<std::size_t>(numInputSlots) != inputHandles.size())
{
- return Fail("%s: invalid model input slots size doesn't match input handles sise: %i != %i", __func__,
+ return Fail("%s: invalid model input slots size doesn't match input handles size: %i != %i", __func__,
static_cast<std::size_t>(numInputSlots), inputHandles.size());
}
for (int i = 0; i < numInputSlots; ++i)
{
// connect the input directly to the merge (concat) layer
- inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
+ inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(static_cast<unsigned int>(i)));
}
// Transpose the output shape
@@ -3019,7 +3018,7 @@ DequantizeResult DequantizeIfRequired(size_t operand_index,
{
return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
}
- *dstPtr++ = quantizedBuffer[i] * quantizationScale;
+ *dstPtr = quantizedBuffer[i] * quantizationScale;
}
// Construct tensor info for dequantized ConstTensor
@@ -3812,13 +3811,13 @@ bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, Conver
// if the operand index is out of bounds.
const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
- const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
-
std::vector<int32_t> axis;
if (!axisOperand)
{
- axis.assign(dimensionSequence,
- dimensionSequence + rank);
+ for (unsigned int i = 0; i < rank; ++i)
+ {
+ axis.push_back(static_cast<unsigned int>(i));
+ }
}
else if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
{
@@ -4260,7 +4259,8 @@ bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model,
return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
}
- paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
+ paddingList.emplace_back(static_cast<unsigned int>(paddingBeforeInput),
+ static_cast<unsigned int>(paddingAfterInput));
}
armnn::SpaceToBatchNdDescriptor descriptor;
diff --git a/ConversionUtils_1_2.hpp b/ConversionUtils_1_2.hpp
index acf787f3..155fdf40 100644
--- a/ConversionUtils_1_2.hpp
+++ b/ConversionUtils_1_2.hpp
@@ -2396,7 +2396,7 @@ bool ConvertSpaceToDepth(const HalOperation& operation, const HalModel& model, C
IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
if (!layer)
{
- return Fail("%s: Could not add the SpaceToDephLayer", __func__);
+ return Fail("%s: Could not add the SpaceToDepthLayer", __func__);
}
input.Connect(layer->GetInputSlot(0));