From d812a31cdd00c7f45d542f013e803c9fcb0aaac4 Mon Sep 17 00:00:00 2001
From: Rob Hughes
Date: Fri, 6 Aug 2021 13:10:53 +0100
Subject: Replace use of non-standard variable length arrays

Replace these with a std::vector for portability

Change-Id: Ia6b0dc9360446ef8bba0baa88c61b4c400fcd799
Signed-off-by: Rob Hughes
---
 src/armnnTfLiteParser/TfLiteParser.cpp | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 3e59244753..2ac325c08c 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -426,7 +426,7 @@ armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
     }
     else
     {
-        unsigned long shapeSignatureSize = tensorPtr->shape_signature.size();
+        size_t shapeSignatureSize = tensorPtr->shape_signature.size();
 
         // If a shape signature exists we will use that to infer dynamic tensors
         if (shapeSignatureSize != 0)
@@ -444,12 +444,12 @@ armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
                 }
             }
 
-            bool dimMask[tensorPtr->shape_signature.size()];
+            std::unique_ptr<bool[]> dimMask = std::make_unique<bool[]>(tensorPtr->shape_signature.size());
             for (unsigned int i = 0; i < tensorPtr->shape_signature.size(); ++i)
             {
                 dimMask[i] = tensorPtr->shape_signature[i] == -1 ? false : true;
             }
-            tensorShape = TensorShape(static_cast<unsigned int>(safeShape.size()), safeShape.data(), dimMask);
+            tensorShape = TensorShape(static_cast<unsigned int>(safeShape.size()), safeShape.data(), dimMask.get());
         }
         // If there is no shape signature treat the tensor as dynamic if the shape has a size of zero
         else if (shape.size() == 0)
@@ -1170,7 +1170,7 @@ void TfLiteParserImpl::ParseExpandDims(size_t subgraphIndex, size_t operatorInde
             axis = inputDimSize + axis + 1;
         }
 
-        unsigned int shape[static_cast<unsigned int>(inputDimSize) + 1];
+        std::vector<unsigned int> shape(static_cast<unsigned int>(inputDimSize) + 1);
         unsigned int inputShapeIndex = 0;
        for (unsigned int i = 0; i < static_cast<unsigned int>(inputDimSize + 1); ++i)
         {
@@ -1185,7 +1185,7 @@ void TfLiteParserImpl::ParseExpandDims(size_t subgraphIndex, size_t operatorInde
             }
         }
 
-        reshapeDesc.m_TargetShape = TensorShape(static_cast<unsigned int>(inputDimSize + 1), shape);
+        reshapeDesc.m_TargetShape = TensorShape(static_cast<unsigned int>(inputDimSize + 1), shape.data());
     }
 
     IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
-- 
cgit v1.2.1
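
For context, the pattern this patch applies is replacing C-style variable length arrays (a GCC/Clang extension that MSVC and strict ISO C++ reject) with heap-backed containers whose data()/get() pointers feed the same pointer-based APIs. The sketch below is illustrative only and is not part of the patch; BuildShape and Consume are hypothetical stand-ins for the TensorShape-style call sites in TfLiteParser.cpp.

// Minimal sketch, assuming a pointer-taking API similar in shape to the
// armnn::TensorShape constructor (the names here are hypothetical).
#include <cstddef>
#include <cstdio>
#include <memory>
#include <vector>

// Hypothetical consumer that, like TensorShape, expects raw pointers.
void Consume(unsigned int numDims, const unsigned int* dims, const bool* specified)
{
    for (unsigned int i = 0; i < numDims; ++i)
    {
        std::printf("dim %u: %u (%s)\n", i, dims[i], specified[i] ? "fixed" : "dynamic");
    }
}

void BuildShape(const std::vector<int>& signature)
{
    // Portable replacements for the non-standard VLAs
    //   unsigned int shape[signature.size()];
    //   bool specified[signature.size()];
    std::vector<unsigned int> shape(signature.size());
    std::unique_ptr<bool[]> specified = std::make_unique<bool[]>(signature.size());

    for (std::size_t i = 0; i < signature.size(); ++i)
    {
        specified[i] = signature[i] != -1;  // -1 marks a dynamic dimension
        shape[i]     = signature[i] == -1 ? 0u : static_cast<unsigned int>(signature[i]);
    }

    // data() and get() recover the raw pointers the pointer-based API needs.
    Consume(static_cast<unsigned int>(shape.size()), shape.data(), specified.get());
}

int main()
{
    BuildShape({1, -1, 224, 3});
    return 0;
}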