From 4002a609b1e5a75d7527cd8b7cb05ed444d27caa Mon Sep 17 00:00:00 2001
From: Matthew Bentham
Date: Wed, 23 Nov 2022 18:58:06 +0000
Subject: Fix some memory overruns / undefined behaviour in ShapeInferenceTests

In several cases the address of a single float value on the stack was
passed as a pointer to the constructor of ScopedTensorHandle (which
needs a backing-store of size equal to GetNumBytes()).

Replace by using a std::vector to explicitly create and initialize
the right number of elements.

Signed-off-by: Matthew Bentham
Change-Id: I8a1f4bf169bd89983f2d68047173ec901a21e1fb
---
 src/armnn/test/ShapeInferenceTests.cpp | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/src/armnn/test/ShapeInferenceTests.cpp b/src/armnn/test/ShapeInferenceTests.cpp
index 1035a3b6fd..333d12a3a2 100644
--- a/src/armnn/test/ShapeInferenceTests.cpp
+++ b/src/armnn/test/ShapeInferenceTests.cpp
@@ -239,8 +239,8 @@ TEST_CASE("ConstantTest")
     TensorShape outputShape{ 1, 1, 3, 3 };
     auto layer = BuildGraph<ConstantLayer>(&graph, {}, "constant");
 
-    const float Datum = 0.0f;
-    ConstTensor output0({outputShape, DataType::Float32, 0.0f, 0, true}, &Datum);
+    std::vector<float> data(9, 0.0f);
+    ConstTensor output0({outputShape, DataType::Float32, 0.0f, 0, true}, data);
     layer->m_LayerOutput = std::make_unique<ScopedTensorHandle>(output0);
 
     layer->GetOutputSlot(0).SetTensorInfo({{1, 1, 3, 3}, DataType::Float32});
@@ -343,8 +343,8 @@ TEST_CASE("DetectionPostProcessTest")
     descriptor.m_ScaleH = 5.0;
     descriptor.m_ScaleW = 5.0;
 
-    const float Datum = 0.0f;
-    ConstTensor anchorsTensor({{1, 1, 3, 3}, DataType::Float32, 0.0f, 0, true}, &Datum);
+    std::vector<float> data(9, 0.0f);
+    ConstTensor anchorsTensor({{1, 1, 3, 3}, DataType::Float32, 0.0f, 0, true}, data);
 
     Graph graph;
 
@@ -432,8 +432,8 @@ TEST_CASE("LstmTest")
     Graph graph;
     auto layer = BuildGraph<LstmLayer>(&graph, {inputShape, inputCellState, inputCellState}, descriptor, "lstm");
 
-    float Datum = 0.0f;
-    ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, &Datum);
+    std::vector<float> data(60, 0.0f);
+    ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, data);
 
     layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
     layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
@@ -520,8 +520,8 @@ TEST_CASE("QLstmTest")
     Graph graph;
     auto layer = BuildGraph<QLstmLayer>(&graph, {inputShape, inputCellState, inputCellState}, descriptor, "qlstm");
 
-    float Datum = 0.0f;
-    ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, &Datum);
+    std::vector<float> data(60, 0.0f);
+    ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, data);
 
     layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
     layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
@@ -548,8 +548,8 @@ TEST_CASE("QuantizedLstmTest")
     Graph graph;
     auto layer = BuildGraph<QuantizedLstmLayer>(&graph, {inputShape, inputCellState, inputCellState}, "quatizedlstm");
 
-    float Datum = 0.0f;
-    ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, &Datum);
+    std::vector<float> data(60, 0.0f);
+    ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, data);
 
     layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
     layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
@@ -713,4 +713,4 @@ TEST_CASE("TransposeTest")
 }
 }
 
-}
\ No newline at end of file
+}
--
cgit v1.2.1
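
Note (not part of the patch): the size mismatch the commit message describes can be seen with a minimal standalone sketch. This is an assumption-laden illustration, not Arm NN test code: it assumes only the public header armnn/Tensor.hpp and the TensorInfo/ConstTensor constructors used in the diff above; the variable names (datum, data, info) are illustrative.

// Sketch only: shows why &Datum was too small a backing store for the tensors
// built in ShapeInferenceTests, and why a std::vector of the right size fixes it.
#include <armnn/Tensor.hpp>
#include <iostream>
#include <vector>

int main()
{
    using namespace armnn;

    // Same shape/type as the ConstantTest tensor in the patch: 1x1x3x3 Float32.
    TensorInfo info(TensorShape({1, 1, 3, 3}), DataType::Float32, 0.0f, 0, true);

    // Old pattern: a single float on the stack provides sizeof(float) == 4 bytes,
    // but the tensor advertises 9 * 4 == 36 bytes of backing store. Anything that
    // copies GetNumBytes() from the memory area (as ScopedTensorHandle does)
    // reads past the end of 'datum' -- the overrun the commit fixes.
    const float datum = 0.0f;
    ConstTensor tooSmall(info, &datum);
    std::cout << "tensor needs " << tooSmall.GetNumBytes()
              << " bytes, backed by " << sizeof(datum) << " bytes\n";

    // New pattern: the vector owns exactly one element per tensor element,
    // so the backing store matches GetNumBytes().
    std::vector<float> data(info.GetNumElements(), 0.0f);
    ConstTensor correctlySized(info, data.data());
    std::cout << "tensor needs " << correctlySized.GetNumBytes()
              << " bytes, backed by " << data.size() * sizeof(float) << " bytes\n";
    return 0;
}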