diff options
author | Matthew Bentham <matthew.bentham@arm.com> | 2022-11-23 18:58:06 +0000 |
---|---|---|
committer | Matthew Bentham <matthew.bentham@arm.com> | 2022-12-07 09:33:05 +0000 |
commit | 4002a609b1e5a75d7527cd8b7cb05ed444d27caa (patch) | |
tree | 42cf79157bab3cfa83b5ccec1b8f69e400d65314 /src/armnn | |
parent | 30dc2d2a1b0e9d3822abddc739dcb36cb672ea98 (diff) | |
download | armnn-4002a609b1e5a75d7527cd8b7cb05ed444d27caa.tar.gz |
Fix some memory overruns / undefined behaviour in ShapeInferenceTests
In several cases the address of a single float value on the stack was
passed as a pointer to the constructor of ConstTensor, whose backing
store is later copied wholesale by ScopedTensorHandle (which needs
a backing-store of size equal to GetNumBytes()).
Replace by using a std::vector to explicitly create and initialize
the right number of elements.
Signed-off-by: Matthew Bentham <matthew.bentham@arm.com>
Change-Id: I8a1f4bf169bd89983f2d68047173ec901a21e1fb
Diffstat (limited to 'src/armnn')
-rw-r--r-- | src/armnn/test/ShapeInferenceTests.cpp | 22 |
1 file changed, 11 insertions, 11 deletions
diff --git a/src/armnn/test/ShapeInferenceTests.cpp b/src/armnn/test/ShapeInferenceTests.cpp index 1035a3b6fd..333d12a3a2 100644 --- a/src/armnn/test/ShapeInferenceTests.cpp +++ b/src/armnn/test/ShapeInferenceTests.cpp @@ -239,8 +239,8 @@ TEST_CASE("ConstantTest") TensorShape outputShape{ 1, 1, 3, 3 }; auto layer = BuildGraph<ConstantLayer>(&graph, {}, "constant"); - const float Datum = 0.0f; - ConstTensor output0({outputShape, DataType::Float32, 0.0f, 0, true}, &Datum); + std::vector<float> data(9, 0.0f); + ConstTensor output0({outputShape, DataType::Float32, 0.0f, 0, true}, data); layer->m_LayerOutput = std::make_unique<ScopedTensorHandle>(output0); layer->GetOutputSlot(0).SetTensorInfo({{1, 1, 3, 3}, DataType::Float32}); @@ -343,8 +343,8 @@ TEST_CASE("DetectionPostProcessTest") descriptor.m_ScaleH = 5.0; descriptor.m_ScaleW = 5.0; - const float Datum = 0.0f; - ConstTensor anchorsTensor({{1, 1, 3, 3}, DataType::Float32, 0.0f, 0, true}, &Datum); + std::vector<float> data(9, 0.0f); + ConstTensor anchorsTensor({{1, 1, 3, 3}, DataType::Float32, 0.0f, 0, true}, data); Graph graph; @@ -432,8 +432,8 @@ TEST_CASE("LstmTest") Graph graph; auto layer = BuildGraph<LstmLayer>(&graph, {inputShape, inputCellState, inputCellState}, descriptor, "lstm"); - float Datum = 0.0f; - ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, &Datum); + std::vector<float> data(60, 0.0f); + ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, data); layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor); layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor); @@ -520,8 +520,8 @@ TEST_CASE("QLstmTest") Graph graph; auto layer = BuildGraph<QLstmLayer>(&graph, {inputShape, inputCellState, inputCellState}, descriptor, "qlstm"); - float Datum = 0.0f; - ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, &Datum); + std::vector<float> data(60, 0.0f); + ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, data); layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor); layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
@@ -548,8 +548,8 @@ TEST_CASE("QuantizedLstmTest") Graph graph; auto layer = BuildGraph<QuantizedLstmLayer>(&graph, {inputShape, inputCellState, inputCellState}, "quatizedlstm"); - float Datum = 0.0f; - ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, &Datum); + std::vector<float> data(60, 0.0f); + ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, data); layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor); layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor); @@ -713,4 +713,4 @@ TEST_CASE("TransposeTest") } } -}
\ No newline at end of file +}