From 5b8093c17044e8eaaaa42d96ba4902dee5791be4 Mon Sep 17 00:00:00 2001
From: Cathal Corbett
Date: Fri, 22 Oct 2021 11:12:07 +0100
Subject: IVGCVSW-6420: Constant flag in tensor info is not set correctly

!android-nn-driver:6532
!armnn-internal-tests:372451

 * Fixed 2 of the 3 ConstTensor() constructors in Tensor.hpp to throw
   InvalidArgumentException when the TensorInfo isConstant parameter is false.
 * Added a new templated ConstTensor() constructor in Tensor.cpp that accepts
   vector<>.data().
 * Fixed the runtime->GetOutputTensorInfo()/GetInputTensorInfo() methods, and
   the submethods they call, to return TensorInfo& rather than TensorInfo.
 * Fixed all failing unit tests for CpuRef/CpuAcc/GpuAcc to ensure any
   ConstTensor created has its TensorInfo isConstant flag set to true.
 * Added unit tests in TensorTest.cpp to ensure the ConstTensor constructors
   throw InvalidArgumentException when the TensorInfo isConstant parameter is
   false.
 * Added a unit test to ensure the empty ConstTensor constructor sets
   TensorInfo isConstant to true.
 * Indentation fixes.
 * Fixed arm_tensor.i to add the isConstant parameter to the TensorInfo
   constructor and added the IsConstant() and SetConstant() methods.
 * Fixed const_tensor.py to throw ValueError when a ConstTensor is constructed
   from a TensorInfo whose isConstant is false.
 * Fixed the PyArmnn unit tests to set TensorInfo isConstant to True wherever
   a ConstTensor is used.
 * Added unit tests in test_const_tensor.py to ensure the ConstTensor
   constructors throw ValueError when the TensorInfo isConstant parameter is
   false.

Signed-off-by: Cathal Corbett
Change-Id: I44e440dd0422c366d31bbdbc77ad2b4db0bde148
---
 src/armnn/test/ShapeInferenceTests.cpp | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

(limited to 'src/armnn/test/ShapeInferenceTests.cpp')

diff --git a/src/armnn/test/ShapeInferenceTests.cpp b/src/armnn/test/ShapeInferenceTests.cpp
index d3c928fec1..f808a0e349 100644
--- a/src/armnn/test/ShapeInferenceTests.cpp
+++ b/src/armnn/test/ShapeInferenceTests.cpp
@@ -233,14 +233,14 @@ TEST_CASE("ConcatTest")
     CreateGraphAndRunTest<ConcatLayer>({{ 1, 2, 1 }, { 1, 2, 1 }}, {{ 2, 2, 1 }}, descriptor, "concat");
 }
 
-TEST_CASE("ConstantTesst")
+TEST_CASE("ConstantTest")
 {
     Graph graph;
     TensorShape outputShape{ 1, 1, 3, 3 };
     auto layer = BuildGraph<ConstantLayer>(&graph, {}, "constant");
 
     const float Datum = 0.0f;
-    ConstTensor output0({outputShape, DataType::Float32}, &Datum);
+    ConstTensor output0({outputShape, DataType::Float32, 0.0f, 0, true}, &Datum);
     layer->m_LayerOutput = std::make_unique<ScopedTensorHandle>(output0);
 
     layer->GetOutputSlot(0).SetTensorInfo({{1, 1, 3, 3}, DataType::Float32});
@@ -294,7 +294,7 @@ TEST_CASE("Convolution2dTest")
                                                 "conv2d");
 
     const float Datum = 0.0f;
-    ConstTensor weights({{1, 1, 3, 3}, DataType::Float32}, &Datum);
+    ConstTensor weights({{1, 1, 3, 3}, DataType::Float32, 0.0f, 0, true}, &Datum);
     layer->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
 
     RunShapeInferenceTest<Convolution2dLayer>(layer, {{ 1, 1, 4, 4 }});
@@ -339,7 +339,7 @@ TEST_CASE("DepthwiseConvolutionTest")
                                                          "depthwiseconv2d");
 
     const float Datum = 0.0f;
-    ConstTensor weights({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum);
+    ConstTensor weights({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, &Datum);
     layer->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
 
     RunShapeInferenceTest<DepthwiseConvolution2dLayer>(layer, {{ 8, 18, 1, 2 }});
@@ -371,7 +371,7 @@ TEST_CASE("DetectionPostProcessTest")
     descriptor.m_ScaleW = 5.0;
 
     const float Datum = 0.0f;
-    ConstTensor anchorsTensor({{1, 1, 3, 3}, DataType::Float32}, &Datum);
+    ConstTensor anchorsTensor({{1, 1, 3, 3}, DataType::Float32, 0.0f, 0, true}, &Datum);
 
     Graph graph;
 
@@ -460,7 +460,7 @@ TEST_CASE("LstmTest")
     auto layer = BuildGraph<LstmLayer>(&graph, {inputShape, inputCellState, inputCellState}, descriptor, "lstm");
 
     float Datum = 0.0f;
-    ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum);
+    ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, &Datum);
 
     layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
     layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
@@ -548,7 +548,7 @@ TEST_CASE("QLstmTest")
     auto layer = BuildGraph<QLstmLayer>(&graph, {inputShape, inputCellState, inputCellState}, descriptor, "qlstm");
 
     float Datum = 0.0f;
-    ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum);
+    ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, &Datum);
 
     layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
     layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
@@ -576,7 +576,7 @@ TEST_CASE("QuantizedLstmTest")
     auto layer = BuildGraph<QuantizedLstmLayer>(&graph, {inputShape, inputCellState, inputCellState}, "quatizedlstm");
 
     float Datum = 0.0f;
-    ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum);
+    ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, &Datum);
 
     layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
     layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
--
cgit v1.2.1