about summary refs log tree commit diff
diff options
context:
space:
mode:
authorRyan OShea <Ryan.OShea2@arm.com>2020-02-21 12:33:17 +0000
committerRyan O'Shea <ryan.oshea2@arm.com>2020-02-21 12:37:19 +0000
commit337c17f964fd4d40fe638ef960f7f96b61b63947 (patch)
treef7e997c66d7f56dde36a7cbe8e766b00111f2e22
parentd149ad03250cd8ad8bfe28cbb830ca3fcc6c4f04 (diff)
downloadarmnn-337c17f964fd4d40fe638ef960f7f96b61b63947.tar.gz
IVGCVSW-4307 Onnx Segfault Bug
* Fixed ParseConstant
* Fixed Exception
* Fixed 0 dimension tensor
* Add unit test to check addition as tensor and scalar
* Change C++ style comments to Doxygen style

Signed-off-by: Ryan OShea <Ryan.OShea2@arm.com>
Change-Id: Ied7cbe2e11e713c23d87106d2db39e0eb446c920
-rw-r--r--src/armnnOnnxParser/OnnxParser.cpp27
-rw-r--r--src/armnnOnnxParser/OnnxParser.hpp15
-rw-r--r--src/armnnOnnxParser/test/Addition.cpp110
3 files changed, 134 insertions, 18 deletions
diff --git a/src/armnnOnnxParser/OnnxParser.cpp b/src/armnnOnnxParser/OnnxParser.cpp
index 89be4289d6..0c1af03af4 100644
--- a/src/armnnOnnxParser/OnnxParser.cpp
+++ b/src/armnnOnnxParser/OnnxParser.cpp
@@ -234,17 +234,28 @@ armnn::TensorInfo ToTensorInfo(const onnx::ValueInfoProto& info)
shapeDims.push_back(CHECKED_NON_NEGATIVE(CHECKED_INT32(onnxShape.dim(i).dim_value())));
}
+ if (shapeDims.empty())
+ {
+ shapeDims.push_back(1);
+ }
+
return ToTensorInfo(info.name(), shapeDims, info.type().tensor_type().elem_type());
}
armnn::TensorInfo ToTensorInfo(const onnx::TensorProto& tensor)
{
std::vector<unsigned int> shapeDims;
+
for (auto dim: tensor.dims())
{
shapeDims.push_back(CHECKED_NON_NEGATIVE(CHECKED_INT32(dim)));
}
+ if (shapeDims.empty())
+ {
+ shapeDims.push_back(1);
+ }
+
return ToTensorInfo(tensor.name(), shapeDims, tensor.data_type());
}
@@ -624,7 +635,7 @@ void OnnxParser::LoadGraph()
const std::string& operation = node.op_type();
// check which layers we handled already (add and matmul fused as FC)
- if(operation == "MatMul" )
+ if (operation == "MatMul" )
{
if(m_OutputsFusedAndUsed[nodeIndex].inputForNodes != m_OutputsFusedAndUsed[nodeIndex].fusedWithNodes.size())
{
@@ -881,7 +892,6 @@ void OnnxParser::CreateConstantLayer(const std::string& tensorName, const std::s
void OnnxParser::ParseConstant(const onnx::NodeProto& node)
{
CHECK_VALID_SIZE(static_cast<size_t>(node.attribute_size()), 1);
-
if (!node.attribute(0).has_t())
{
throw ParseException(boost::str(
@@ -897,9 +907,10 @@ void OnnxParser::ParseConstant(const onnx::NodeProto& node)
//Register this as a m_ConstParam so we know we can use it as a constant param in future layers.
m_TensorsInfo[node.output(0)].m_tensor = std::make_unique<const onnx::TensorProto>(onnxTensor);
+ m_TensorsInfo[node.output(0)].m_info = std::make_unique<TensorInfo>(ToTensorInfo(onnxTensor));
+ m_TensorsInfo[node.output(0)].m_dtype = static_cast<onnx::TensorProto::DataType>(onnxTensor.data_type());
CreateConstantLayer(node.output(0), node.name());
-
}
void OnnxParser::ParseMaxPool(const onnx::NodeProto& node)
@@ -1505,11 +1516,9 @@ void OnnxParser::ParseAdd(const onnx::NodeProto& node)
// register the input connection -> for constant inputs, we need to make a newDim constant layer
if(m_TensorsInfo[inputs.first].isConstant()) {
-
CreateConstantLayer(inputs.first, boost::str(boost::format("Add:constant_of_%1%") % node.input(0)));
}
if(m_TensorsInfo[inputs.second].isConstant()) {
-
CreateConstantLayer(inputs.second, boost::str(boost::format("Add:constant_of_%1%") % node.input(1)));
}
RegisterInputSlots(layer, {inputs.first, inputs.second});
@@ -1653,16 +1662,16 @@ void OnnxParser::RegisterOutputSlots(IConnectableLayer* layer, const std::vector
m_TensorConnections[tensorId] = TensorSlots();
}
- TensorSlots & tensorSlots = m_TensorConnections[tensorId];
+ TensorSlots& tensorSlots = m_TensorConnections[tensorId];
// assuming there is only one producer for that tensor
if (tensorSlots.outputSlot != nullptr)
{
throw ParseException(boost::str(
boost::format("Another layer has already registered itself as the producer of "
- "tensor:%2% %3%") %
- tensorId %
- CHECK_LOCATION().AsString()));
+ "tensor:%1% %2%")
+ % tensorId
+ % CHECK_LOCATION().AsString()));
}
tensorSlots.outputSlot = slot;
}
diff --git a/src/armnnOnnxParser/OnnxParser.hpp b/src/armnnOnnxParser/OnnxParser.hpp
index a467180299..f9fa6d969f 100644
--- a/src/armnnOnnxParser/OnnxParser.hpp
+++ b/src/armnnOnnxParser/OnnxParser.hpp
@@ -54,10 +54,10 @@ public:
static ModelPtr LoadModelFromTextFile(const char * fileName);
static ModelPtr LoadModelFromString(const std::string& inputString);
- ///Retrieve inputs names
+ /// Retrieve inputs names
static std::vector<std::string> GetInputs(ModelPtr& model);
- ///Retrieve outputs names
+ /// Retrieve outputs names
static std::vector<std::string> GetOutputs(ModelPtr& model);
private:
@@ -65,7 +65,7 @@ private:
/// Parses a ModelProto loaded into memory from one of the other CreateNetwork*
armnn::INetworkPtr CreateNetworkFromModel(onnx::ModelProto& model);
- ///Parse every node and make the connection between the resulting tensors
+ /// Parse every node and make the connection between the resulting tensors
void LoadGraph();
void SetupInfo(const google::protobuf::RepeatedPtrField<onnx::ValueInfoProto >* list);
@@ -136,10 +136,10 @@ private:
/// The network we're building. Gets cleared after it is passed to the user
armnn::INetworkPtr m_Network;
- ///Ptr to the graph we're building the network from
+ /// Ptr to the graph we're building the network from
GraphPtr m_Graph;
- ///Map of the information for every tensor
+ /// Map of the information for every tensor
struct OnnxTensor
{
std::unique_ptr<armnn::TensorInfo> m_info;
@@ -148,7 +148,6 @@ private:
OnnxTensor() : m_info(nullptr), m_tensor(nullptr), m_dtype(onnx::TensorProto::FLOAT) { }
bool isConstant() { return m_tensor != nullptr; }
-
};
std::unordered_map<std::string, OnnxTensor> m_TensorsInfo;
@@ -166,10 +165,10 @@ private:
TensorSlots() : outputSlot(nullptr) { }
};
- ///Map of the tensor names to their connections for the connections of the layers of the graph
+ /// Map of the tensor names to their connections for the connections of the layers of the graph
std::unordered_map<std::string, TensorSlots> m_TensorConnections;
- //Map of the tensor names to their node and index in graph.node()
+ /// Map of the tensor names to their node and index in graph.node()
std::unordered_map<std::string, std::pair<const onnx::NodeProto*, int>> m_OutputsMap;
/// Number of times a specific node (identified by his index number) was used as input
diff --git a/src/armnnOnnxParser/test/Addition.cpp b/src/armnnOnnxParser/test/Addition.cpp
index 993a620dcc..6fc8eb1151 100644
--- a/src/armnnOnnxParser/test/Addition.cpp
+++ b/src/armnnOnnxParser/test/Addition.cpp
@@ -286,6 +286,103 @@ struct AddInvalidBroadcastFixture : public armnnUtils::ParserPrototxtFixture<arm
}
};
+struct AddScalarFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
+{
+ AddScalarFixture(const std::string& dataType)
+ {
+ m_Prototext = R"(
+ ir_version: 3
+ producer_name: "CNTK"
+ producer_version: "2.5.1"
+ domain: "ai.cntk"
+ model_version: 1
+ graph {
+ name: "CNTKGraph"
+ input {
+ name: "Input0"
+ type {
+ tensor_type {
+ elem_type: )" + dataType + R"(
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 2
+ }
+ dim {
+ dim_value: 2
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "Input1"
+ type {
+ tensor_type {
+ elem_type: )" + dataType + R"(
+ shape {
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ node {
+ input: "Input0"
+ input: "Input1"
+ output: "Output"
+ name: "addition"
+ op_type: "Add"
+ doc_string: ""
+ domain: ""
+ }
+ output {
+ name: "Output"
+ type {
+ tensor_type {
+ elem_type: 1
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 2
+ }
+ dim {
+ dim_value: 2
+ }
+ }
+ }
+ }
+ }
+ }
+ opset_import {
+ version: 7
+ })";
+ }
+};
+
+struct AddValidScalarFixture : AddScalarFixture
+{
+ AddValidScalarFixture() : AddScalarFixture("1") {
+ Setup();
+ }
+};
+
+struct AddInvalidScalarFixture : AddScalarFixture
+{
+ AddInvalidScalarFixture() : AddScalarFixture("6") { }
+};
+
BOOST_FIXTURE_TEST_CASE(ValidAddTest, AddValidFixture)
{
RunTest<4>({{"Input0", {1.0f, 2.0f, -3.0f, -4.0f}},
@@ -308,4 +405,15 @@ BOOST_FIXTURE_TEST_CASE(ValidBroadcastAdd, AddValidBroadcastFixture)
{"Input1", {1.0f, 2.0f, 3.0, 4.0f}}}, {{"Output", {2.0, 4.0, 0, 0.0}}});
}
-BOOST_AUTO_TEST_SUITE_END()
+BOOST_FIXTURE_TEST_CASE(ValidAddScalarTest, AddValidScalarFixture)
+{
+ RunTest<4>({{"Input0", {1.0f, 2.0f, -3.0f, -4.0f}},
+ {"Input1", {-8.0f}}}, {{"Output", {-7.0, -6.0, -11.0, -12.0}}});
+}
+
+BOOST_FIXTURE_TEST_CASE(IncorrectDataTypeAddScalar, AddInvalidScalarFixture)
+{
+ BOOST_CHECK_THROW(Setup(), armnn::ParseException);
+}
+
+BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file