aboutsummaryrefslogtreecommitdiff
path: root/src/armnnOnnxParser
diff options
context:
space:
mode:
authortelsoa01 <telmo.soares@arm.com>2018-08-31 09:22:23 +0100
committertelsoa01 <telmo.soares@arm.com>2018-08-31 09:22:23 +0100
commitc577f2c6a3b4ddb6ba87a882723c53a248afbeba (patch)
treebd7d4c148df27f8be6649d313efb24f536b7cf34 /src/armnnOnnxParser
parent4c7098bfeab1ffe1cdc77f6c15548d3e73274746 (diff)
downloadarmnn-c577f2c6a3b4ddb6ba87a882723c53a248afbeba.tar.gz
Release 18.08
Diffstat (limited to 'src/armnnOnnxParser')
-rw-r--r--src/armnnOnnxParser/OnnxParser.cpp1676
-rw-r--r--src/armnnOnnxParser/OnnxParser.hpp183
-rw-r--r--src/armnnOnnxParser/OnnxSupport.md60
-rw-r--r--src/armnnOnnxParser/README.md5
-rw-r--r--src/armnnOnnxParser/test/Addition.cpp311
-rw-r--r--src/armnnOnnxParser/test/BatchNorm.cpp342
-rw-r--r--src/armnnOnnxParser/test/Const.cpp87
-rw-r--r--src/armnnOnnxParser/test/Constructor.cpp16
-rw-r--r--src/armnnOnnxParser/test/Conv2D.cpp469
-rw-r--r--src/armnnOnnxParser/test/CreateNetwork.cpp63
-rw-r--r--src/armnnOnnxParser/test/DepthConv.cpp162
-rw-r--r--src/armnnOnnxParser/test/FullyConnected.cpp597
-rw-r--r--src/armnnOnnxParser/test/GetInputsOutputs.cpp255
-rw-r--r--src/armnnOnnxParser/test/Pooling.cpp310
-rw-r--r--src/armnnOnnxParser/test/ProtoxtFixture.cpp81
-rw-r--r--src/armnnOnnxParser/test/Relu.cpp70
-rw-r--r--src/armnnOnnxParser/test/Reshape.cpp110
17 files changed, 4797 insertions, 0 deletions
diff --git a/src/armnnOnnxParser/OnnxParser.cpp b/src/armnnOnnxParser/OnnxParser.cpp
new file mode 100644
index 0000000000..fdf43076ef
--- /dev/null
+++ b/src/armnnOnnxParser/OnnxParser.cpp
@@ -0,0 +1,1676 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include "OnnxParser.hpp"
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/Utils.hpp>
+#include <VerificationHelpers.hpp>
+
+#include <google/protobuf/text_format.h>
+#include <google/protobuf/io/zero_copy_stream_impl.h>
+
+#include <boost/format.hpp>
+
+#include <numeric>
+
+using namespace armnn;
+
+namespace armnnOnnxParser
+{
+namespace
+{
+void CheckValidDataType(std::initializer_list<onnx::TensorProto::DataType> validInputTypes,
+ const onnx::TensorProto::DataType actualValue,
+ const char* validExpr,
+ std::string nodeName,
+ std::string tensorName,
+ const armnn::CheckLocation& location)
+{
+ bool isValid = std::any_of(validInputTypes.begin(),
+ validInputTypes.end(),
+ [&actualValue](onnx::TensorProto::DataType x) { return x == actualValue; } );
+ if (!isValid)
+ {
+ throw ParseException(
+ boost::str(
+ boost::format("Datatype %1% is not valid for tensor '%2%' of node '%3%', not in {%4%}. %5%") %
+ onnx::TensorProto::DataType_Name(actualValue) %
+ tensorName %
+ nodeName %
+ validExpr %
+ location.AsString()));
+ }
+}
+
// Validates ACTUAL against the variadic list of allowed ONNX datatypes; the
// stringised list (#__VA_ARGS__) is embedded in the exception message.
#define CHECK_VALID_DATATYPE(NODE, TENSOR, ACTUAL, ...) \
CheckValidDataType({__VA_ARGS__}, ACTUAL, #__VA_ARGS__, NODE, TENSOR, CHECK_LOCATION())

// Pairs a literal list of ONNX datatypes with its stringised form; consumed by
// OnnxParser::ValidateInputs via the VALID_INPUTS macro.
using StrTypeListPair = std::pair<const char*, std::initializer_list<onnx::TensorProto::DataType>>;
#define STR_LIST(...) StrTypeListPair(#__VA_ARGS__, {__VA_ARGS__})
+
+template <typename Callable>
+void ReadMandatoryNodeAttributeImpl(const onnx::NodeProto& node,
+ const std::string& attribName,
+ onnx::AttributeProto::AttributeType expectedType,
+ Callable callable)
+{
+ auto attribs = node.attribute();
+ int attriNum = 0;
+ while (attriNum < node.attribute_size())
+ {
+ if (attribs.Get(attriNum).name() == attribName)
+ {
+ if (attribs.Get(attriNum).type() == expectedType)
+ {
+ callable(attribs.Get(attriNum));
+ }
+ else
+ {
+ throw ParseException(boost::str(boost::format(
+ "Attribute %1% of node %2% expected to have %3% as onnx::AttributeProto::AttributeType, "
+ "but found %4% instead %5%")
+ % attribName
+ % node.name()
+ % onnx::AttributeProto::AttributeType_Name(expectedType)
+ % onnx::AttributeProto::AttributeType_Name(attribs.Get(attriNum).type())
+ % CHECK_LOCATION().AsString()));
+ }
+ break;
+ }
+ ++attriNum;
+ }
+ if (attriNum == node.attribute_size())
+ {
+ throw ParseException(boost::str(boost::format("Could not find required attribute %1% in node %2% %3%")
+ % attribName % node.name() % CHECK_LOCATION().AsString()));
+ }
+}
+
+template <typename Callable>
+void ReadOptionalNodeAttributeImpl(const onnx::NodeProto& node,
+ const std::string& attribName,
+ onnx::AttributeProto::AttributeType expectedType,
+ Callable callable)
+{
+ auto attribs = node.attribute();
+ for (int attriNum = 0; attriNum < node.attribute_size(); ++attriNum)
+ {
+ if (attribs.Get(attriNum).name() == attribName)
+ {
+ if (attribs.Get(attriNum).type() == expectedType)
+ {
+ callable(attribs.Get(attriNum));
+ }
+ else
+ {
+ throw ParseException(boost::str(boost::format(
+ "Attribute %1% of node %2% expected to have %3% as onnx::AttributeProto::AttributeType, "
+ "but found %4% instead %5%")
+ % attribName
+ % node.name()
+ % onnx::AttributeProto::AttributeType_Name(expectedType)
+ % onnx::AttributeProto::AttributeType_Name(attribs.Get(attriNum).type())
+ % CHECK_LOCATION().AsString()));
+ }
+ }
+ }
+}
+
+std::vector<uint32_t> ReadMandatoryNodeUint32ListAttribute(const onnx::NodeProto& node,
+ const std::string& name)
+{
+ std::vector<uint32_t> attriList;
+ ReadMandatoryNodeAttributeImpl(node, name, onnx::AttributeProto::INTS,
+ [&attriList](const onnx::AttributeProto& attrValue)
+ {
+ for (int attriNum = 0; attriNum < attrValue.ints_size(); ++attriNum)
+ {
+ attriList.push_back(CHECKED_NON_NEGATIVE(CHECKED_INT32(attrValue.ints().Get(attriNum))));
+ }
+ });
+ return attriList;
+}
+
+uint32_t ReadOptionalNodeUint32Attribute(const onnx::NodeProto& node,
+ const std::string& name,
+ const uint32_t defaultVal = 0u)
+{
+ uint32_t attribValue = defaultVal;
+ ReadOptionalNodeAttributeImpl(node, name, onnx::AttributeProto::INT,
+ [&attribValue](const onnx::AttributeProto& attrValue)
+ {
+ attribValue = CHECKED_NON_NEGATIVE(CHECKED_INT32((attrValue.i())));
+ });
+ return attribValue;
+}
+
+std::vector<uint32_t> ReadOptionalNodeUint32ListAttribute(const onnx::NodeProto& node,
+ const std::string& name)
+{
+ std::vector<uint32_t> attriList;
+ ReadOptionalNodeAttributeImpl(node, name, onnx::AttributeProto::INTS,
+ [&attriList](const onnx::AttributeProto& attrValue)
+ {
+ for (int attriNum = 0; attriNum < attrValue.ints_size(); ++attriNum)
+ {
+ attriList.push_back(CHECKED_NON_NEGATIVE(CHECKED_INT32(attrValue.ints().Get(attriNum))));
+ }
+ });
+
+ return attriList;
+}
+
+float ReadOptionalNodeFloatAttribute(const onnx::NodeProto& node,
+ const std::string& name,
+ const float defaultValue = 0.0f)
+{
+ float attribValue = defaultValue;
+ ReadOptionalNodeAttributeImpl(node, name, onnx::AttributeProto::FLOAT,
+ [&attribValue](const onnx::AttributeProto& attrValue)
+ {
+ attribValue = attrValue.f();
+ });
+ return attribValue;
+}
+
+std::string ReadOptionalNodeStringAttribute(const onnx::NodeProto& node, const std::string& name)
+{
+ std::string attribValue = "";
+ ReadOptionalNodeAttributeImpl(node, name, onnx::AttributeProto::STRING,
+ [&attribValue](const onnx::AttributeProto& attrValue)
+ {
+ attribValue = attrValue.s();
+ });
+ return attribValue;
+}
+
// Converts an ONNX ValueInfoProto (shape + element type) into an armnn
// TensorInfo. Only FLOAT, INT32 and INT64 element types are supported;
// INT64 is narrowed to armnn's Signed32 (dim values are range-checked via
// CHECKED_INT32). Throws ParseException on any other element type.
armnn::TensorInfo ToTensorInfo(const onnx::ValueInfoProto& info)
{
    const onnx::TensorShapeProto onnxShape = info.type().tensor_type().shape();
    std::vector<unsigned int> shapeDims;
    for (int i = 0; i < onnxShape.dim_size(); ++i)
    {
        // Each dim must fit in a non-negative int32 to become an armnn dimension.
        shapeDims.push_back(CHECKED_NON_NEGATIVE(CHECKED_INT32(onnxShape.dim(i).dim_value())));
    }
    DataType type;
    switch(info.type().tensor_type().elem_type())
    {
        case onnx::TensorProto::FLOAT:
        {
            type = DataType::Float32;
            break;
        }
        case onnx::TensorProto::INT32:
        case onnx::TensorProto::INT64:
        {
            // Both integer widths map onto armnn's single signed 32-bit type.
            type = DataType::Signed32;
            break;
        }
        default:
        {
            throw ParseException(
                boost::str(
                    boost::format("'%1%' is not a currently supported datatype for tensor %2%."
                                  " Supported dataTypes are FLOAT, INT32 and INT64. %3%") %
                                  onnx::TensorProto::DataType_Name(info.type().tensor_type().elem_type()) %
                                  info.name() %
                                  CHECK_LOCATION().AsString() ));
        }

    }
    return TensorInfo(TensorShape(static_cast<unsigned int>(shapeDims.size()), shapeDims.data()), type);
}
+
+std::string TensorInfoAsString(const TensorInfo& info,
+ const std::string& name,
+ const onnx::TensorProto::DataType& type)
+{
+ const TensorShape shape = info.GetShape();
+ std::stringstream ss;
+ ss << "tensor '" << name << "' contains "
+ << onnx::TensorProto::DataType_Name(type)
+ << " and has shape [";
+
+ for (uint32_t i = 0; i < shape.GetNumDimensions() - 1; ++i)
+ {
+ ss << shape[i] << ", ";
+ }
+ ss << shape[shape.GetNumDimensions() - 1] << "]";
+ return ss.str();
+}
+
// Computes SAME-style padding: enough total padding that ceil(inputSize/stride)
// output positions are produced for the given filter size. An odd total puts
// the extra cell at the back for SAME_UPPER (isUpper) and at the front for
// SAME_LOWER.
void CalcPadding(uint32_t inputSize, uint32_t filterSize, uint32_t stride, uint32_t* paddingFront,
                 uint32_t* paddingBack, bool isUpper)
{
    const uint32_t outputSize = (inputSize + stride - 1) / stride;
    const uint32_t paddedSpan = (outputSize - 1) * stride + filterSize;
    const uint32_t totalPadding = paddedSpan - inputSize;

    uint32_t front = totalPadding / 2;
    uint32_t back = totalPadding / 2;
    if (totalPadding % 2 == 1)
    {
        (isUpper ? back : front) += 1;
    }
    *paddingFront = front;
    *paddingBack = back;
}
+
// Resolves the concrete output TensorInfo for a Reshape from its int64
// target-shape tensor, following ONNX semantics: an entry of 0 copies the
// corresponding input dimension, and at most one entry of -1 ("stretch") is
// inferred from the remaining element count. Throws ParseException if more
// than one -1 is present. The result is always typed Float32.
TensorInfo ComputeReshapeInfo(const onnx::TensorProto& targetShapeTensor,
                              const TensorShape& inShape,
                              const std::string& outName)
{
    std::vector<int> targetDims;
    for(int i = 0; i < targetShapeTensor.int64_data_size(); ++i)
    {
        int val = CHECKED_INT32(targetShapeTensor.int64_data(i));
        if(val == 0)
        {
            // 0 means "keep the input's dimension at this position".
            targetDims.push_back(static_cast<int>(inShape[static_cast<uint>(i)]));
        }
        else
        {
            targetDims.push_back(val);
        }
    }

    std::vector<unsigned int> outDims(targetDims.begin(), targetDims.end());
    const auto stretchDim = std::find(targetDims.begin(), targetDims.end(), -1);
    if (stretchDim != targetDims.end())
    {
        // A second -1 makes the shape ambiguous - reject it.
        if (std::find(std::next(stretchDim), targetDims.end(), -1) != targetDims.end())
        {
            std::stringstream ss;
            ss << "[ ";
            for(uint i = 0; i < targetDims.size() - 1; ++i)
            {
                ss << targetDims[i] << ", ";
            }
            ss << targetDims[targetDims.size() - 1] << " ]";

            throw ParseException(boost::str(
                boost::format("Error during creation of reshaped tensor '%1%'. At most one component of shape can be "
                              " -1 and here, shape is %2% %3%")
                % outName
                % ss.str()
                % CHECK_LOCATION().AsString()));
        }

        // Product of the explicit dims: seeding the accumulation with -1 cancels
        // the stretch entry's own -1, leaving a positive product.
        auto targetNumElements = boost::numeric_cast<unsigned int>(std::accumulate(targetDims.begin(), targetDims.end(),
            -1, std::multiplies<int32_t>()));
        auto stretchIndex = static_cast<size_t>(std::distance(targetDims.begin(), stretchDim));
        // The stretch dim absorbs whatever is left of the input's element count.
        outDims[stretchIndex] = inShape.GetNumElements() / targetNumElements;
    }
    TensorShape outShape = TensorShape{static_cast<unsigned int>(outDims.size()), outDims.data()};
    return TensorInfo(outShape, DataType::Float32);
}
+
+} //namespace
+
// Dispatch table mapping an ONNX op_type string to the OnnxParser member
// function that handles it. MatMul and Add are intentionally absent: LoadGraph
// handles them specially so MatMul+Add pairs can be fused into FullyConnected.
const std::map<std::string, OnnxParser::OperationParsingFunction> OnnxParser::m_ParserFunctions = {
    { "BatchNormalization", &OnnxParser::ParseBatchNormalization},
    { "GlobalAveragePool", &OnnxParser::ParseGlobalAveragePool},
    { "AveragePool", &OnnxParser::ParseAveragePool },
    { "Constant", &OnnxParser::ParseConstant },
    { "MaxPool", &OnnxParser::ParseMaxPool },
    { "Reshape", &OnnxParser::ParseReshape },
    { "Relu", &OnnxParser::ParseRelu },
    { "Conv", &OnnxParser::ParseConv },
    { "Add", &OnnxParser::ParseAdd },
};
+
// Checks every input tensor of the node against the datatype whitelist carried
// in validInputs (a StrTypeListPair built with STR_LIST); throws ParseException
// on the first mismatch.
template<typename TypePair, typename Location>
void OnnxParser::ValidateInputs(const onnx::NodeProto& node,
                                TypePair validInputs,
                                const Location& location)
{
    for(auto input : node.input())
    {
        CheckValidDataType(validInputs.second,
                           m_TensorsInfo[input].m_dtype,
                           validInputs.first,
                           node.name(),
                           input,
                           location);
    }
}

// Convenience wrapper that stamps the call site's source location into the check.
#define VALID_INPUTS(NODE, VALID_INPUTS) \
    OnnxParser::ValidateInputs(NODE, \
                               VALID_INPUTS, \
                               CHECK_LOCATION())
+
// Returns the TensorInfo for each name in outNames. Infos already registered in
// m_TensorsInfo are reused; if any are missing, all output shapes are inferred
// from the layer and inputShapes, typed Float32, and cached in m_TensorsInfo.
std::vector<TensorInfo> OnnxParser::ComputeOutputInfo(std::vector<std::string> outNames,
                                                      const IConnectableLayer* layer,
                                                      std::vector<TensorShape> inputShapes)
{
    BOOST_ASSERT(! outNames.empty());
    // Inference is needed if any requested tensor lacks a registered info.
    bool needCompute = std::any_of(outNames.begin(),
                                   outNames.end(),
                                   [this](std::string name)
                                   {
                                       return (m_TensorsInfo.count(name) == 0 || m_TensorsInfo[name].m_info == nullptr);
                                   });
    std::vector<TensorInfo> outInfo;
    //if the output info(s) are not here, we need to compute them
    std::vector<TensorShape> inferredShapes;
    if(needCompute)
    {
        inferredShapes = layer->InferOutputShapes(inputShapes);
        BOOST_ASSERT(inferredShapes.size() == outNames.size());
    }
    for (uint i = 0; i < outNames.size(); ++i)
    {
        if(needCompute)
        {
            m_TensorsInfo[outNames[i]] = OnnxTensor();
            // NOTE(review): inferred outputs are assumed Float32 - TODO confirm
            // this holds for graphs carrying INT32 tensors.
            m_TensorsInfo[outNames[i]].m_info = std::make_unique<TensorInfo>(
                TensorInfo(inferredShapes[i], DataType::Float32));
        }
        outInfo.push_back(*m_TensorsInfo[outNames[i]].m_info);
    }
    return outInfo;
}
+
// Factory: heap-allocates a parser; the caller owns the pointer and must
// release it via Destroy.
IOnnxParser* IOnnxParser::CreateRaw()
{
    return new OnnxParser();
}

// Factory: wraps CreateRaw in a smart pointer whose deleter is Destroy.
IOnnxParserPtr IOnnxParser::Create()
{
    return IOnnxParserPtr(CreateRaw(), &IOnnxParser::Destroy);
}

// Deleter used by IOnnxParserPtr; deletes through the interface pointer.
void IOnnxParser::Destroy(IOnnxParser* parser)
{
    delete parser;
}

// The network is created lazily in CreateNetworkFromModel; start with a null one.
OnnxParser::OnnxParser()
    : m_Network(nullptr, nullptr)
{
}
+
// Drops any previously loaded network/graph so the parser can be reused for a
// fresh Create* call.
void OnnxParser::ResetParser()
{
    m_Network = armnn::INetworkPtr(nullptr, nullptr);
    m_Graph = nullptr;
}

// Clears all per-parse bookkeeping; invoked on both the success and the
// exception path of CreateNetworkFromModel.
void OnnxParser::Cleanup()
{
    m_TensorConnections.clear();
    m_TensorsInfo.clear();
    m_OutputsMap.clear();
    m_OutputsFusedAndUsed.clear();
}
+
+std::pair<ConstTensor, std::unique_ptr<float[]>> OnnxParser::CreateConstTensor(const std::string name)
+{
+ const TensorInfo tensorInfo = *m_TensorsInfo[name].m_info;
+ onnx::TensorProto onnxTensor = *m_TensorsInfo[name].m_tensor;
+
+ auto srcData = onnxTensor.float_data().data();
+ if(tensorInfo.GetNumElements() != static_cast<uint>(onnxTensor.float_data_size()))
+ {
+ throw ParseException(boost::str(
+ boost::format("The number of data provided (%1%) does not match the tensor '%2%' number of elements"
+ " (%3%) %4%")
+ % onnxTensor.float_data_size()
+ % name
+ % tensorInfo.GetNumElements()
+ % CHECK_LOCATION().AsString()));
+ }
+ std::unique_ptr<float[]> tensorData(new float[tensorInfo.GetNumElements()]);
+
+ // Copy the value list entries into the destination
+ ::memcpy(tensorData.get(),srcData, tensorInfo.GetNumBytes());
+
+ // Const tensors requires at least a list of values
+ if (tensorInfo.GetNumElements() == 0)
+ {
+ throw ParseException(boost::str(
+ boost::format("No tensor data found for Const tensor '%1%' %2%")
+ % name
+ % CHECK_LOCATION().AsString()));
+ }
+ return std::make_pair(ConstTensor(tensorInfo, tensorData.get()), std::move(tensorData));
+}
+
+ModelPtr OnnxParser::LoadModelFromTextFile(const char* graphFile)
+{
+ FILE* fd = fopen(graphFile, "r");
+
+ if (fd == nullptr)
+ {
+ throw FileNotFoundException(boost::str(
+ boost::format("Invalid (null) filename %1%") % CHECK_LOCATION().AsString()));
+ }
+
+ // Parse the file into a message
+ ModelPtr modelProto = std::make_unique<onnx::ModelProto>();
+ using google::protobuf::io::FileInputStream;
+ std::unique_ptr<FileInputStream> input = std::make_unique<FileInputStream>(fileno(fd));
+ bool success = google::protobuf::TextFormat::Parse(input.get(), modelProto.get());
+ fclose(fd);
+
+ if (!success)
+ {
+ std::stringstream error;
+ error << "Failed to parse graph file";
+ throw ParseException(boost::str(
+ boost::format("%1% %2%") % error.str() % CHECK_LOCATION().AsString()));
+ }
+ return modelProto;
+}
+
+INetworkPtr OnnxParser::CreateNetworkFromTextFile(const char* graphFile)
+{
+ ResetParser();
+ ModelPtr modelProto = LoadModelFromTextFile(graphFile);
+ return CreateNetworkFromModel(*modelProto);
+}
+
+
+ModelPtr OnnxParser::LoadModelFromBinaryFile(const char* graphFile)
+{
+ FILE* fd = fopen(graphFile, "rb");
+
+ if (fd == nullptr)
+ {
+ throw FileNotFoundException(boost::str(
+ boost::format("Invalid (null) filename %1%") % CHECK_LOCATION().AsString()));
+ }
+
+ // Parse the file into a message
+ ModelPtr modelProto = std::make_unique<onnx::ModelProto>();
+
+ google::protobuf::io::FileInputStream inStream(fileno(fd));
+ google::protobuf::io::CodedInputStream codedStream(&inStream);
+ codedStream.SetTotalBytesLimit(INT_MAX, INT_MAX);
+ bool success = modelProto.get()->ParseFromCodedStream(&codedStream);
+ fclose(fd);
+
+ if (!success)
+ {
+ std::stringstream error;
+ error << "Failed to parse graph file";
+ throw ParseException(boost::str(
+ boost::format("%1% %2%") % error.str() % CHECK_LOCATION().AsString()));
+ }
+ return modelProto;
+
+}
+
+INetworkPtr OnnxParser::CreateNetworkFromBinaryFile(const char* graphFile)
+{
+ ResetParser();
+ ModelPtr modelProto = LoadModelFromBinaryFile(graphFile);
+ return CreateNetworkFromModel(*modelProto);
+}
+
+ModelPtr OnnxParser::LoadModelFromString(const std::string& protoText)
+{
+ if (protoText == "")
+ {
+ throw InvalidArgumentException(boost::str(
+ boost::format("Invalid (empty) string for model parameter %1%") % CHECK_LOCATION().AsString()));
+ }
+ // Parse the string into a message
+ ModelPtr modelProto = std::make_unique<onnx::ModelProto>();
+ bool success = google::protobuf::TextFormat::ParseFromString(protoText, modelProto.get());
+ if (!success)
+ {
+ std::stringstream error;
+ error << "Failed to parse graph file";
+ throw ParseException(boost::str(
+ boost::format("%1% %2%") % error.str() % CHECK_LOCATION().AsString()));
+ }
+ return modelProto;
+}
+
+INetworkPtr OnnxParser::CreateNetworkFromString(const std::string& protoText)
+{
+ ResetParser();
+ ModelPtr modelProto = LoadModelFromString(protoText);
+ return CreateNetworkFromModel(*modelProto);
+}
+
+INetworkPtr OnnxParser::CreateNetworkFromModel(onnx::ModelProto& model)
+{
+ m_Network = INetwork::Create();
+ try
+ {
+ m_Graph = std::make_unique<onnx::GraphProto>(*model.mutable_graph());
+ LoadGraph();
+ }
+ catch (const ParseException& e)
+ {
+ Cleanup();
+ throw e;
+ }
+ Cleanup();
+ return std::move(m_Network);
+}
+
// Walks the loaded GraphProto: registers tensor infos and initializers, creates
// input/output layers, detects MatMul+Add pairs that can fuse into
// FullyConnected, parses every remaining node through m_ParserFunctions, and
// finally wires all recorded output->input slot connections.
void OnnxParser::LoadGraph()
{
    BOOST_ASSERT(m_Graph.get() != nullptr);

    //Fill m_TensorsInfo with the shapes and value of every tensor
    SetupInfo(m_Graph->mutable_output());
    SetupInfo(m_Graph->mutable_input());
    SetupInfo(m_Graph->mutable_value_info());

    // Initializers carry the actual constant data (weights, biases, shapes).
    for (auto tensor : m_Graph->initializer())
    {
        m_TensorsInfo[tensor.name()].m_tensor = std::make_unique<const onnx::TensorProto>(tensor);
    }

    SetupInputLayers();
    SetupOutputLayers();

    //Detect FullyConnected layers with bias and update the FusedAndUsed map accordingly
    DetectFullyConnected();

    //Parsing the graph
    for(size_t nodeIndex = 0; nodeIndex < static_cast<size_t>(m_Graph->node_size()); nodeIndex++)
    {
        auto node = m_Graph->node(static_cast<int>(nodeIndex));
        const std::string& operation = node.op_type();

        // check which layers we handled already (add and matmul fused as FC)
        if(operation == "MatMul" )
        {
            // A MatMul whose output is also consumed elsewhere cannot be fully
            // fused away, so emit it as a bias-less FullyConnected here.
            if(m_OutputsFusedAndUsed[nodeIndex].inputForNodes != m_OutputsFusedAndUsed[nodeIndex].fusedWithNodes.size())
            {
                //Node which can not be fused as a FullyConnected layer (used in layers as a simple matmul output)
                AddFullyConnected(node);
            }
        }
        else if (!(m_OutputsFusedAndUsed[nodeIndex].fusedWithNodes.empty()) && operation == "Add")
        {
            // This Add is the bias half of a fused MatMul+Add pair.
            int matmulIndex = static_cast<int> (m_OutputsFusedAndUsed[nodeIndex].fusedWithNodes[0]);
            AddFullyConnected(m_Graph->node(matmulIndex), &node);
        }
        else if (m_OutputsFusedAndUsed[nodeIndex].fusedWithNodes.empty()) //node is not part of a fused layer
        {
            auto it = m_ParserFunctions.find(operation);
            if (it != m_ParserFunctions.end())
            {
                auto func = it->second;
                (this->*func)(node);
            }
            else
            {
                throw ParseException(boost::str(
                    boost::format("Unsupported operation %1% for node '%2%' %3%")
                    % operation
                    % node.name()
                    % CHECK_LOCATION().AsString()));
            }
        }
    }

    //Making the connections between outputs and inputs of each layers
    for (const auto& tensorCon : m_TensorConnections)
    {
        if (tensorCon.second.outputSlot != nullptr)
        {
            for (size_t inputSlotIdx = 0; inputSlotIdx < tensorCon.second.inputSlots.size(); ++inputSlotIdx)
            {
                tensorCon.second.outputSlot->Connect(*(tensorCon.second.inputSlots[inputSlotIdx]));
            }
        }
    }
}
+
+void OnnxParser::SetupInfo(const google::protobuf::RepeatedPtrField<onnx::ValueInfoProto >* list)
+{
+ for (auto tensor : *list)
+ {
+ m_TensorsInfo[tensor.name()] = OnnxTensor();
+ m_TensorsInfo[tensor.name()].m_info = std::make_unique<TensorInfo>(ToTensorInfo(tensor));
+ m_TensorsInfo[tensor.name()].m_dtype = tensor.type().tensor_type().elem_type();
+ }
+}
+
+void OnnxParser::DetectFullyConnected()
+{
+ m_OutputsFusedAndUsed = std::vector<UsageSummary> (static_cast<size_t>(m_Graph->node_size()), UsageSummary());
+ auto matmulAndConstant = [&](const std::string& constInput,
+ const std::string& matmulInput,
+ int& nodeIndex)
+ {
+ auto matmulIt = m_OutputsMap.find(matmulInput);
+ if(matmulIt != m_OutputsMap.end() && matmulIt->second.first->op_type() == "MatMul"
+ && m_TensorsInfo[constInput].isConstant())
+ {
+ nodeIndex = matmulIt->second.second;
+ return true;
+ }
+ return false;
+ };
+
+ for(int nodeIndex = 0; nodeIndex < m_Graph->node_size(); nodeIndex++)
+ {
+ const onnx::NodeProto* node = &m_Graph->node(nodeIndex);
+ for (const std::string& output : node->output())
+ {
+ m_OutputsMap[output] = std::make_pair(node, nodeIndex);
+ }
+
+ for (const std::string& input : node->input()) //count how many time a node is used as input
+ {
+ auto matmulIt = m_OutputsMap.find(input);
+ if(matmulIt != m_OutputsMap.end()){
+ ++m_OutputsFusedAndUsed[static_cast<size_t>(matmulIt->second.second)].inputForNodes; //node used
+ }
+ }
+
+ if (node->op_type() == "Add")
+ {
+ int matmulIndex = 0;
+ if (matmulAndConstant(node->input(0), node->input(1), matmulIndex) ||
+ matmulAndConstant(node->input(1), node->input(0), matmulIndex))
+ {
+ //matmul and add were fused
+ m_OutputsFusedAndUsed[static_cast<size_t>(matmulIndex)].fusedWithNodes
+ .push_back(static_cast<size_t>(nodeIndex));
+
+ m_OutputsFusedAndUsed[static_cast<size_t>(nodeIndex)].fusedWithNodes
+ .push_back(static_cast<size_t>(matmulIndex));
+ }
+ }
+ }
+
+ for (auto output: m_Graph->output()) { //Add usages as output of the graph in count of usages
+ auto matmulIt = m_OutputsMap.find(output.name());
+ if(matmulIt != m_OutputsMap.end()){
+ ++m_OutputsFusedAndUsed[static_cast<size_t>(matmulIt->second.second)].inputForNodes;
+ }
+ }
+}
+
+template<typename Location>
+void OnnxParser::GetInputAndParam(const onnx::NodeProto& node,
+ std::string* inputName,
+ std::string* constName,
+ const Location& location)
+{
+ int cstIndex;
+ if (m_TensorsInfo[node.input(0)].isConstant())
+ {
+ cstIndex = 0;
+ }
+ else if (m_TensorsInfo[node.input(1)].isConstant())
+ {
+ cstIndex = 1;
+ }
+ else
+ {
+ throw ParseException(boost::str(
+ boost::format("One of the input tensors ('%1%' or '%2%') should be constant in node '%3%' %4%")
+ % node.input(0)
+ % node.input(1)
+ % node.name()
+ % location.AsString()));
+ }
+ if(constName)
+ {
+ *constName = node.input(cstIndex);
+ }
+ if(inputName)
+ {
+ *inputName = node.input(!cstIndex);
+ }
+}
+
+template<typename Location>
+void OnnxParser::To1DTensor(const std::string& name, const Location& location)
+{
+ TensorShape shape = m_TensorsInfo[name].m_info->GetShape();
+ std::vector<uint32_t> newShape;
+ for(uint i = 0; i < shape.GetNumDimensions() - 1; ++i)
+ {
+ if(shape[i] != 1)
+ {
+ throw ParseException(boost::str(
+ boost::format("Only tensors with shape [1, ..., 1, X] can be converted to 1D and %1% %2%")
+ % TensorInfoAsString(*m_TensorsInfo[name].m_info, name, m_TensorsInfo[name].m_dtype)
+ % location.AsString()));
+ }
+ }
+ newShape.push_back(shape[shape.GetNumDimensions() - 1]);
+
+ m_TensorsInfo[name].m_info->SetShape(TensorShape(static_cast<unsigned int>(newShape.size()), newShape.data()));
+}
+
// Emits a FullyConnected layer for a MatMul node, optionally fused with the
// following Add node (addNode != nullptr), which then supplies the bias.
// One MatMul input must be a constant (the weights); the bias must be a
// constant [1, ..., 1, X] tensor matching the weights' second dimension.
void OnnxParser::AddFullyConnected(const onnx::NodeProto& matmulNode, const onnx::NodeProto* addNode)
{

    // find matmul inputs
    std::string weightName;
    std::string inputName;
    CHECK_VALID_SIZE(static_cast<size_t>(matmulNode.input_size()), 2);
    CHECK_VALID_SIZE(static_cast<size_t>(matmulNode.output_size()), 1);
    VALID_INPUTS(matmulNode, STR_LIST(onnx::TensorProto::FLOAT));

    GetInputAndParam(matmulNode, &inputName, &weightName, CHECK_LOCATION());

    FullyConnectedDescriptor desc;
    desc.m_BiasEnabled = addNode != nullptr;

    IConnectableLayer* layer = nullptr;
    if(desc.m_BiasEnabled)
    {
        // find bias const
        std::string biasName;
        CHECK_VALID_SIZE(static_cast<size_t>(addNode->input_size()), 2);
        CHECK_VALID_SIZE(static_cast<size_t>(addNode->output_size()), 1);
        VALID_INPUTS(*addNode, STR_LIST(onnx::TensorProto::FLOAT));

        GetInputAndParam(*addNode, nullptr, &biasName, CHECK_LOCATION());

        //Output shape is [1, weights[1]] and 1d vec in ONNX can be [1,X] so we convert biases to "armnn" 1D
        To1DTensor(biasName, CHECK_LOCATION());
        TensorInfo weightInfo = *m_TensorsInfo[weightName].m_info;
        TensorInfo biasInfo = *m_TensorsInfo[biasName].m_info;

        // The bias length must match the weights' output-channel dimension.
        if (weightInfo.GetShape()[1] != biasInfo.GetShape()[0])
        {
            throw ParseException(boost::str(
                boost::format("Shape of weights '%1%' and bias of following Add node '%2%' do not match : %3%"
                              " and %4% ( /!\\ bias should be a 1D tensor) %5%")
                % weightName
                % addNode->name()
                % TensorInfoAsString(*m_TensorsInfo[weightName].m_info,
                                     weightName,
                                     m_TensorsInfo[weightName].m_dtype)
                % TensorInfoAsString(*m_TensorsInfo[biasName].m_info, biasName,
                                     m_TensorsInfo[biasName].m_dtype )
                % CHECK_LOCATION().AsString()));
        }
        layer = m_Network->AddFullyConnectedLayer(desc,
                                                  CreateConstTensor(weightName).first,
                                                  CreateConstTensor(biasName).first,
                                                  matmulNode.name().c_str());
        BOOST_ASSERT(layer != nullptr);

        // The fused layer's output tensor is the Add node's output, not the MatMul's.
        auto outputInfo = ComputeOutputInfo({addNode->output(0)}, layer,
                                            {m_TensorsInfo[inputName].m_info->GetShape(),
                                             m_TensorsInfo[weightName].m_info->GetShape()});

        layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);

        RegisterInputSlots(layer, {inputName});
        RegisterOutputSlots(layer, {addNode->output(0)});
    }
    else
    {
        layer = m_Network->AddFullyConnectedLayer(desc, CreateConstTensor(weightName).first, matmulNode.name().c_str());
        BOOST_ASSERT(layer != nullptr);

        auto outputInfo = ComputeOutputInfo({matmulNode.output(0)}, layer,
                                            {m_TensorsInfo[inputName].m_info->GetShape(),
                                             m_TensorsInfo[weightName].m_info->GetShape()});
        layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);

        RegisterInputSlots(layer, {inputName});
        RegisterOutputSlots(layer, {matmulNode.output(0)});
    }
}
+
+void OnnxParser::CreateConstantLayer(const std::string& tensorName, const std::string& layerName)
+{
+ auto armnnTensor = CreateConstTensor(tensorName);
+
+ IConnectableLayer* layer = m_Network->AddConstantLayer(armnnTensor.first, layerName.c_str());
+ layer->GetOutputSlot(0).SetTensorInfo(armnnTensor.first.GetInfo());
+ RegisterOutputSlots(layer, {tensorName});
+}
+
+void OnnxParser::ParseConstant(const onnx::NodeProto& node)
+{
+ CHECK_VALID_SIZE(static_cast<size_t>(node.attribute_size()), 1);
+
+ if (!node.attribute(0).has_t())
+ {
+ throw ParseException(boost::str(
+ boost::format("Value not found for Constant node '%1%' %2%")
+ % node.name()
+ % CHECK_LOCATION().AsString()));
+ }
+ const onnx::TensorProto& onnxTensor = node.attribute(0).t();
+
+ //ONNX can have Float16 and double constant nodes but ArmNN only supports float32
+ CHECK_VALID_DATATYPE(node.name(), onnxTensor.name(), onnxTensor.data_type(), onnx::TensorProto::FLOAT);
+
+ //Register this as a m_ConstParam so we know we can use it as a constant param in future layers.
+ m_TensorsInfo[node.output(0)].m_tensor = std::make_unique<const onnx::TensorProto>(onnxTensor);
+
+ CreateConstantLayer(node.output(0), node.name());
+
+}
+
+void OnnxParser::ParseMaxPool(const onnx::NodeProto& node)
+{
+ Pooling2dDescriptor desc;
+ desc.m_PoolType = PoolingAlgorithm::Max;
+ desc.m_PaddingMethod = PaddingMethod::Exclude;
+ AddPoolingLayer(node, desc);
+}
+
// Parses a GlobalAveragePool node by emitting an average Pooling2d whose
// kernel spans the whole spatial extent of the input.
void OnnxParser::ParseGlobalAveragePool(const onnx::NodeProto& node)
{
    Pooling2dDescriptor desc = Pooling2dDescriptor();
    desc.m_PoolType = PoolingAlgorithm::Average;

    //kernel size is the same as input
    // NOTE(review): assumes a 4D NCHW input (index 2 = height, 3 = width) -
    // TODO confirm inputs are validated to 4D upstream.
    TensorShape inputShape = m_TensorsInfo[node.input(0)].m_info->GetShape();
    desc.m_PoolWidth = inputShape[3];
    desc.m_PoolHeight = inputShape[2];

    IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, node.name().c_str());
    BOOST_ASSERT(layer != nullptr);

    auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {inputShape});
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    RegisterInputSlots(layer, {node.input(0)});

    // register the output connection slots for the layer, connections are made after all layers have been created
    RegisterOutputSlots(layer, {node.output(0)});
}
+
+void OnnxParser::ParseAveragePool(const onnx::NodeProto& node)
+{
+ Pooling2dDescriptor desc;
+ desc.m_PoolType = PoolingAlgorithm::Average;
+
+ uint32_t count_include_pad = 0;
+ count_include_pad = ReadOptionalNodeUint32Attribute(node, "count_include_pad");
+ if(count_include_pad) {
+ desc.m_PaddingMethod = PaddingMethod::IgnoreValue;
+ }
+ AddPoolingLayer(node, desc);
+}
+
// Shared implementation for MaxPool/AveragePool: reads kernel, stride and
// padding attributes, resolves explicit "pads" or the deprecated "auto_pad"
// SAME_UPPER/SAME_LOWER scheme, and emits a Pooling2d layer.
// NOTE(review): assumes kernel_shape has 2 entries (H, W), pads has 4
// (top, left, bottom, right) and the input is 4D NCHW - TODO confirm these
// are validated upstream.
void OnnxParser::AddPoolingLayer(const onnx::NodeProto& node, Pooling2dDescriptor& desc)
{

    CHECK_VALID_SIZE(static_cast<size_t>(node.input_size()), 1);
    CHECK_VALID_SIZE(static_cast<size_t>(node.output_size()), 1);

    VALID_INPUTS(node, STR_LIST(onnx::TensorProto::FLOAT));

    std::vector<uint32_t> kernel_shape = ReadMandatoryNodeUint32ListAttribute(node, "kernel_shape"); //size of pool win
    std::vector<uint32_t> strides = ReadOptionalNodeUint32ListAttribute(node, "strides");
    std::vector<uint32_t> pads = ReadOptionalNodeUint32ListAttribute(node, "pads");

    desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
    desc.m_PoolWidth = kernel_shape[1];
    desc.m_PoolHeight = kernel_shape[0];

    // Missing "strides" defaults to 1 in both directions (ONNX default).
    if(strides.empty())
    {
        desc.m_StrideX = 1;
        desc.m_StrideY = 1;
    }
    else
    {
        desc.m_StrideX = strides[1];
        desc.m_StrideY = strides[0];
    }

    //Check new padding version first
    if(pads.empty())
    {
        //Check deprecated version
        std::string paddingString = ReadOptionalNodeStringAttribute(node, "auto_pad");
        // VALID / empty / NOTSET all mean "no padding"; only the SAME_* modes
        // require computed padding.
        if(paddingString != "VALID" && paddingString != "" && paddingString != "NOTSET")
        {
            bool isUpper;
            if( paddingString == "SAME_LOWER")
            {
                isUpper = false;
            }
            else if (paddingString == "SAME_UPPER")
            {
                isUpper = true;
            }
            else
            {
                throw ParseException(boost::str(
                    boost::format("Invalid auto_pad attribute for node %1%. "
                                  "Only SAME_UPPER, SAME_LOWER or VALID supported and found %2% %3%")
                    % node.name()
                    % paddingString
                    % CHECK_LOCATION().AsString()));
            }
            auto inputInfo = *m_TensorsInfo[node.input(0)].m_info;
            uint32_t inputHeight = inputInfo.GetShape()[2];
            uint32_t inputWidth = inputInfo.GetShape()[3];
            CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, &desc.m_PadTop, &desc.m_PadBottom, isUpper);
            CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, &desc.m_PadLeft, &desc.m_PadRight, isUpper);
        }
    }
    else
    {
        // Explicit ONNX pads order: [top, left, bottom, right].
        desc.m_PadTop = pads[0];
        desc.m_PadLeft = pads[1];
        desc.m_PadBottom = pads[2];
        desc.m_PadRight = pads[3];
    }

    IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, node.name().c_str());
    BOOST_ASSERT(layer != nullptr);

    auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {m_TensorsInfo[node.input(0)].m_info->GetShape()});
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);

    // register the input connection slots for the layer, connections are made after all layers have been created
    // only the tensors for the inputs are relevant, exclude the const tensors
    RegisterInputSlots(layer, {node.input(0)});

    // register the output connection slots for the layer, connections are made after all layers have been created
    RegisterOutputSlots(layer, {node.output(0)});
}
+
+void OnnxParser::CreateReshapeLayer(const std::string& inputName,
+                                    const std::string& outputName,
+                                    const std::string& layerName)
+{
+    // The target shape is taken from the already-computed info of the output tensor.
+    const TensorInfo reshapedInfo = *m_TensorsInfo[outputName].m_info;
+
+    ReshapeDescriptor desc;
+    desc.m_TargetShape = reshapedInfo.GetShape();
+
+    IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(desc, layerName.c_str());
+    BOOST_ASSERT(reshapeLayer != nullptr);
+    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
+
+    // Slots are wired up after every layer has been created; constant tensors are
+    // excluded, so only the real input tensor is registered here.
+    RegisterInputSlots(reshapeLayer, {inputName});
+    RegisterOutputSlots(reshapeLayer, {outputName});
+}
+
+void OnnxParser::ParseReshape(const onnx::NodeProto& node)
+{
+    // Reshape has two inputs: the data tensor (FLOAT) and the target shape (INT64).
+    CHECK_VALID_SIZE(static_cast<size_t>(node.input_size()), 2);
+    CHECK_VALID_SIZE(static_cast<size_t>(node.output_size()), 1);
+
+    CHECK_VALID_DATATYPE(node.name(), node.input(0),
+                         m_TensorsInfo[node.input(0)].m_dtype,
+                         onnx::TensorProto::FLOAT); //input
+    CHECK_VALID_DATATYPE(node.name(), node.input(1),
+                         m_TensorsInfo[node.input(1)].m_dtype,
+                         onnx::TensorProto::INT64); //shape
+
+    // The target shape must be known at parse time.
+    if(!m_TensorsInfo[node.input(1)].isConstant())
+    {
+        throw ParseException(boost::str(
+            boost::format("Shape '%1%' should be constant in Reshape layer '%2%' %3%")
+            % node.input(1)
+            % node.name()
+            % CHECK_LOCATION().AsString()));
+    }
+
+    if(m_TensorsInfo[node.input(0)].isConstant())
+    {
+        //make a new cst tensor -> move the data to the output tensor (the shape is already good in the output tensor)
+        // Reshaping a constant needs no layer: just alias its data under the output name.
+        if(m_TensorsInfo.count(node.output(0)) == 0)
+        {
+            m_TensorsInfo[node.output(0)] = OnnxTensor();
+        }
+        m_TensorsInfo[node.output(0)].m_tensor =
+            std::make_unique<onnx::TensorProto>(*m_TensorsInfo[node.input(0)].m_tensor);
+    }
+    else
+    {
+        TensorShape inputShape = m_TensorsInfo[node.input(0)].m_info->GetShape();
+
+        // Compute the output TensorInfo from the constant shape tensor if not known yet.
+        if(m_TensorsInfo.count(node.output(0)) == 0 || m_TensorsInfo[node.output(0)].m_info == nullptr)
+        {
+            auto outInfo = ComputeReshapeInfo(*m_TensorsInfo[node.input(1)].m_tensor, inputShape, node.output(0));
+            m_TensorsInfo[node.output(0)].m_info = std::make_unique<TensorInfo>(outInfo);
+        }
+
+        CreateReshapeLayer(node.input(0), node.output(0), node.name());
+    }
+}
+
+void OnnxParser::ParseRelu(const onnx::NodeProto& node)
+{
+    CHECK_VALID_SIZE(static_cast<size_t>(node.input_size()), 1);
+    CHECK_VALID_SIZE(static_cast<size_t>(node.output_size()), 1);
+
+    VALID_INPUTS(node, STR_LIST(onnx::TensorProto::FLOAT));
+
+    // ONNX Relu maps directly onto an ArmNN activation layer with the ReLu function.
+    ActivationDescriptor activationDesc;
+    activationDesc.m_Function = ActivationFunction::ReLu;
+
+    IConnectableLayer* const reluLayer = m_Network->AddActivationLayer(activationDesc, node.name().c_str());
+    BOOST_ASSERT(reluLayer != nullptr);
+
+    // Relu is shape-preserving: derive the output info from the single input's shape.
+    auto outputInfos = ComputeOutputInfo({node.output(0)}, reluLayer,
+                                         {m_TensorsInfo[node.input(0)].m_info->GetShape()});
+    reluLayer->GetOutputSlot(0).SetTensorInfo(outputInfos[0]);
+
+    // Connections are made after all layers exist; constant tensors are excluded,
+    // so only the real input tensor is registered here.
+    RegisterInputSlots(reluLayer, {node.input(0)});
+    RegisterOutputSlots(reluLayer, {node.output(0)});
+}
+
+
+void OnnxParser::AddConvLayerWithDepthwiseConv(const onnx::NodeProto& node, const Convolution2dDescriptor& convDesc)
+{
+    // Called from ParseConv when 'group' equals the number of input channels: such a
+    // grouped convolution is expressed as an ArmNN depthwise convolution. The padding
+    // and strides were already resolved by ParseConv and are copied from convDesc.
+    BOOST_ASSERT(node.op_type() == "Conv");
+
+    DepthwiseConvolution2dDescriptor desc;
+    desc.m_PadLeft = convDesc.m_PadLeft;
+    desc.m_PadRight = convDesc.m_PadRight;
+    desc.m_PadTop = convDesc.m_PadTop;
+    desc.m_PadBottom = convDesc.m_PadBottom;
+    desc.m_StrideX = convDesc.m_StrideX;
+    desc.m_StrideY = convDesc.m_StrideY;
+    desc.m_BiasEnabled = convDesc.m_BiasEnabled;
+
+    armnn::IConnectableLayer* layer;
+    auto weightTensor = CreateConstTensor(node.input(1));
+    // Rearrange the weight shape in place: dim 0 is moved into dim 1 and dim 0 is set
+    // to 1 (the original dim 1 is discarded -- presumably it is 1 for this grouped
+    // case; TODO confirm against the ONNX Conv weight layout). The cached TensorInfo
+    // is updated so later shape queries see the depthwise layout.
+    TensorShape& weightShape = weightTensor.first.GetShape();
+    weightShape[1] = weightShape[0];
+    weightShape[0] = 1;
+    m_TensorsInfo[node.input(1)].m_info->SetShape(weightShape);
+
+    if (node.input_size() == 3)
+    {
+        // The optional bias must be constant so it can be baked into the layer.
+        if(!m_TensorsInfo[node.input(2)].isConstant())
+        {
+            throw ParseException(boost::str(
+                boost::format("Bias '%1%' should be constant in Conv layer '%2%' %3%")
+                % node.input(2)
+                % node.name()
+                % CHECK_LOCATION().AsString()));
+        }
+        desc.m_BiasEnabled = true;
+        auto biasTensor = CreateConstTensor(node.input(2));
+        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
+                                                          weightTensor.first,
+                                                          biasTensor.first,
+                                                          node.name().c_str());
+    }
+    else
+    {
+        layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
+                                                          weightTensor.first,
+                                                          node.name().c_str());
+    }
+    BOOST_ASSERT(layer != nullptr);
+
+    auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
+                                        { m_TensorsInfo[node.input(0)].m_info->GetShape(),
+                                          m_TensorsInfo[node.input(1)].m_info->GetShape() });
+
+    layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
+
+    // register the input connection slots for the layer, connections are made after all layers have been created
+    // only the tensors for the inputs are relevant, exclude the const tensors
+    RegisterInputSlots(layer, {node.input(0)});
+
+    // register the output connection slots for the layer, connections are made after all layers have been created
+    RegisterOutputSlots(layer, {node.output(0)});
+}
+
+void OnnxParser::ParseConv(const onnx::NodeProto& node)
+{
+    CHECK_VALID_SIZE(static_cast<size_t>(node.input_size()), 2, 3); //input, weight, (bias)
+    CHECK_VALID_SIZE(static_cast<size_t>(node.output_size()), 1);
+
+    VALID_INPUTS(node, STR_LIST(onnx::TensorProto::FLOAT));
+
+    // Only 4D (NCHW) inputs are accepted, i.e. 2D convolutions.
+    if(m_TensorsInfo[node.input(0)].m_info->GetNumDimensions() != 4)
+    {
+        throw ParseException(boost::str(
+            boost::format("ArmNN only supports 2D convolution and Conv layer '%1%' input %2% %3%")
+            % node.name()
+            % TensorInfoAsString(*m_TensorsInfo[node.input(0)].m_info, node.input(0),
+                                 m_TensorsInfo[node.input(0)].m_dtype)
+            % CHECK_LOCATION().AsString()));
+    }
+
+    // Weights are baked into the ArmNN layer, so they must be a constant tensor.
+    if(!m_TensorsInfo[node.input(1)].isConstant())
+    {
+        throw ParseException(boost::str(
+            boost::format("Weights '%1%' should be constant in Conv layer '%2%' %3%")
+            % node.input(1)
+            % node.name()
+            % CHECK_LOCATION().AsString()));
+    }
+
+    auto inputInfo = *m_TensorsInfo[node.input(0)].m_info;
+
+    // Dilation is unsupported: every entry of the optional 'dilations' attribute must be 1.
+    // The stringstream accumulates the values seen so far for the error message.
+    std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(node, "dilations");
+    if (!dilations.empty())
+    {
+        std::stringstream ss;
+        ss << "[ ";
+        for (auto dilation : dilations)
+        {
+            ss << dilation << ", ";
+            if (dilation != 1u)
+            {
+                ss << "... ]";
+                throw ParseException(boost::str(
+                    boost::format("ArmNN only supports Convolution layers with dilations [1,1], and node '%1%' "
+                                  "has dilatation %2% %3%")
+                    % node.name()
+                    % ss.str()
+                    % CHECK_LOCATION().AsString()));
+            }
+        }
+    }
+
+    Convolution2dDescriptor desc;
+    desc.m_BiasEnabled = false;
+
+    // 'strides' is [stride_h, stride_w]; default to [1, 1] when absent.
+    std::vector<uint32_t> strides = ReadOptionalNodeUint32ListAttribute(node, "strides");
+    if(strides.empty())
+    {
+        desc.m_StrideX = 1;
+        desc.m_StrideY = 1;
+    }
+    else
+    {
+        desc.m_StrideX = strides[1];
+        desc.m_StrideY = strides[0];
+    }
+
+    std::vector<uint32_t> pads = ReadOptionalNodeUint32ListAttribute(node, "pads");
+    //Check new padding version first
+    if(pads.empty())
+    {
+        //Check deprecated version ('auto_pad'). VALID, "" and NOTSET all mean
+        //zero padding, which is the descriptor's default.
+        std::string paddingString = ReadOptionalNodeStringAttribute(node, "auto_pad");
+        if(paddingString != "VALID" && paddingString != "" && paddingString != "NOTSET")
+        {
+            bool isUpper;
+            if( paddingString == "SAME_LOWER")
+            {
+                isUpper = false;
+            }
+            else if (paddingString == "SAME_UPPER")
+            {
+                isUpper = true;
+            }
+            else
+            {
+                throw ParseException(boost::str(
+                    boost::format("Invalid auto_pad attribute for node %1%. "
+                                  "Only SAME_UPPER, SAME_LOWER or VALID supported and found %2% %3%")
+                    % node.name()
+                    % paddingString
+                    % CHECK_LOCATION().AsString()));
+            }
+            uint32_t inputHeight = inputInfo.GetShape()[2];
+            uint32_t inputWidth = inputInfo.GetShape()[3];
+
+            // Kernel size for SAME padding: prefer the 'kernel_shape' attribute,
+            // otherwise read dims 2 and 3 of the weight tensor's shape.
+            uint32_t weightHeight;
+            uint32_t weightWidth;
+            std::vector<uint32_t> kernel_shape = ReadOptionalNodeUint32ListAttribute(node, "kernel_shape");
+            if (kernel_shape.empty())
+            {
+                const TensorInfo weightTensorInfo = *m_TensorsInfo[node.input(1)].m_info;
+                weightHeight = weightTensorInfo.GetShape()[2];
+                weightWidth = weightTensorInfo.GetShape()[3];
+            }
+            else
+            {
+                weightHeight = kernel_shape[0];
+                weightWidth = kernel_shape[1];
+            }
+            CalcPadding(inputHeight, weightHeight, desc.m_StrideY, &desc.m_PadTop, &desc.m_PadBottom, isUpper);
+            CalcPadding(inputWidth, weightWidth, desc.m_StrideX, &desc.m_PadLeft, &desc.m_PadRight, isUpper);
+        }
+    }
+    else
+    {
+        // Explicit 'pads' are mapped as [0]=top, [1]=left, [2]=bottom, [3]=right.
+        desc.m_PadTop = pads[0];
+        desc.m_PadLeft = pads[1];
+        desc.m_PadBottom = pads[2];
+        desc.m_PadRight = pads[3];
+    }
+
+    // Grouped convolutions: only group==1 (regular conv) or group==input channel
+    // count (handled as a depthwise convolution) are supported.
+    uint32_t group = ReadOptionalNodeUint32Attribute(node, "group", 1);
+    if(group > 1)
+    {
+        if (group > inputInfo.GetShape()[1])
+        {
+            throw ParseException(
+                boost::str(
+                    boost::format(
+                        "Error parsing Convolution node: %1%. "
+                        "The 'group'=%2% parameter cannot be larger than the "
+                        "channel of the input shape=%3% (in NCHW format). %4%") %
+                    node.name() %
+                    group %
+                    inputInfo.GetShape()[1] %
+                    CHECK_LOCATION().AsString()));
+        }
+        else if (group == inputInfo.GetShape()[1])
+        {
+            // we use a depthwise convolution here, because the number of groups equals to the
+            // input channels
+            AddConvLayerWithDepthwiseConv(node, desc);
+            return;
+        }
+        else
+        {
+            // TODO: split the input by channels into channels/groups separate convolutions
+            //  and merge the results afterwards
+            throw ParseException(boost::str(
+                boost::format("Error parsing Convolution node: %1%. "
+                              "The 'group'=%2% parameter should be 1 or be equal to the "
+                              "channel of the input shape=%3% (in NCHW format). %4%") %
+                node.name() %
+                group %
+                inputInfo.GetShape()[1] %
+                CHECK_LOCATION().AsString()));
+        }
+    }
+
+    armnn::IConnectableLayer* layer;
+    auto weightTensor = CreateConstTensor(node.input(1));
+
+    if (node.input_size() == 3)
+    {
+        // The optional bias must also be constant so it can be baked into the layer.
+        if(!m_TensorsInfo[node.input(2)].isConstant())
+        {
+            throw ParseException(boost::str(
+                boost::format("Bias '%1%' should be constant in Conv layer '%2%' %3%")
+                % node.input(2)
+                % node.name()
+                % CHECK_LOCATION().AsString()));
+        }
+        desc.m_BiasEnabled = true;
+        auto biasTensor = CreateConstTensor(node.input(2));
+        layer = m_Network->AddConvolution2dLayer(desc,
+                                                 weightTensor.first,
+                                                 biasTensor.first,
+                                                 node.name().c_str());
+    }
+    else
+    {
+        layer = m_Network->AddConvolution2dLayer(desc,
+                                                 weightTensor.first,
+                                                 node.name().c_str());
+    }
+    BOOST_ASSERT(layer != nullptr);
+
+    auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
+                                        { m_TensorsInfo[node.input(0)].m_info->GetShape(),
+                                          m_TensorsInfo[node.input(1)].m_info->GetShape() });
+    layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
+
+    // register the input connection slots for the layer, connections are made after all layers have been created
+    // only the tensors for the inputs are relevant, exclude the const tensors
+    RegisterInputSlots(layer, {node.input(0)});
+
+    // register the output connection slots for the layer, connections are made after all layers have been created
+    RegisterOutputSlots(layer, {node.output(0)});
+}
+
+void OnnxParser::PrependForBroadcast(const std::string& outputName,
+                                     const std::string& input0,
+                                     const std::string& input1)
+{
+    // Prepares input0 for broadcasting against input1 by registering, under
+    // outputName, a tensor whose shape is input0's shape with leading 1-dims
+    // prepended until it has the same rank as input1.
+    // Callers (AddPrepareBroadcast) guarantee input1 has at least as many
+    // dimensions as input0, so the rank difference below cannot underflow.
+
+    //input0 should be reshaped to have same number of dim as input1
+    TensorInfo outputTensorInfo = TensorInfo(*m_TensorsInfo[input0].m_info);
+
+    TensorShape input0Shape = m_TensorsInfo[input0].m_info->GetShape();
+    TensorShape input1Shape = m_TensorsInfo[input1].m_info->GetShape();
+
+    uint32_t diff = input1Shape.GetNumDimensions() - input0Shape.GetNumDimensions();
+    std::vector<uint32_t> newShape;
+    newShape.reserve(input1Shape.GetNumDimensions());
+    while(diff > 0)
+    {
+        newShape.push_back(1);
+        diff--;
+    }
+    // 'unsigned int' instead of the non-standard POSIX 'uint' typedef, which is
+    // not part of C++ and breaks the build on non-POSIX toolchains (e.g. MSVC).
+    for (unsigned int dim = 0; dim < input0Shape.GetNumDimensions(); ++dim)
+    {
+        newShape.push_back(input0Shape[dim]);
+    }
+    outputTensorInfo.SetShape(TensorShape(static_cast<unsigned int>(newShape.size()), newShape.data()));
+
+    //add the new tensor to m_TensorsInfo
+    m_TensorsInfo[outputName] = OnnxTensor();
+    m_TensorsInfo[outputName].m_info = std::make_unique<TensorInfo>(outputTensorInfo);
+
+    //add reshape layer if the parent was not constant...
+    if( ! m_TensorsInfo[input0].isConstant())
+    {
+        CreateReshapeLayer(input0, outputName, boost::str(boost::format("Add:reshapeOf%1%") % input0));
+    }
+    else //make it constant and it will be created in Add
+    {
+        m_TensorsInfo[outputName].m_tensor = std::make_unique<onnx::TensorProto>(*m_TensorsInfo[input0].m_tensor);
+
+    }
+}
+
+std::pair<std::string, std::string> OnnxParser::AddPrepareBroadcast(const std::string& input0,
+                                                                    const std::string& input1)
+{
+    // Returns the (possibly substituted) pair of input names to use for an Add:
+    // whichever input has the lower rank is replaced by a reshaped version with
+    // leading 1-dims so that both inputs end up with the same number of dimensions.
+    std::pair<std::string, std::string> inputs(input0, input1);
+
+    const TensorShape shape0 = m_TensorsInfo[input0].m_info->GetShape();
+    const TensorShape shape1 = m_TensorsInfo[input1].m_info->GetShape();
+
+    if(shape1.GetNumDimensions() < shape0.GetNumDimensions())
+    {
+        auto reshapedName = boost::str(boost::format("reshape_output_%1%") % input1);
+        PrependForBroadcast(reshapedName, input1, input0);
+        inputs.second = reshapedName;
+    }
+    else if(shape0.GetNumDimensions() < shape1.GetNumDimensions())
+    {
+        auto reshapedName = boost::str(boost::format("reshape_output_%1%") % input0);
+        PrependForBroadcast(reshapedName, input0, input1);
+        inputs.first = reshapedName;
+    }
+    return inputs;
+}
+
+void OnnxParser::ParseAdd(const onnx::NodeProto& node)
+{
+    CHECK_VALID_SIZE(static_cast<size_t>(node.input_size()), 2);
+    CHECK_VALID_SIZE(static_cast<size_t>(node.output_size()), 1);
+
+    VALID_INPUTS(node, STR_LIST(onnx::TensorProto::FLOAT));
+
+    // TODO: unify broadcast validation code across layers
+    // tracked by: IVGCVSW-1576
+
+    // Checking broadcast compatibility : only scalar or 1D tensors
+    // AddPrepareBroadcast equalizes the ranks (prepending 1-dims to the lower-rank
+    // input via a reshape); afterwards every dimension pair must match or be 1.
+    auto inputs = AddPrepareBroadcast(node.input(0), node.input(1));
+    auto input0 = *m_TensorsInfo[inputs.first].m_info;
+    auto input1 = *m_TensorsInfo[inputs.second].m_info;
+    BOOST_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
+
+    unsigned int numDims = input0.GetNumDimensions();
+    for (unsigned int i = 0; i < numDims; i++)
+    {
+        unsigned int dim0 = input0.GetShape()[i];
+        unsigned int dim1 = input1.GetShape()[i];
+        if (dim0 != dim1 && dim0 != 1 && dim1 != 1)
+        {
+            throw ParseException(boost::str(
+                boost::format("Broadcast is only supported for scalar or 1D tensors in Add node '%1%'. "
+                              "Input dimensions should either match or one should be of size 1 and here, "
+                              "%2% and %3% %4%")
+                % node.name()
+                % TensorInfoAsString(*m_TensorsInfo[inputs.first].m_info, inputs.first,
+                                     m_TensorsInfo[inputs.first].m_dtype)
+                % TensorInfoAsString(*m_TensorsInfo[inputs.second].m_info, inputs.second,
+                                     m_TensorsInfo[inputs.second].m_dtype)
+                % CHECK_LOCATION().AsString()));
+        }
+    }
+
+
+    IConnectableLayer* layer = m_Network->AddAdditionLayer(node.name().c_str());
+    BOOST_ASSERT(layer != nullptr);
+
+    auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
+                                        { m_TensorsInfo[inputs.first].m_info->GetShape(),
+                                          m_TensorsInfo[inputs.second].m_info->GetShape() });
+    layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
+
+    // register the input connection -> for constant inputs, we need to make a newDim constant layer
+    // (a constant tensor has no producing layer yet, so materialize one before wiring slots)
+    if(m_TensorsInfo[inputs.first].isConstant()) {
+
+        CreateConstantLayer(inputs.first, boost::str(boost::format("Add:constant_of_%1%") % node.input(0)));
+    }
+    if(m_TensorsInfo[inputs.second].isConstant()) {
+
+        CreateConstantLayer(inputs.second, boost::str(boost::format("Add:constant_of_%1%") % node.input(1)));
+    }
+    RegisterInputSlots(layer, {inputs.first, inputs.second});
+
+    // register the output connection
+    RegisterOutputSlots(layer, {node.output(0)});
+}
+
+void OnnxParser::ParseBatchNormalization(const onnx::NodeProto& node)
+{
+    //IGNORE momentum parameter and spatial parameters
+
+    CHECK_VALID_SIZE(static_cast<size_t>(node.input_size()), 5);
+    CHECK_VALID_SIZE(static_cast<size_t>(node.output_size()), 1);
+
+    VALID_INPUTS(node, STR_LIST(onnx::TensorProto::FLOAT));
+
+    // Inputs 1..4 (scale, bias, mean, variance) are baked into the ArmNN layer,
+    // so each of them has to be a constant tensor.
+    for(int paramIdx = 1; paramIdx < node.input_size(); ++paramIdx)
+    {
+        const std::string& paramName = node.input(paramIdx);
+        if(!m_TensorsInfo[paramName].isConstant())
+        {
+            throw ParseException(boost::str(
+                boost::format("Input tensor '%1%' should be constant in BatchNormalization node '%2%' %3%")
+                % paramName
+                % node.name()
+                % CHECK_LOCATION().AsString()));
+        }
+    }
+
+    BatchNormalizationDescriptor desc;
+    desc.m_Eps = ReadOptionalNodeFloatAttribute(node, "epsilon", 1e-5f);
+
+    auto scaleTensor = CreateConstTensor(node.input(1));
+    auto biasTensor = CreateConstTensor(node.input(2));
+    auto meanTensor = CreateConstTensor(node.input(3));
+    auto varTensor = CreateConstTensor(node.input(4));
+
+    IConnectableLayer* bnLayer = m_Network->AddBatchNormalizationLayer(desc,
+                                                                      meanTensor.first,
+                                                                      varTensor.first,
+                                                                      biasTensor.first,
+                                                                      scaleTensor.first,
+                                                                      node.name().c_str());
+    BOOST_ASSERT(bnLayer != nullptr);
+
+    // Batch normalization preserves the input shape.
+    auto outputInfo = ComputeOutputInfo({node.output(0)}, bnLayer, {m_TensorsInfo[node.input(0)].m_info->GetShape()});
+    bnLayer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
+
+    RegisterInputSlots(bnLayer, {node.input(0)}); //don't register constant inputs
+
+    // register the output connection
+    RegisterOutputSlots(bnLayer, {node.output(0)});
+}
+
+void OnnxParser::SetupInputLayers()
+{
+    // Add an ArmNN input layer for every graph input that is not a constant
+    // (initializers also appear in the input list and must be skipped).
+    for(int inputIndex = 0; inputIndex < m_Graph->input_size(); ++inputIndex)
+    {
+        const auto& graphInput = m_Graph->input(inputIndex);
+        if (m_TensorsInfo[graphInput.name()].isConstant())
+        {
+            continue;
+        }
+
+        IConnectableLayer* inputLayer =
+            m_Network->AddInputLayer(static_cast<armnn::LayerBindingId>(inputIndex), graphInput.name().c_str());
+        inputLayer->GetOutputSlot(0).SetTensorInfo(ToTensorInfo(graphInput));
+
+        RegisterOutputSlots(inputLayer, {graphInput.name()});
+    }
+}
+
+void OnnxParser::SetupOutputLayers()
+{
+    // A model without outputs cannot produce results -> reject it up front.
+    if(m_Graph->output_size() == 0)
+    {
+        throw ParseException(boost::str(boost::format("The given model does not have any outputs %1%")
+                                        % CHECK_LOCATION().AsString()));
+    }
+
+    // One ArmNN output layer per graph output; the binding id is the output's index.
+    for(int outputIndex = 0; outputIndex < m_Graph->output_size(); ++outputIndex)
+    {
+        const std::string& outputName = m_Graph->output(outputIndex).name();
+        IConnectableLayer* outputLayer =
+            m_Network->AddOutputLayer(static_cast<armnn::LayerBindingId>(outputIndex), outputName.c_str());
+
+        RegisterInputSlots(outputLayer, {outputName});
+    }
+}
+
+void OnnxParser::RegisterInputSlots(IConnectableLayer* layer, const std::vector<std::string>& tensorIds)
+{
+    // Records, for each input tensor name, the layer input slot it must later be
+    // connected to. The wiring itself happens once every layer has been created.
+    BOOST_ASSERT(layer != nullptr);
+    if (tensorIds.size() != layer->GetNumInputSlots())
+    {
+        throw ParseException(
+            boost::str(boost::format("The number of tensor inputs (%1%) does not match the number expected (%2%) %3%") %
+                       tensorIds.size() %
+                       layer->GetNumInputSlots() %
+                       CHECK_LOCATION().AsString()));
+    }
+    for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
+    {
+        const std::string& tensorId = tensorIds[slotIndex];
+        // operator[] default-constructs the TensorSlots entry the first time a
+        // tensor is seen, which is exactly the mapping we want.
+        m_TensorConnections[tensorId].inputSlots.push_back(&(layer->GetInputSlot(slotIndex)));
+    }
+}
+
+void OnnxParser::RegisterOutputSlots(IConnectableLayer* layer, const std::vector<std::string>& tensorIds)
+{
+    // Records, for each output tensor name, the layer output slot that produces it.
+    // The actual connections to consumer input slots are made once all layers exist.
+    BOOST_ASSERT(layer != nullptr);
+    if (tensorIds.size() != layer->GetNumOutputSlots())
+    {
+        throw ParseException(
+            boost::str(boost::format("The number of tensor outputs (%1%) does not match the number expected (%2%) %3% ")
+                       % tensorIds.size()
+                       % layer->GetNumOutputSlots()
+                       % CHECK_LOCATION().AsString()));
+    }
+
+    for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
+    {
+        std::string tensorId = tensorIds[slotIndex];
+        armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
+
+        auto it = m_TensorConnections.find(tensorId);
+
+        if (it == m_TensorConnections.end())
+        {
+            //First time seeing this tensor, we need to map it
+            m_TensorConnections[tensorId] = TensorSlots();
+        }
+
+        TensorSlots & tensorSlots = m_TensorConnections[tensorId];
+
+        // assuming there is only one producer for that tensor
+        if (tensorSlots.outputSlot != nullptr)
+        {
+            // BUGFIX: the format string previously used %2%/%3% while only two
+            // arguments are fed; boost::format binds arguments starting at %1%, so
+            // str() threw boost::io::too_few_args instead of this ParseException
+            // and the tensor name was never printed. Use %1%/%2%.
+            throw ParseException(boost::str(
+                boost::format("Another layer has already registered itself as the producer of "
+                              "tensor:%1% %2%") %
+                tensorId %
+                CHECK_LOCATION().AsString()));
+        }
+        tensorSlots.outputSlot = slot;
+    }
+}
+
+BindingPointInfo OnnxParser::GetNetworkInputBindingInfo(const std::string& name) const
+{
+    // Inputs are bound by their position in the graph's input list, so scan the
+    // list and return (index, tensor info) for the first name match.
+    for(int i = 0; i < m_Graph->input_size(); ++i)
+    {
+        const auto& candidate = m_Graph->input(i);
+        if(candidate.name() == name)
+        {
+            return std::make_pair(static_cast<armnn::LayerBindingId>(i), ToTensorInfo(candidate));
+        }
+    }
+    throw InvalidArgumentException(boost::str(boost::format("The input layer '%1%' does not exist %2%")
+                                              % name % CHECK_LOCATION().AsString()));
+}
+
+BindingPointInfo OnnxParser::GetNetworkOutputBindingInfo(const std::string& name) const
+{
+    // Outputs are bound by their position in the graph's output list, so scan the
+    // list and return (index, tensor info) for the first name match.
+    for(int i = 0; i < m_Graph->output_size(); ++i)
+    {
+        const auto& candidate = m_Graph->output(i);
+        if(candidate.name() == name)
+        {
+            return std::make_pair(static_cast<armnn::LayerBindingId>(i), ToTensorInfo(candidate));
+        }
+    }
+    throw InvalidArgumentException(boost::str(boost::format("The output layer '%1%' does not exist %2%")
+                                              % name % CHECK_LOCATION().AsString()));
+}
+
+std::vector<std::string> OnnxParser::GetInputs(ModelPtr& model)
+{
+    // Returns the names of the real (user-provided) inputs of the model: graph
+    // inputs that do not appear in the initializer list (constants).
+    if(model == nullptr) {
+        throw InvalidArgumentException(boost::str(
+            boost::format("The given model cannot be null %1%")
+            % CHECK_LOCATION().AsString()));
+    }
+
+    std::vector<std::string> inputNames;
+    std::map<std::string, bool> isConstant;
+    // const& : each initializer TensorProto holds the full weight data, so the
+    // by-value loop copied potentially large buffers on every iteration.
+    for(const auto& tensor : model->graph().initializer())
+    {
+        isConstant[tensor.name()] = true;
+    }
+    for(const auto& input : model->graph().input())
+    {
+        if(isConstant.find(input.name()) == isConstant.end())
+        {
+            inputNames.push_back(input.name());
+        }
+    }
+    return inputNames;
+}
+
+std::vector<std::string> OnnxParser::GetOutputs(ModelPtr& model)
+{
+    // Returns the names of all graph outputs, in graph order.
+    if(model == nullptr) {
+        throw InvalidArgumentException(boost::str(
+            boost::format("The given model cannot be null %1%")
+            % CHECK_LOCATION().AsString()));
+    }
+
+    std::vector<std::string> outputNames;
+    outputNames.reserve(static_cast<size_t>(model->graph().output_size()));
+    // const& avoids copying each ValueInfoProto just to read its name.
+    for(const auto& output : model->graph().output())
+    {
+        outputNames.push_back(output.name());
+    }
+    return outputNames;
+}
+
+} // namespace armnnOnnxParser
diff --git a/src/armnnOnnxParser/OnnxParser.hpp b/src/armnnOnnxParser/OnnxParser.hpp
new file mode 100644
index 0000000000..ee75f8e322
--- /dev/null
+++ b/src/armnnOnnxParser/OnnxParser.hpp
@@ -0,0 +1,183 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#pragma once
+
+#include "armnnOnnxParser/IOnnxParser.hpp"
+#include "google/protobuf/repeated_field.h"
+#include <unordered_map>
+
+#include <onnx/onnx.pb.h>
+
+
+namespace armnn
+{
+class TensorInfo;
+}
+
+namespace armnnOnnxParser
+{
+
+using BindingPointInfo = std::pair<armnn::LayerBindingId, armnn::TensorInfo>;
+using ModelPtr = std::unique_ptr<onnx::ModelProto>;
+
+class OnnxParser : public IOnnxParser
+{
+
+/// Pointer-to-member type of the per-operator parse functions (see m_ParserFunctions).
+using OperationParsingFunction = void(OnnxParser::*)(const onnx::NodeProto& NodeProto);
+
+public:
+
+    using GraphPtr = std::unique_ptr<onnx::GraphProto>;
+
+    /// Create the network from a protobuf binary file on disk
+    virtual armnn::INetworkPtr CreateNetworkFromBinaryFile(const char* graphFile) override;
+
+    /// Create the network from a protobuf text file on disk
+    virtual armnn::INetworkPtr CreateNetworkFromTextFile(const char* graphFile) override;
+
+    /// Create the network directly from protobuf text in a string. Useful for debugging/testing
+    virtual armnn::INetworkPtr CreateNetworkFromString(const std::string& protoText) override;
+
+    /// Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name
+    virtual BindingPointInfo GetNetworkInputBindingInfo(const std::string& name) const override;
+
+    /// Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name
+    virtual BindingPointInfo GetNetworkOutputBindingInfo(const std::string& name) const override;
+
+public:
+
+    OnnxParser();
+
+    /// Load an onnx::ModelProto from a binary protobuf file, a text protobuf file, or an in-memory string.
+    static ModelPtr LoadModelFromBinaryFile(const char * fileName);
+    static ModelPtr LoadModelFromTextFile(const char * fileName);
+    static ModelPtr LoadModelFromString(const std::string& inputString);
+
+    ///Retrieve inputs names (graph inputs that are not initializers)
+    static std::vector<std::string> GetInputs(ModelPtr& model);
+
+    ///Retrieve outputs names
+    static std::vector<std::string> GetOutputs(ModelPtr& model);
+
+private:
+
+    /// Parses a ModelProto loaded into memory from one of the other CreateNetwork*
+    armnn::INetworkPtr CreateNetworkFromModel(onnx::ModelProto& model);
+
+    ///Parse every node and make the connection between the resulting tensors
+    void LoadGraph();
+
+    /// Record tensor info/dtype for every ValueInfoProto in the given list into m_TensorsInfo.
+    void SetupInfo(const google::protobuf::RepeatedPtrField<onnx::ValueInfoProto >* list);
+
+    /// Compute the output TensorInfo(s) of the given layer from the supplied input shapes.
+    std::vector<armnn::TensorInfo> ComputeOutputInfo(std::vector<std::string> outNames,
+                                                     const armnn::IConnectableLayer* layer,
+                                                     std::vector<armnn::TensorShape> inputShapes);
+
+    /// Detect MatMul(+Add) patterns handled by AddFullyConnected (see m_OutputsFusedAndUsed).
+    void DetectFullyConnected();
+
+    template <typename Location>
+    void GetInputAndParam(const onnx::NodeProto& node,
+                          std::string* inputName,
+                          std::string* constName,
+                          const Location& location);
+
+    template <typename Location>
+    void To1DTensor(const std::string &name, const Location& location);
+
+    //Broadcast Preparation functions
+    std::pair<std::string, std::string> AddPrepareBroadcast(const std::string& input0, const std::string& input1);
+    void PrependForBroadcast(const std::string& outputName, const std::string& input0, const std::string& input1);
+
+    void CreateConstantLayer(const std::string& tensorName, const std::string& layerName);
+    void CreateReshapeLayer(const std::string& inputName,
+                            const std::string& outputName,
+                            const std::string& layerName);
+
+    /// Per-operator parse functions, dispatched via m_ParserFunctions.
+    void ParseBatchNormalization(const onnx::NodeProto& node);
+    void ParseConstant(const onnx::NodeProto& nodeProto);
+
+    void ParseMaxPool(const onnx::NodeProto& nodeProto);
+    void ParseAveragePool(const onnx::NodeProto& nodeProto);
+    void ParseGlobalAveragePool(const onnx::NodeProto& node);
+
+    void AddPoolingLayer(const onnx::NodeProto& nodeProto, armnn::Pooling2dDescriptor& desc);
+
+    void ParseReshape(const onnx::NodeProto& nodeProto);
+    void ParseRelu(const onnx::NodeProto& nodeProto);
+
+    void AddConvLayerWithDepthwiseConv(const onnx::NodeProto& node, const armnn::Convolution2dDescriptor& convDesc);
+    void ParseConv(const onnx::NodeProto& nodeProto);
+
+    void ParseAdd(const onnx::NodeProto& nodeProto);
+    void AddFullyConnected(const onnx::NodeProto& matmulNode, const onnx::NodeProto* addNode = nullptr);
+
+    void RegisterInputSlots(armnn::IConnectableLayer* layer, const std::vector<std::string>& tensorIndexes);
+    void RegisterOutputSlots(armnn::IConnectableLayer* layer, const std::vector<std::string>& tensorIndexes);
+
+    void SetupInputLayers();
+    void SetupOutputLayers();
+
+    void ResetParser();
+    void Cleanup();
+
+    /// Build an armnn::ConstTensor (plus the float storage that backs it) for a constant tensor name.
+    std::pair<armnn::ConstTensor, std::unique_ptr<float[]>> CreateConstTensor(const std::string name);
+
+    template <typename TypeList, typename Location>
+    void ValidateInputs(const onnx::NodeProto& node,
+                        TypeList validInputs,
+                        const Location& location);
+
+    /// The network we're building. Gets cleared after it is passed to the user
+    armnn::INetworkPtr m_Network;
+
+    ///Ptr to the graph we're building the network from
+    GraphPtr m_Graph;
+
+    ///Map of the information for every tensor
+    struct OnnxTensor
+    {
+        std::unique_ptr<armnn::TensorInfo> m_info;
+        std::unique_ptr<const onnx::TensorProto> m_tensor;
+        onnx::TensorProto::DataType m_dtype;
+
+        OnnxTensor() : m_info(nullptr), m_tensor(nullptr), m_dtype(onnx::TensorProto::FLOAT) { }
+        // A tensor is constant when proto data has been attached to it (initializer/Constant node).
+        bool isConstant() { return m_tensor != nullptr; }
+
+    };
+
+    std::unordered_map<std::string, OnnxTensor> m_TensorsInfo;
+
+    /// map of onnx operation names to parsing member functions
+    static const std::map<std::string, OperationParsingFunction> m_ParserFunctions;
+
+    /// A mapping of an output slot to each of the input slots it should be connected to
+    /// The outputSlot is from the layer that creates this tensor as one of its outputs
+    /// The inputSlots are from the layers that use this tensor as one of their inputs
+    struct TensorSlots
+    {
+        armnn::IOutputSlot* outputSlot;
+        std::vector<armnn::IInputSlot*> inputSlots;
+
+        TensorSlots() : outputSlot(nullptr) { }
+    };
+    ///Map of the tensor names to their connections for the connections of the layers of the graph
+    std::unordered_map<std::string, TensorSlots> m_TensorConnections;
+
+    //Map of the tensor names to their node and index in graph.node()
+    std::unordered_map<std::string, std::pair<const onnx::NodeProto*, int>> m_OutputsMap;
+
+    /// Number of times a specific node (identified by his index number) was used as input
+    /// and list of the nodes it was fused with
+    struct UsageSummary
+    {
+        std::vector<size_t> fusedWithNodes;
+        size_t inputForNodes;
+
+        UsageSummary() : fusedWithNodes({}), inputForNodes(0) { }
+
+    };
+
+    std::vector<UsageSummary> m_OutputsFusedAndUsed;
+};
+}
diff --git a/src/armnnOnnxParser/OnnxSupport.md b/src/armnnOnnxParser/OnnxSupport.md
new file mode 100644
index 0000000000..7d81e8d6aa
--- /dev/null
+++ b/src/armnnOnnxParser/OnnxSupport.md
@@ -0,0 +1,60 @@
+# ONNX operators that the Arm NN SDK supports
+
+This reference guide provides a list of ONNX operators the Arm NN SDK currently supports.
+
+The Arm NN SDK ONNX parser currently only supports fp32 operators.
+
+## Fully supported
+
+**Add**
+
+See the ONNX [Add documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Add) for more information
+
+**AveragePool**
+
+See the ONNX [AveragePool documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#AveragePool) for more information.
+
+**Constant**
+
+See the ONNX [Constant documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Constant) for more information.
+
+**GlobalAveragePool**
+
+See the ONNX [GlobalAveragePool documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#GlobalAveragePool) for more information.
+
+**MaxPool**
+
+See the ONNX [MaxPool documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#MaxPool) for more information.
+
+**Relu**
+
+See the ONNX [Relu documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Relu) for more information.
+
+**Reshape**
+
+See the ONNX [Reshape documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Reshape) for more information.
+
+## Partially supported
+
+**Conv**
+
+The parser only supports 2D convolutions with a dilation rate of [1, 1], and with group = 1 or group equal to the number of input channels (depthwise convolution).
+See the ONNX [Conv documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Conv) for more information.
+
+**BatchNormalization**
+
+The parser does not support training mode. See the ONNX [BatchNormalization documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#BatchNormalization) for more information.
+
+**MatMul**
+
+The parser only supports constant weights in a fully connected layer.
+
+## Tested networks
+
+Arm tested these operators with the following ONNX fp32 neural networks:
+
+* Simple MNIST. See the ONNX [MNIST documentation](https://github.com/onnx/models/tree/master/mnist) for more information.
+
+* Mobilenet_v2. See the ONNX [MobileNet documentation](https://github.com/onnx/models/tree/master/models/image_classification/mobilenet) for more information.
+
+More machine learning operators will be supported in future releases. \ No newline at end of file
diff --git a/src/armnnOnnxParser/README.md b/src/armnnOnnxParser/README.md
new file mode 100644
index 0000000000..81ca068a86
--- /dev/null
+++ b/src/armnnOnnxParser/README.md
@@ -0,0 +1,5 @@
+# Arm NN ONNX parser
+
+`armnnOnnxParser` is a library for loading neural networks defined in ONNX protobuf files into the Arm NN runtime.
+
+For more information about the ONNX layers that are supported, and the networks that have been tested, see [OnnxSupport.md](./OnnxSupport.md). \ No newline at end of file
diff --git a/src/armnnOnnxParser/test/Addition.cpp b/src/armnnOnnxParser/test/Addition.cpp
new file mode 100644
index 0000000000..25519447c6
--- /dev/null
+++ b/src/armnnOnnxParser/test/Addition.cpp
@@ -0,0 +1,311 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnOnnxParser/IOnnxParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(OnnxParser)
+
+struct AddMainFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
+{
+ AddMainFixture(const std::string& dataType)
+ {
+ m_Prototext = R"(
+ ir_version: 3
+ producer_name: "CNTK"
+ producer_version: "2.5.1"
+ domain: "ai.cntk"
+ model_version: 1
+ graph {
+ name: "CNTKGraph"
+ input {
+ name: "Input0"
+ type {
+ tensor_type {
+ elem_type: )" + dataType + R"(
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 2
+ }
+ dim {
+ dim_value: 2
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "Input1"
+ type {
+ tensor_type {
+ elem_type: )" + dataType + R"(
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 2
+ }
+ dim {
+ dim_value: 2
+ }
+ }
+ }
+ }
+ }
+ node {
+ input: "Input0"
+ input: "Input1"
+ output: "Output"
+ name: "addition"
+ op_type: "Add"
+ doc_string: ""
+ domain: ""
+ }
+ output {
+ name: "Output"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 2
+ }
+ dim {
+ dim_value: 2
+ }
+ }
+ }
+ }
+ }
+ }
+ opset_import {
+ version: 7
+ })";
+ }
+};
+
+struct AddValidFixture : AddMainFixture
+{
+ AddValidFixture() : AddMainFixture("FLOAT") {
+ Setup();
+ }
+};
+
+struct AddInvalidFixture : AddMainFixture
+{
+ AddInvalidFixture() : AddMainFixture("INT32") { }
+};
+
+struct AddValidBroadcastFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
+{
+ AddValidBroadcastFixture() {
+
+ m_Prototext = R"(
+ ir_version: 3
+ producer_name: "CNTK"
+ producer_version: "2.5.1"
+ domain: "ai.cntk"
+ model_version: 1
+ graph {
+ name: "CNTKGraph"
+ input {
+ name: "Input0"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 4
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "Input1"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 4
+ }
+ }
+ }
+ }
+ }
+ node {
+ input: "Input0"
+ input: "Input1"
+ output: "Output"
+ name: "addition"
+ op_type: "Add"
+ doc_string: ""
+ domain: ""
+ }
+ output {
+ name: "Output"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 4
+ }
+ }
+ }
+ }
+ }
+ }
+ opset_import {
+ version: 7
+ })";
+ Setup();
+ }
+};
+
+struct AddInvalidBroadcastFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
+{
+ AddInvalidBroadcastFixture() {
+
+ m_Prototext = R"(
+ ir_version: 3
+ producer_name: "CNTK"
+ producer_version: "2.5.1"
+ domain: "ai.cntk"
+ model_version: 1
+ graph {
+ name: "CNTKGraph"
+ input {
+ name: "Input0"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 3
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "Input1"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 4
+ }
+ }
+ }
+ }
+ }
+ node {
+ input: "Input0"
+ input: "Input1"
+ output: "Output"
+ name: "addition"
+ op_type: "Add"
+ doc_string: ""
+ domain: ""
+ }
+ output {
+ name: "Output"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 4
+ }
+ }
+ }
+ }
+ }
+ }
+ opset_import {
+ version: 7
+ })";
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(ValidAddTest, AddValidFixture)
+{
+ RunTest<4>({{"Input0", {1.0f, 2.0f, -3.0f, -4.0f}},
+ {"Input1", {1.0f, 2.0f, 3.0, 4.0f}}}, {{"Output", {2.0, 4.0, 0, 0.0}}});
+}
+
+BOOST_FIXTURE_TEST_CASE(IncorrectDataTypeAdd, AddInvalidFixture)
+{
+ BOOST_CHECK_THROW(Setup(), armnn::ParseException);
+}
+
+BOOST_FIXTURE_TEST_CASE(InvalidBroadcastAdd, AddInvalidBroadcastFixture)
+{
+ BOOST_CHECK_THROW(Setup(), armnn::ParseException);
+}
+
+BOOST_FIXTURE_TEST_CASE(ValidBroadcastAdd, AddValidBroadcastFixture)
+{
+ RunTest<4>({{"Input0", {1.0f, 2.0f, -3.0f, -4.0f}},
+ {"Input1", {1.0f, 2.0f, 3.0, 4.0f}}}, {{"Output", {2.0, 4.0, 0, 0.0}}});
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnOnnxParser/test/BatchNorm.cpp b/src/armnnOnnxParser/test/BatchNorm.cpp
new file mode 100644
index 0000000000..b708770895
--- /dev/null
+++ b/src/armnnOnnxParser/test/BatchNorm.cpp
@@ -0,0 +1,342 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnOnnxParser/IOnnxParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(OnnxParser)
+
+struct BatchNormalizationMainFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
+{
+ BatchNormalizationMainFixture()
+ {
+ m_Prototext = R"(
+ ir_version: 3
+ producer_name: "CNTK"
+ producer_version: "2.5.1"
+ domain: "ai.cntk"
+ model_version: 1
+ graph {
+ name: "CNTKGraph"
+ input {
+ name: "Input"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 3
+ }
+ dim {
+ dim_value: 3
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "mean"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "var"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "scale"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "bias"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ node {
+ input: "Input"
+ input: "scale"
+ input: "bias"
+ input: "mean"
+ input: "var"
+ output: "Output"
+ name: "batchNorm"
+ op_type: "BatchNormalization"
+ attribute {
+ name: "epsilon"
+ f: 0.0010000000475
+ type: FLOAT
+ }
+ }
+ initializer {
+ dims: 1
+ data_type: FLOAT
+ float_data: 5.0
+ name: "mean"
+ }
+ initializer {
+ dims: 1
+ data_type: FLOAT
+ float_data: 2.0
+ name: "var"
+ }
+ initializer {
+ dims: 1
+ data_type: FLOAT
+ float_data: 0.0
+ name: "bias"
+ }
+ initializer {
+ dims: 1
+ data_type: FLOAT
+ float_data: 1.0
+ name: "scale"
+ }
+ output {
+ name: "Output"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 3
+ }
+ dim {
+ dim_value: 3
+ }
+ }
+ }
+ }
+ }
+ }
+ opset_import {
+ version: 7
+ })";
+ Setup();
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(ValidBatchNormalizationTest, BatchNormalizationMainFixture)
+{
+ RunTest<4>({{"Input", {1, 2, 3, 4, 5, 6, 7, 8, 9}}}, // Input data.
+ {{"Output", {-2.8277204f, -2.12079024f, -1.4138602f,
+ -0.7069301f, 0.0f, 0.7069301f,
+ 1.4138602f, 2.12079024f, 2.8277204f}}}); // Expected output data.
+}
+
+
+struct BatchNormalizationBisFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
+{
+ BatchNormalizationBisFixture()
+ {
+ m_Prototext = R"(
+ ir_version: 3
+ producer_name: "CNTK"
+ producer_version: "2.5.1"
+ domain: "ai.cntk"
+ model_version: 1
+ graph {
+ name: "CNTKGraph"
+ input {
+ name: "Input"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 2
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 3
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "mean"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 2
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "var"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 2
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "scale"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 2
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "bias"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 2
+ }
+ }
+ }
+ }
+ }
+ node {
+ input: "Input"
+ input: "scale"
+ input: "bias"
+ input: "mean"
+ input: "var"
+ output: "Output"
+ name: "batchNorm"
+ op_type: "BatchNormalization"
+ attribute {
+ name: "epsilon"
+ f: 0.00001
+ type: FLOAT
+ }
+ }
+ initializer {
+ dims: 2
+ data_type: FLOAT
+ float_data: 0.0
+ float_data: 3.0
+ name: "mean"
+ }
+ initializer {
+ dims: 2
+ data_type: FLOAT
+ float_data: 1.0
+ float_data: 1.5
+ name: "var"
+ }
+ initializer {
+ dims: 2
+ data_type: FLOAT
+ float_data: 0.0
+ float_data: 1.0
+ name: "bias"
+ }
+ initializer {
+ dims: 2
+ data_type: FLOAT
+ float_data: 1.0
+ float_data: 1.5
+ name: "scale"
+ }
+ output {
+ name: "Output"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 2
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 3
+ }
+ }
+ }
+ }
+ }
+ }
+ opset_import {
+ version: 7
+ })";
+ Setup();
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(ValidBatchNormalizationBisTest, BatchNormalizationBisFixture)
+{
+ RunTest<4>({{"Input", {-1, 0.0, 1, 2, 3.0, 4.0}}}, // Input data.
+ {{"Output", {-0.999995f, 0.0, 0.999995f,
+ -0.22474074f, 1.0f, 2.2247407f}}}); // Expected output data.
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnOnnxParser/test/Const.cpp b/src/armnnOnnxParser/test/Const.cpp
new file mode 100644
index 0000000000..594998771b
--- /dev/null
+++ b/src/armnnOnnxParser/test/Const.cpp
@@ -0,0 +1,87 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnOnnxParser/IOnnxParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(OnnxParser)
+
+struct ConstMainFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
+{
+ ConstMainFixture(const std::string& dataType)
+ {
+ m_Prototext = R"(
+ ir_version: 3
+ producer_name: "CNTK "
+ producer_version: "2.5.1 "
+ domain: "ai.cntk "
+ model_version: 1
+ graph {
+ name: "CNTKGraph "
+ node {
+ output: "Output"
+ attribute {
+ name: "value"
+ t {
+ dims: 7
+ data_type: )" + dataType + R"(
+ float_data: 0.0
+ float_data: 1.0
+ float_data: 2.0
+ float_data: 3.0
+ float_data: 4.0
+ float_data: 5.0
+ float_data: 6.0
+
+ }
+ type: FLOAT
+ }
+ name: "constantNode"
+ op_type: "Constant"
+ }
+ output {
+ name: "Output"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 7
+ }
+ }
+ }
+ }
+ }
+ }
+ opset_import {
+ version: 7
+ })";
+ }
+};
+
+struct ConstValidFixture : ConstMainFixture
+{
+ ConstValidFixture() : ConstMainFixture("FLOAT") {
+ Setup();
+ }
+};
+
+struct ConstInvalidFixture : ConstMainFixture
+{
+ ConstInvalidFixture() : ConstMainFixture("FLOAT16") { }
+};
+
+BOOST_FIXTURE_TEST_CASE(ValidConstTest, ConstValidFixture)
+{
+ RunTest<1>({ }, {{ "Output" , {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0}}});
+}
+
+BOOST_FIXTURE_TEST_CASE(IncorrectDataTypeConst, ConstInvalidFixture)
+{
+ BOOST_CHECK_THROW( Setup(), armnn::ParseException);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnOnnxParser/test/Constructor.cpp b/src/armnnOnnxParser/test/Constructor.cpp
new file mode 100644
index 0000000000..e234dba5ee
--- /dev/null
+++ b/src/armnnOnnxParser/test/Constructor.cpp
@@ -0,0 +1,16 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnOnnxParser/IOnnxParser.hpp"
+
+BOOST_AUTO_TEST_SUITE(OnnxParser)
+
+BOOST_AUTO_TEST_CASE(Create)
+{
+ armnnOnnxParser::IOnnxParserPtr parser(armnnOnnxParser::IOnnxParser::Create());
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnOnnxParser/test/Conv2D.cpp b/src/armnnOnnxParser/test/Conv2D.cpp
new file mode 100644
index 0000000000..11a5d1eb87
--- /dev/null
+++ b/src/armnnOnnxParser/test/Conv2D.cpp
@@ -0,0 +1,469 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnOnnxParser/IOnnxParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(OnnxParser)
+
+struct SimpleConv2DFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
+{
+ SimpleConv2DFixture()
+ {
+ m_Prototext = R"(
+ ir_version: 3
+ producer_name: "CNTK"
+ producer_version: "2.5.1"
+ domain: "ai.cntk"
+ model_version: 1
+ graph {
+ name: "CNTKGraph"
+ input {
+ name: "Input"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 3
+ }
+ dim {
+ dim_value: 3
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "Weight"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 3
+ }
+ dim {
+ dim_value: 3
+ }
+ }
+ }
+ }
+ }
+ initializer {
+ dims: 1
+ dims: 1
+ dims: 3
+ dims: 3
+ data_type: FLOAT
+ float_data: 2
+ float_data: 1
+ float_data: 0
+ float_data: 6
+ float_data: 2
+ float_data: 1
+ float_data: 4
+ float_data: 1
+ float_data: 2
+ name: "Weight"
+ }
+ node {
+ input: "Input"
+ input: "Weight"
+ output: "Output"
+ name: "Convolution"
+ op_type: "Conv"
+ attribute {
+ name: "kernel_shape"
+ ints: 3
+ ints: 3
+ type: INTS
+ }
+ attribute {
+ name: "strides"
+ ints: 1
+ ints: 1
+ type: INTS
+ }
+ attribute {
+ name: "auto_pad"
+ s: "VALID"
+ type: STRING
+ }
+ attribute {
+ name: "group"
+ i: 1
+ type: INT
+ }
+ attribute {
+ name: "dilations"
+ ints: 1
+ ints: 1
+ type: INTS
+ }
+ doc_string: ""
+ domain: ""
+ }
+ output {
+ name: "Output"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ }
+ opset_import {
+ version: 7
+ })";
+ Setup();
+ }
+};
+
+struct Conv2DWithBiasesFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
+{
+ Conv2DWithBiasesFixture() {
+ m_Prototext = R"(
+ ir_version: 3
+ producer_name: "CNTK"
+ producer_version: "2.5.1"
+ domain: "ai.cntk"
+ model_version: 1
+ graph {
+ name: "CNTKGraph"
+ input {
+ name: "Input"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 2
+ }
+ dim {
+ dim_value: 2
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "Weight"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 2
+ }
+ dim {
+ dim_value: 2
+ }
+ }
+ }
+ }
+ }
+ initializer {
+ dims: 1
+ dims: 1
+ dims: 2
+ dims: 2
+ data_type: FLOAT
+ float_data: 2
+ float_data: 1
+ float_data: 0
+ float_data: 6
+ name: "Weight"
+ }
+ input {
+ name: "Bias"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 4
+ }
+ }
+ }
+ }
+ }
+ initializer {
+ dims: 4
+ data_type: FLOAT
+ float_data: 10
+ float_data: 0
+ float_data: 0
+ float_data: 0
+ name: "Bias"
+ }
+ node {
+ input: "Input"
+ input: "Weight"
+ input: "Bias"
+ output: "Output"
+ name: "Convolution"
+ op_type: "Conv"
+ attribute {
+ name: "kernel_shape"
+ ints: 2
+ ints: 2
+ type: INTS
+ }
+ attribute {
+ name: "strides"
+ ints: 1
+ ints: 1
+ type: INTS
+ }
+ attribute {
+ name: "auto_pad"
+ s: "SAME_UPPER"
+ type: STRING
+ }
+ attribute {
+ name: "group"
+ i: 1
+ type: INT
+ }
+ attribute {
+ name: "dilations"
+ ints: 1
+ ints: 1
+ type: INTS
+ }
+ doc_string: ""
+ domain: ""
+ }
+ output {
+ name: "Output"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 2
+ }
+ dim {
+ dim_value: 2
+ }
+ }
+ }
+ }
+ }
+ }
+ opset_import {
+ version: 7
+ })";
+ Setup();
+ }
+};
+
+
+struct Conv2DDimReducingFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
+{
+ Conv2DDimReducingFixture() {
+ m_Prototext = R"(
+ ir_version: 3
+ producer_name: "CNTK"
+ producer_version: "2.5.1"
+ domain: "ai.cntk"
+ model_version: 1
+ graph {
+ name: "CNTKGraph"
+ input {
+ name: "Input"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 3
+ }
+ dim {
+ dim_value: 2
+ }
+ dim {
+ dim_value: 2
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "Weight"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 2
+ }
+ dim {
+ dim_value: 3
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ initializer {
+ dims: 2
+ dims: 3
+ dims: 1
+ dims: 1
+ data_type: FLOAT
+ float_data: -1
+ float_data: 2
+ float_data: 0
+ float_data: 1
+ float_data: 0
+ float_data: 0
+ name: "Weight"
+ }
+ node {
+ input: "Input"
+ input: "Weight"
+ output: "Output"
+ name: "Convolution"
+ op_type: "Conv"
+ attribute {
+ name: "kernel_shape"
+ ints: 1
+ ints: 1
+ type: INTS
+ }
+ attribute {
+ name: "strides"
+ ints: 1
+ ints: 1
+ type: INTS
+ }
+ attribute {
+ name: "group"
+ i: 1
+ type: INT
+ }
+ attribute {
+ name: "dilations"
+ ints: 1
+ ints: 1
+ type: INTS
+ }
+ doc_string: ""
+ domain: ""
+ }
+ output {
+ name: "Output"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 2
+ }
+ dim {
+ dim_value: 2
+ }
+ dim {
+ dim_value: 2
+ }
+ }
+ }
+ }
+ }
+ }
+ opset_import {
+ version: 7
+ })";
+ Setup();
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(ValidConvTest, SimpleConv2DFixture)
+{
+ RunTest<4>({{"Input", {1.0, 2.0, 3.0,
+ 4.0, 5.0, 6.0,
+ 7.0, 8.0, 9.0}}},
+ {{"Output", {1.0 * 2 + 2.0 * 1 + 3.0 * 0 +
+ 4.0 * 6 + 5.0 * 2 + 6.0 * 1 +
+ 7.0 * 4 + 8.0 * 1 + 9.0 * 2}}});
+}
+
+BOOST_FIXTURE_TEST_CASE(ValidConvWithBiasTest, Conv2DWithBiasesFixture)
+{
+ RunTest<4>({{"Input", {1.0, 2.0,
+ 3.0, 4.0}}},
+ {{"Output", {1.0 * 2 + 2.0 * 1 + 3.0 * 0 + 4 * 6 + 10,
+ 2.0 * 2 + 0 * 1 + 4.0 * 0 + 0 * 6 + 10,
+ 3.0 * 2 + 4.0 * 1 + 0 * 0 + 0 * 6 + 10,
+ 4.0 * 2 + 0 * 1 + 0 * 0 + 0 * 6 + 10}}});
+}
+
+BOOST_FIXTURE_TEST_CASE(ValidConvDimReducTest, Conv2DDimReducingFixture)
+{
+ RunTest<4>({{"Input", {1.0, 2.0, 3.0, 4.0, -1, -2, 3, 4, 1 , 1, 1, 1 }}},
+ {{"Output", {-1 * 1 + 2 * -1, -1 * 2 + 2 * -2,
+ -1 * 3 + 2 * 3, -1 * 4 + 2 * 4,
+ 1, 2, 3, 4}}});
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnOnnxParser/test/CreateNetwork.cpp b/src/armnnOnnxParser/test/CreateNetwork.cpp
new file mode 100644
index 0000000000..d11f7603b2
--- /dev/null
+++ b/src/armnnOnnxParser/test/CreateNetwork.cpp
@@ -0,0 +1,63 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnOnnxParser/IOnnxParser.hpp"
+#include "google/protobuf/stubs/logging.h"
+
+BOOST_AUTO_TEST_SUITE(OnnxParser)
+
+BOOST_AUTO_TEST_CASE(CreateNetworkFromString)
+{
+ std::string TestModel = R"(
+ ir_version: 3
+ producer_name: "CNTK "
+ producer_version: "2.5.1 "
+ domain: "ai.cntk "
+ model_version: 1
+ graph {
+ name: "CNTKGraph "
+ output {
+ name: "Output"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 10
+ }
+ }
+ }
+ }
+ }
+ }
+ opset_import {
+ version: 7
+ })";
+
+ armnnOnnxParser::IOnnxParserPtr parser(armnnOnnxParser::IOnnxParser::Create());
+
+ armnn::INetworkPtr network = parser->CreateNetworkFromString(TestModel.c_str());
+ BOOST_TEST(network.get());
+}
+
+BOOST_AUTO_TEST_CASE(CreateNetworkFromStringWithNullptr)
+{
+ armnnOnnxParser::IOnnxParserPtr parser(armnnOnnxParser::IOnnxParser::Create());
+ BOOST_CHECK_THROW(parser->CreateNetworkFromString(""), armnn::InvalidArgumentException );
+}
+
+BOOST_AUTO_TEST_CASE(CreateNetworkWithInvalidString)
+{
+ auto silencer = google::protobuf::LogSilencer(); //get rid of errors from protobuf
+ armnnOnnxParser::IOnnxParserPtr parser(armnnOnnxParser::IOnnxParser::Create());
+ BOOST_CHECK_THROW(parser->CreateNetworkFromString( "I'm not a model so I should raise an error" ),
+ armnn::ParseException );
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnOnnxParser/test/DepthConv.cpp b/src/armnnOnnxParser/test/DepthConv.cpp
new file mode 100644
index 0000000000..64b0778abc
--- /dev/null
+++ b/src/armnnOnnxParser/test/DepthConv.cpp
@@ -0,0 +1,162 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnOnnxParser/IOnnxParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(OnnxParser)
+
+struct SimpleDepthConv2DFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
+{
+ SimpleDepthConv2DFixture()
+ {
+ m_Prototext = R"(
+ ir_version: 3
+ producer_name: "CNTK"
+ producer_version: "2.5.1"
+ domain: "ai.cntk"
+ model_version: 1
+ graph {
+ name: "CNTKGraph"
+ input {
+ name: "Input"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 3
+ }
+ dim {
+ dim_value: 2
+ }
+ dim {
+ dim_value: 2
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "Weight"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 3
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 2
+ }
+ dim {
+ dim_value: 2
+ }
+ }
+ }
+ }
+ }
+ initializer {
+ dims: 3
+ dims: 1
+ dims: 2
+ dims: 2
+ data_type: FLOAT
+ float_data: 1
+ float_data: 1
+ float_data: 1
+ float_data: 1
+ float_data: 2
+ float_data: 2
+ float_data: 2
+ float_data: 2
+ float_data: 3
+ float_data: 3
+ float_data: 3
+ float_data: 3
+ name: "Weight"
+ }
+ node {
+ input: "Input"
+ input: "Weight"
+ output: "Output"
+ name: "Convolution"
+ op_type: "Conv"
+ attribute {
+ name: "kernel_shape"
+ ints: 2
+ ints: 2
+ type: INTS
+ }
+ attribute {
+ name: "strides"
+ ints: 1
+ ints: 1
+ type: INTS
+ }
+ attribute {
+ name: "auto_pad"
+ s: "VALID"
+ type: STRING
+ }
+ attribute {
+ name: "group"
+ i: 3
+ type: INT
+ }
+ attribute {
+ name: "dilations"
+ ints: 1
+ ints: 1
+ type: INTS
+ }
+ doc_string: ""
+ domain: ""
+ }
+ output {
+ name: "Output"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 3
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ }
+ opset_import {
+ version: 7
+ })";
+ Setup();
+ }
+};
+
+
+BOOST_FIXTURE_TEST_CASE(ValidDepthConvTest, SimpleDepthConv2DFixture)
+{
+ RunTest<4>({{"Input", { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}}},
+ {{"Output", { 10, 52, 126 }}});
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnOnnxParser/test/FullyConnected.cpp b/src/armnnOnnxParser/test/FullyConnected.cpp
new file mode 100644
index 0000000000..cbb6c355a4
--- /dev/null
+++ b/src/armnnOnnxParser/test/FullyConnected.cpp
@@ -0,0 +1,597 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnOnnxParser/IOnnxParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(OnnxParser)
+
+// A MatMul in isolation, not connected to an add. Should result in a non-biased FullyConnected layer.
+struct MatMulFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
+{
+ MatMulFixture()
+ {
+ m_Prototext = R"(
+ ir_version: 3
+ producer_name: "CNTK "
+ producer_version: "2.5.1 "
+ domain: "ai.cntk "
+ model_version: 1
+ graph {
+ name: "CNTKGraph "
+ input {
+ name: "Input"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "Const"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ initializer {
+ dims: 1
+ data_type: FLOAT
+ float_data: 17.0
+ name: "Const"
+ }
+ node {
+ input: "Input"
+ input: "Const"
+ output: "Output"
+ name: "SimpleMatmul"
+ op_type: "MatMul"
+ }
+ output {
+ name: "Output"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ }
+ opset_import {
+ version: 7
+ })";
+
+ Setup();
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(MatMul, MatMulFixture)
+{
+ RunTest<1>({{"Input", { 2 }}}, {{"Output", { 34 }}});
+}
+
+// In Onnx fully connected layers are expressed as a MatMul followed by an Add.
+// The OnnxParser must detect this case and convert them to a FullyConnected layer.
+struct FullyConnectedFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
+{
+ FullyConnectedFixture()
+ {
+ m_Prototext = R"(
+ ir_version: 3
+ producer_name: "CNTK "
+ producer_version: "2.5.1 "
+ domain: "ai.cntk "
+ model_version: 1
+ graph {
+ name: "CNTKGraph "
+ input {
+ name: "Input"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "Weight"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ initializer {
+ dims: 1
+ data_type: FLOAT
+ float_data: 2
+ name: "Weight"
+ }
+ input {
+ name: "Bias"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ initializer {
+ dims: 1
+ data_type: FLOAT
+ float_data: 1
+ name: "Bias"
+ }
+ node {
+ input: "Input"
+ input: "Weight"
+ output: "AddInput"
+ name: "FCMatmul"
+ op_type: "MatMul"
+ }
+ node {
+ input: "AddInput"
+ input: "Bias"
+ output: "Output"
+ name: "FCAdd"
+ op_type: "Add"
+ }
+ value_info {
+ name: "AddInput"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ output {
+ name: "Output"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ }
+ opset_import {
+ version: 7
+ })";
+
+ Setup();
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(FullyConnected, FullyConnectedFixture)
+{
+ RunTest<1>({{"Input", { 3 }}}, {{"Output", { 7 }}});
+}
+
+
+// Similar to FullyConnectedFixture, but this time the MatMul's output is used by two Adds. This should result
+// in two FullyConnected layers being created.
+// I
+// |
+// M -- C
+// / \'
+// C-- A A -- C
+// \ /
+// A
+struct MatMulUsedInTwoFcFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
+{
+ MatMulUsedInTwoFcFixture()
+ {
+ m_Prototext = R"(
+ ir_version: 3
+ producer_name: "CNTK "
+ producer_version: "2.5.1 "
+ domain: "ai.cntk "
+ model_version: 1
+ graph {
+ name: "CNTKGraph "
+ input {
+ name: "Input"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "Weight"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ initializer {
+ dims: 1
+ data_type: FLOAT
+ float_data: 2
+ name: "Weight"
+ }
+ input {
+ name: "Bias"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ initializer {
+ dims: 1
+ data_type: FLOAT
+ float_data: 1
+ name: "Bias"
+ }
+ input {
+ name: "Bias_1"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ initializer {
+ dims: 1
+ data_type: FLOAT
+ float_data: 10.0
+ name: "Bias_1"
+ }
+ node {
+ input: "Input"
+ input: "Weight"
+ output: "AddInput"
+ name: "FCMatmul"
+ op_type: "MatMul"
+ }
+ node {
+ input: "AddInput"
+ input: "Bias"
+ output: "AddOutput"
+ name: "FCAdd"
+ op_type: "Add"
+ }
+ node {
+ input: "AddInput"
+ input: "Bias_1"
+ output: "AddOutput_1"
+ name: "FCAdd_1"
+ op_type: "Add"
+ }
+ node {
+ input: "AddOutput"
+ input: "AddOutput_1"
+ output: "Output"
+ name: "FinalAdd"
+ op_type: "Add"
+ }
+ value_info {
+ name: "AddInput"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ value_info {
+ name: "AddOutput"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ value_info {
+ name: "AddOutput_1"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ output {
+ name: "Output"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ }
+ opset_import {
+ version: 7
+ })";
+
+ Setup();
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(MatMulUsedInTwoFc, MatMulUsedInTwoFcFixture)
+{
+ RunTest<1>({{"Input", { 3 }}}, {{"Output", { 23 }}});
+}
+
+
+// Similar to MatMulUsedInTwoFc, but this time the Adds are 'staggered' (see diagram), which means that only one
+// FullyConnected layer can be created (the other should just be an Add).
+// I
+// |
+// M -- C1
+// / \'
+// C2 -- A |
+// \ /
+// A
+struct MatMulUsedInTwoFcStaggeredFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
+{
+ MatMulUsedInTwoFcStaggeredFixture()
+ {
+ m_Prototext = R"(
+ ir_version: 3
+ producer_name: "CNTK "
+ producer_version: "2.5.1 "
+ domain: "ai.cntk "
+ model_version: 1
+ graph {
+ name: "CNTKGraph "
+ input {
+ name: "Input"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "Weight"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ initializer {
+ dims: 1
+ data_type: FLOAT
+ float_data: 2
+ name: "Weight"
+ }
+ input {
+ name: "Bias"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ initializer {
+ dims: 1
+ data_type: FLOAT
+ float_data: 1
+ name: "Bias"
+ }
+ node {
+ input: "Input"
+ input: "Weight"
+ output: "AddInput"
+ name: "MatmulFC&NFC"
+ op_type: "MatMul"
+ }
+ node {
+ input: "AddInput"
+ input: "Bias"
+ output: "AddOutput"
+ name: "FCAdd"
+ op_type: "Add"
+ }
+
+ node {
+ input: "AddInput"
+ input: "AddOutput"
+ output: "Output"
+ name: "FinalAdd"
+ op_type: "Add"
+ }
+ value_info {
+ name: "AddInput"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ value_info {
+ name: "AddOutput"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ output {
+ name: "Output"
+ type {
+ tensor_type {
+ elem_type: FLOAT
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ }
+ opset_import {
+ version: 7
+ })";
+ Setup();
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(MatMulUsedInTwoFcStaggered, MatMulUsedInTwoFcStaggeredFixture)
+{
+ RunTest<1>({{"Input", { 3 }}}, {{"Output", { 13 }}});
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnOnnxParser/test/GetInputsOutputs.cpp b/src/armnnOnnxParser/test/GetInputsOutputs.cpp
new file mode 100644
index 0000000000..2e605a6322
--- /dev/null
+++ b/src/armnnOnnxParser/test/GetInputsOutputs.cpp
@@ -0,0 +1,255 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include <boost/test/unit_test.hpp>
+#include "../OnnxParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+#include <onnx/onnx.pb.h>
+#include "google/protobuf/stubs/logging.h"
+
+
+using ModelPtr = std::unique_ptr<onnx::ModelProto>;
+
+BOOST_AUTO_TEST_SUITE(OnnxParser)
+
// Fixture providing a minimal well-formed ONNX model (one Relu node over a
// 4-element FLOAT vector) used to exercise OnnxParser::GetInputs/GetOutputs.
struct GetInputsOutputsMainFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
{
    explicit GetInputsOutputsMainFixture()
    {
        m_Prototext = R"(
                   ir_version: 3
                   producer_name: "CNTK"
                   producer_version: "2.5.1"
                   domain: "ai.cntk"
                   model_version: 1
                   graph {
                     name: "CNTKGraph"
                     input {
                        name: "Input"
                        type {
                          tensor_type {
                            elem_type: FLOAT
                            shape {
                              dim {
                                dim_value: 4
                              }
                            }
                          }
                        }
                      }
                      node {
                         input: "Input"
                         output: "Output"
                         name: "ActivationLayer"
                         op_type: "Relu"
                      }
                      output {
                          name: "Output"
                          type {
                             tensor_type {
                               elem_type: FLOAT
                               shape {
                                   dim {
                                      dim_value: 4
                                   }
                               }
                            }
                         }
                      }
                    }
                   opset_import {
                      version: 7
                    })";
        Setup();  // Parse m_Prototext and build the network up front.
    }
};
+
+
+BOOST_FIXTURE_TEST_CASE(GetInput, GetInputsOutputsMainFixture)
+{
+ ModelPtr model = armnnOnnxParser::OnnxParser::LoadModelFromString(m_Prototext.c_str());
+ std::vector<std::string> tensors = armnnOnnxParser::OnnxParser::GetInputs(model);
+ BOOST_CHECK_EQUAL(1, tensors.size());
+ BOOST_CHECK_EQUAL("Input", tensors[0]);
+
+}
+
+BOOST_FIXTURE_TEST_CASE(GetOutput, GetInputsOutputsMainFixture)
+{
+ ModelPtr model = armnnOnnxParser::OnnxParser::LoadModelFromString(m_Prototext.c_str());
+ std::vector<std::string> tensors = armnnOnnxParser::OnnxParser::GetOutputs(model);
+ BOOST_CHECK_EQUAL(1, tensors.size());
+ BOOST_CHECK_EQUAL("Output", tensors[0]);
+}
+
// Fixture whose graph consists solely of a Constant node (a 7-element FLOAT
// tensor baked into the node's "value" attribute), so the model declares no
// graph inputs at all. Note the trailing spaces in the producer/name strings
// are part of the original model text and are preserved verbatim.
struct GetEmptyInputsOutputsFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
{
    GetEmptyInputsOutputsFixture()
    {
        m_Prototext = R"(
                   ir_version: 3
                   producer_name: "CNTK "
                   producer_version: "2.5.1 "
                   domain: "ai.cntk "
                   model_version: 1
                   graph {
                     name: "CNTKGraph "
                     node {
                        output: "Output"
                        attribute {
                          name: "value"
                          t {
                              dims: 7
                              data_type: FLOAT
                              float_data: 0.0
                              float_data: 1.0
                              float_data: 2.0
                              float_data: 3.0
                              float_data: 4.0
                              float_data: 5.0
                              float_data: 6.0

                          }
                          type: FLOAT
                        }
                        name: "constantNode"
                        op_type: "Constant"
                      }
                      output {
                          name: "Output"
                          type {
                             tensor_type {
                               elem_type: FLOAT
                               shape {
                                   dim {
                                      dim_value: 7
                                   }
                               }
                            }
                         }
                      }
                    }
                   opset_import {
                      version: 7
                    })";
        Setup();  // Parse m_Prototext and build the network up front.
    }
};
+
+BOOST_FIXTURE_TEST_CASE(GetEmptyInputs, GetEmptyInputsOutputsFixture)
+{
+ ModelPtr model = armnnOnnxParser::OnnxParser::LoadModelFromString(m_Prototext.c_str());
+ std::vector<std::string> tensors = armnnOnnxParser::OnnxParser::GetInputs(model);
+ BOOST_CHECK_EQUAL(0, tensors.size());
+}
+
+BOOST_AUTO_TEST_CASE(GetInputsNullModel)
+{
+ BOOST_CHECK_THROW(armnnOnnxParser::OnnxParser::LoadModelFromString(""), armnn::InvalidArgumentException);
+}
+
+BOOST_AUTO_TEST_CASE(GetOutputsNullModel)
+{
+ auto silencer = google::protobuf::LogSilencer(); //get rid of errors from protobuf
+ BOOST_CHECK_THROW(armnnOnnxParser::OnnxParser::LoadModelFromString("nknnk"), armnn::ParseException);
+}
+
// Fixture providing a model whose single Add node consumes two graph inputs
// ("Input0": 1x1x1x4 and "Input1": 4), used to check that GetInputs reports
// every declared input in declaration order.
struct GetInputsMultipleFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
{
    GetInputsMultipleFixture() {

        m_Prototext = R"(
                   ir_version: 3
                   producer_name: "CNTK"
                   producer_version: "2.5.1"
                   domain: "ai.cntk"
                   model_version: 1
                   graph {
                     name: "CNTKGraph"
                     input {
                        name: "Input0"
                        type {
                          tensor_type {
                            elem_type: FLOAT
                            shape {
                              dim {
                                dim_value: 1
                              }
                              dim {
                                dim_value: 1
                              }
                              dim {
                                dim_value: 1
                              }
                              dim {
                                dim_value: 4
                              }
                            }
                          }
                        }
                      }
                      input {
                         name: "Input1"
                         type {
                           tensor_type {
                             elem_type: FLOAT
                             shape {
                                dim {
                                  dim_value: 4
                                }
                             }
                           }
                         }
                      }
                      node {
                         input: "Input0"
                         input: "Input1"
                         output: "Output"
                         name: "addition"
                         op_type: "Add"
                         doc_string: ""
                         domain: ""
                      }
                      output {
                          name: "Output"
                          type {
                             tensor_type {
                               elem_type: FLOAT
                               shape {
                                   dim {
                                       dim_value: 1
                                   }
                                   dim {
                                       dim_value: 1
                                   }
                                   dim {
                                       dim_value: 1
                                   }
                                   dim {
                                       dim_value: 4
                                   }
                               }
                            }
                         }
                      }
                    }
                   opset_import {
                      version: 7
                    })";
        Setup();  // Parse m_Prototext and build the network up front.
    }
};
+
+BOOST_FIXTURE_TEST_CASE(GetInputsMultipleInputs, GetInputsMultipleFixture)
+{
+ ModelPtr model = armnnOnnxParser::OnnxParser::LoadModelFromString(m_Prototext.c_str());
+ std::vector<std::string> tensors = armnnOnnxParser::OnnxParser::GetInputs(model);
+ BOOST_CHECK_EQUAL(2, tensors.size());
+ BOOST_CHECK_EQUAL("Input0", tensors[0]);
+ BOOST_CHECK_EQUAL("Input1", tensors[1]);
+}
+
+
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnOnnxParser/test/Pooling.cpp b/src/armnnOnnxParser/test/Pooling.cpp
new file mode 100644
index 0000000000..8e2f0fee00
--- /dev/null
+++ b/src/armnnOnnxParser/test/Pooling.cpp
@@ -0,0 +1,310 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnOnnxParser/IOnnxParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(OnnxParser)
+
// Base fixture for the pooling tests: a 1x1x2x2 input pooled with a 2x2
// kernel, stride 1 and no padding, producing a 1x1x1x1 output. The input
// element type and the pooling op_type are spliced into the model text so
// derived fixtures can select MaxPool/AveragePool with valid or invalid data
// types. Setup() is deliberately NOT called here - derived fixtures decide
// when (or whether) the model is parsed.
struct PoolingMainFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
{
    PoolingMainFixture(const std::string& dataType, const std::string& op)
    {
        m_Prototext = R"(
                 ir_version: 3
                 producer_name: "CNTK"
                 producer_version: "2.5.1"
                 domain: "ai.cntk"
                 model_version: 1
                 graph {
                   name: "CNTKGraph"
                   input {
                      name: "Input"
                      type {
                        tensor_type {
                          elem_type: )" + dataType + R"(
                          shape {
                            dim {
                              dim_value: 1
                            }
                            dim {
                              dim_value: 1
                            }
                            dim {
                              dim_value: 2
                            }
                            dim {
                              dim_value: 2
                            }
                          }
                        }
                      }
                    }
                    node {
                       input: "Input"
                       output: "Output"
                       name: "Pooling"
                       op_type: )" + op + R"(
                       attribute {
                         name: "kernel_shape"
                         ints: 2
                         ints: 2
                         type: INTS
                       }
                       attribute {
                         name: "strides"
                         ints: 1
                         ints: 1
                         type: INTS
                       }
                       attribute {
                         name: "pads"
                         ints: 0
                         ints: 0
                         ints: 0
                         ints: 0
                         type: INTS
                       }
                    }
                    output {
                        name: "Output"
                        type {
                           tensor_type {
                             elem_type: FLOAT
                             shape {
                                 dim {
                                     dim_value: 1
                                 }
                                 dim {
                                     dim_value: 1
                                 }
                                 dim {
                                     dim_value: 1
                                 }
                                 dim {
                                     dim_value: 1
                                 }
                             }
                          }
                       }
                    }
                  }
                 opset_import {
                    version: 7
                 })";
    }
};
+
+struct MaxPoolValidFixture : PoolingMainFixture
+{
+ MaxPoolValidFixture() : PoolingMainFixture("FLOAT", "\"MaxPool\"") {
+ Setup();
+ }
+};
+
+struct MaxPoolInvalidFixture : PoolingMainFixture
+{
+ MaxPoolInvalidFixture() : PoolingMainFixture("FLOAT16", "\"MaxPool\"") { }
+};
+
+BOOST_FIXTURE_TEST_CASE(ValidMaxPoolTest, MaxPoolValidFixture)
+{
+ RunTest<4>({{"Input", {1.0f, 2.0f, 3.0f, -4.0f}}}, {{"Output", {3.0f}}});
+}
+
+struct AvgPoolValidFixture : PoolingMainFixture
+{
+ AvgPoolValidFixture() : PoolingMainFixture("FLOAT", "\"AveragePool\"") {
+ Setup();
+ }
+};
+
// Fixture for AveragePool with explicit padding: a 1x1x2x2 input, 4x4 kernel,
// stride 1 and a pad of 1 on every edge, with count_include_pad=1 so the zero
// padding counts towards the divisor (sum / 16 rather than sum / 4).
struct PoolingWithPadFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
{
    PoolingWithPadFixture()
    {
        m_Prototext = R"(
                 ir_version: 3
                 producer_name: "CNTK"
                 producer_version: "2.5.1"
                 domain: "ai.cntk"
                 model_version: 1
                 graph {
                   name: "CNTKGraph"
                   input {
                      name: "Input"
                      type {
                        tensor_type {
                          elem_type: FLOAT
                          shape {
                            dim {
                              dim_value: 1
                            }
                            dim {
                              dim_value: 1
                            }
                            dim {
                              dim_value: 2
                            }
                            dim {
                              dim_value: 2
                            }
                          }
                        }
                      }
                    }
                    node {
                       input: "Input"
                       output: "Output"
                       name: "Pooling"
                       op_type: "AveragePool"
                       attribute {
                         name: "kernel_shape"
                         ints: 4
                         ints: 4
                         type: INTS
                       }
                       attribute {
                         name: "strides"
                         ints: 1
                         ints: 1
                         type: INTS
                       }
                       attribute {
                         name: "pads"
                         ints: 1
                         ints: 1
                         ints: 1
                         ints: 1
                         type: INTS
                       }
                       attribute {
                         name: "count_include_pad"
                         i: 1
                         type: INT
                       }
                    }
                    output {
                        name: "Output"
                        type {
                           tensor_type {
                             elem_type: FLOAT
                             shape {
                                 dim {
                                     dim_value: 1
                                 }
                                 dim {
                                     dim_value: 1
                                 }
                                 dim {
                                     dim_value: 1
                                 }
                                 dim {
                                     dim_value: 1
                                 }
                             }
                          }
                       }
                    }
                  }
                 opset_import {
                    version: 7
                 })";
        Setup();  // Parse m_Prototext and build the network up front.
    }
};
+
+BOOST_FIXTURE_TEST_CASE(AveragePoolValid, AvgPoolValidFixture)
+{
+ RunTest<4>({{"Input", {1.0f, 2.0f, 3.0f, -4.0f}}}, {{"Output", {0.5}}});
+}
+
+BOOST_FIXTURE_TEST_CASE(ValidAvgWithPadTest, PoolingWithPadFixture)
+{
+ RunTest<4>({{"Input", {1.0f, 2.0f, 3.0f, -4.0f}}}, {{"Output", {1.0/8.0}}});
+}
+
// Fixture for GlobalAveragePool: a 1x2x2x2 input is averaged over its whole
// spatial extent, yielding one value per channel (output shape 1x2x1x1).
struct GlobalAvgFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
{
    GlobalAvgFixture()
    {
        m_Prototext = R"(
                 ir_version: 3
                 producer_name: "CNTK"
                 producer_version: "2.5.1"
                 domain: "ai.cntk"
                 model_version: 1
                 graph {
                   name: "CNTKGraph"
                   input {
                      name: "Input"
                      type {
                        tensor_type {
                          elem_type: FLOAT
                          shape {
                            dim {
                              dim_value: 1
                            }
                            dim {
                              dim_value: 2
                            }
                            dim {
                              dim_value: 2
                            }
                            dim {
                              dim_value: 2
                            }
                          }
                        }
                      }
                    }
                    node {
                       input: "Input"
                       output: "Output"
                       name: "Pooling"
                       op_type: "GlobalAveragePool"
                    }
                    output {
                        name: "Output"
                        type {
                           tensor_type {
                             elem_type: FLOAT
                             shape {
                                 dim {
                                     dim_value: 1
                                 }
                                 dim {
                                     dim_value: 2
                                 }
                                 dim {
                                     dim_value: 1
                                 }
                                 dim {
                                     dim_value: 1
                                 }
                             }
                          }
                       }
                    }
                  }
                 opset_import {
                    version: 7
                 })";
        Setup();  // Parse m_Prototext and build the network up front.
    }
};
+
+BOOST_FIXTURE_TEST_CASE(GlobalAvgTest, GlobalAvgFixture)
+{
+ RunTest<4>({{"Input", {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}}}, {{"Output", {10/4.0, 26/4.0}}});
+}
+
+BOOST_FIXTURE_TEST_CASE(IncorrectDataTypeMaxPool, MaxPoolInvalidFixture)
+{
+ BOOST_CHECK_THROW(Setup(), armnn::ParseException);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnOnnxParser/test/ProtoxtFixture.cpp b/src/armnnOnnxParser/test/ProtoxtFixture.cpp
new file mode 100644
index 0000000000..2bfeadf2e3
--- /dev/null
+++ b/src/armnnOnnxParser/test/ProtoxtFixture.cpp
@@ -0,0 +1,81 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnOnnxParser/IOnnxParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(OnnxParser)
+
// Smoke-test fixture holding a small Add model.
// NOTE(review): Setup() is left commented out - presumably because this model
// text does not parse as-is (note the trailing spaces inside several strings,
// in particular op_type: "Add "); confirm before enabling. The strings are
// preserved verbatim.
struct ProtoxtTestFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
{
    ProtoxtTestFixture()
    {
        m_Prototext = R"(
                   ir_version: 3
                   producer_name: "CNTK "
                   producer_version: "2.5.1 "
                   domain: "ai.cntk "
                   model_version: 1
                   graph {
                     name: "CNTKGraph "
                     node {
                        input: "Input"
                        output: "Output"
                        name: "Plus112"
                        op_type: "Add "
                     }
                     input {
                        name: "Input"
                        type {
                          tensor_type {
                            elem_type: FLOAT
                            shape {
                               dim {
                                  dim_value: 2
                               }
                            }
                          }
                        }
                     }
                     output {
                        name: "Output"
                        type {
                           tensor_type {
                              elem_type: FLOAT
                              shape {
                                 dim {
                                    dim_value: 1
                                 }
                                 dim {
                                    dim_value: 10
                                 }
                              }
                           }
                        }
                     }
                   }
                   opset_import {
                      version: 7
                   })";
        // Setup();
    }
};
+
+
// Placeholder: only verifies that the fixture constructs. The fixture's
// Setup() is currently disabled, so there is nothing further to run yet.
BOOST_FIXTURE_TEST_CASE(ProtoxtTest, ProtoxtTestFixture)
{
    //TODO : add a test to check if the inputs and outputs are correctly inferred.
}
+
// Disabled until ProtoxtTestFixture::Setup() can run (see note on the
// fixture): feeding tensors under names the graph never declares should be
// rejected with InvalidArgumentException.
BOOST_FIXTURE_TEST_CASE(ProtoxtTestWithBadInputs, ProtoxtTestFixture)
{

    // BOOST_CHECK_THROW(RunTest<4>({{ "InexistantInput" , {0.0, 1.0, 2.0, 3.0}}},
    //                  {{ "InexistantOutput" , {0.0, 1.0, 2.0, 3.0}}}),
    //                  armnn::InvalidArgumentException );
}

BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnOnnxParser/test/Relu.cpp b/src/armnnOnnxParser/test/Relu.cpp
new file mode 100644
index 0000000000..991f64c3fc
--- /dev/null
+++ b/src/armnnOnnxParser/test/Relu.cpp
@@ -0,0 +1,70 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnOnnxParser/IOnnxParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(OnnxParser)
+
// Fixture providing a minimal ONNX model with a single Relu node acting on a
// 4-element FLOAT vector; the model is parsed immediately in the constructor.
struct ReluMainFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
{
    ReluMainFixture()
    {
        m_Prototext = R"(
                 ir_version: 3
                 producer_name: "CNTK"
                 producer_version: "2.5.1"
                 domain: "ai.cntk"
                 model_version: 1
                 graph {
                   name: "CNTKGraph"
                   input {
                      name: "Input"
                      type {
                        tensor_type {
                          elem_type: FLOAT
                          shape {
                            dim {
                              dim_value: 4
                            }
                          }
                        }
                      }
                    }
                    node {
                       input: "Input"
                       output: "Output"
                       name: "ActivationLayer"
                       op_type: "Relu"
                    }
                    output {
                        name: "Output"
                        type {
                           tensor_type {
                             elem_type: FLOAT
                             shape {
                                 dim {
                                    dim_value: 4
                                 }
                             }
                          }
                       }
                    }
                  }
                 opset_import {
                    version: 7
                 })";
        Setup();  // Parse m_Prototext and build the network up front.
    }
};
+
+BOOST_FIXTURE_TEST_CASE(ValidReluTest, ReluMainFixture)
+{
+ RunTest<1>({{"Input", { -1.0f, -0.5f, 1.25f, -3.0f}}},
+ {{ "Output", { 0.0f, 0.0f, 1.25f, 0.0f}}});
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnOnnxParser/test/Reshape.cpp b/src/armnnOnnxParser/test/Reshape.cpp
new file mode 100644
index 0000000000..a740bb0ff3
--- /dev/null
+++ b/src/armnnOnnxParser/test/Reshape.cpp
@@ -0,0 +1,110 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnOnnxParser/IOnnxParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(OnnxParser)
+
// Base fixture for the Reshape tests: a 4-element "Input" of the given element
// type plus a constant INT64 "Shape" initializer {2, 2}, reshaped to a 2x2
// output. Setup() is deliberately NOT called here - derived fixtures decide
// when (or whether) the model is parsed.
struct ReshapeMainFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
{
    ReshapeMainFixture(const std::string& dataType)
    {
        m_Prototext = R"(
                   ir_version: 3
                   producer_name: "CNTK"
                   producer_version: "2.5.1"
                   domain: "ai.cntk"
                   model_version: 1
                   graph {
                     name: "CNTKGraph"
                     input {
                        name: "Input"
                        type {
                          tensor_type {
                            elem_type: )" + dataType + R"(
                            shape {
                              dim {
                                dim_value: 4
                              }
                            }
                          }
                        }
                      }
                      input {
                        name: "Shape"
                        type {
                          tensor_type {
                            elem_type: INT64
                            shape {
                              dim {
                                dim_value: 2
                              }
                            }
                          }
                        }
                      }
                      node {
                         input: "Input"
                         input: "Shape"
                         output: "Output"
                         name: "reshape"
                         op_type: "Reshape"

                      }
                      initializer {
                          dims: 2
                          data_type: INT64
                          int64_data: 2
                          int64_data: 2
                          name: "Shape"
                      }
                      output {
                          name: "Output"
                          type {
                             tensor_type {
                               elem_type: FLOAT
                               shape {
                                   dim {
                                       dim_value: 2
                                   }
                                   dim {
                                       dim_value: 2
                                   }
                               }
                            }
                         }
                      }
                    }
                   opset_import {
                      version: 7
                    })";
    }
};
+
+struct ReshapeValidFixture : ReshapeMainFixture
+{
+ ReshapeValidFixture() : ReshapeMainFixture("FLOAT") {
+ Setup();
+ }
+};
+
+struct ReshapeInvalidFixture : ReshapeMainFixture
+{
+ ReshapeInvalidFixture() : ReshapeMainFixture("FLOAT16") { }
+};
+
+BOOST_FIXTURE_TEST_CASE(ValidReshapeTest, ReshapeValidFixture)
+{
+ RunTest<2>({{"Input", { 0.0f, 1.0f, 2.0f, 3.0f }}}, {{"Output", { 0.0f, 1.0f, 2.0f, 3.0f }}});
+}
+
+BOOST_FIXTURE_TEST_CASE(IncorrectDataTypeReshape, ReshapeInvalidFixture)
+{
+ BOOST_CHECK_THROW(Setup(), armnn::ParseException);
+}
+
+BOOST_AUTO_TEST_SUITE_END()