8 #include "google/protobuf/repeated_field.h" 9 #include <unordered_map> 11 #include <onnx/onnx.pb.h> 23 using ModelPtr = std::unique_ptr<onnx::ModelProto>;
28 using OperationParsingFunction = void(
OnnxParserImpl::*)(
const onnx::NodeProto& NodeProto);
32 using GraphPtr = std::unique_ptr<onnx::GraphProto>;
39 const std::map<std::string, armnn::TensorShape>& inputShapes);
46 const std::map<std::string, armnn::TensorShape>& inputShapes);
54 const std::map<std::string, armnn::TensorShape>& inputShapes);
67 static ModelPtr LoadModelFromBinaryFile(
const char * fileName);
68 static ModelPtr LoadModelFromTextFile(
const char * fileName);
69 static ModelPtr LoadModelFromString(
const std::string& inputString);
72 static std::vector<std::string> GetInputs(
ModelPtr& model);
75 static std::vector<std::string> GetOutputs(
ModelPtr& model);
88 void SetupInfo(
const google::protobuf::RepeatedPtrField<onnx::ValueInfoProto >* list);
90 std::vector<armnn::TensorInfo> ComputeOutputInfo(
91 std::vector<std::string> outNames,
93 std::vector<armnn::TensorShape> inputShapes,
96 void DetectFullyConnected();
98 template <
typename Location>
99 void GetInputAndParam(
const onnx::NodeProto& node,
100 std::string* inputName,
101 std::string* constName,
102 const Location& location);
104 template <
typename Location>
105 void To1DTensor(
const std::string &name,
const Location& location);
108 std::pair<std::string, std::string> AddPrepareBroadcast(
const std::string& input0,
const std::string& input1);
109 void PrependForBroadcast(
const std::string& outputName,
const std::string& input0,
const std::string& input1);
112 void AddFullyConnected(
const onnx::NodeProto& matmulNode,
const onnx::NodeProto* addNode =
nullptr);
116 void CreateInt64ConstantLayer(
const std::string& tensorName,
const std::string& layerName);
118 const std::string& outputName,
119 const std::string& layerName);
122 void ParseClip(
const onnx::NodeProto& nodeProto);
123 void ParseSigmoid(
const onnx::NodeProto& nodeProto);
124 void ParseTanh(
const onnx::NodeProto& nodeProto);
125 void ParseRelu(
const onnx::NodeProto& nodeProto);
126 void ParseLeakyRelu(
const onnx::NodeProto& nodeProto);
128 void ParseAdd(
const onnx::NodeProto& nodeProto);
129 void ParseAveragePool(
const onnx::NodeProto& nodeProto);
130 void ParseBatchNormalization(
const onnx::NodeProto& node);
131 void ParseConcat(
const onnx::NodeProto& nodeProto);
132 void ParseConstant(
const onnx::NodeProto& nodeProto);
133 void ParseConv(
const onnx::NodeProto& nodeProto);
134 void ParseFlatten(
const onnx::NodeProto& node);
135 void ParseGather(
const onnx::NodeProto& node);
136 void ParseGemm(
const onnx::NodeProto& node);
137 void ParseGlobalAveragePool(
const onnx::NodeProto& node);
138 void ParseMaxPool(
const onnx::NodeProto& nodeProto);
139 void ParseShape(
const onnx::NodeProto& node);
140 void ParseReshape(
const onnx::NodeProto& nodeProto);
141 void ParseUnsqueeze(
const onnx::NodeProto& nodeProto);
144 const std::string& tensorId,
145 unsigned int slotIndex);
149 void SetupInputLayers();
150 void SetupOutputLayers();
155 std::pair<armnn::ConstTensor, std::unique_ptr<float[]>>
159 std::pair<armnn::ConstTensor, std::unique_ptr<int32_t[]>>
160 CreateInt64ConstTensor(
const std::string name,
163 template <
typename TypeList,
typename Location>
164 void ValidateInputs(
const onnx::NodeProto& node,
165 TypeList validInputs,
166 const Location& location);
177 std::unique_ptr<armnn::TensorInfo> m_info;
178 std::unique_ptr<const onnx::TensorProto> m_tensor;
181 OnnxTensor() : m_info(nullptr), m_tensor(nullptr), m_dtype(onnx::TensorProto::FLOAT) { }
182 bool isConstant() {
return m_tensor !=
nullptr; }
185 std::unordered_map<std::string, OnnxTensor> m_TensorsInfo;
188 static const std::map<std::string, OperationParsingFunction> m_ParserFunctions;
196 std::vector<armnn::IInputSlot*> inputSlots;
198 TensorSlots() : outputSlot(nullptr) { }
201 std::unordered_map<std::string, TensorSlots> m_TensorConnections;
204 std::unordered_map<std::string, std::pair<const onnx::NodeProto*, int>> m_OutputsMap;
210 std::vector<size_t> fusedWithNodes;
211 size_t inputForNodes;
213 UsageSummary() : fusedWithNodes({}), inputForNodes(0) { }
217 std::vector<UsageSummary> m_OutputsFusedAndUsed;
219 std::map<std::string, armnn::TensorShape> m_InputShapes;
221 std::unordered_map<std::string, armnn::TensorInfo> m_InputInfos;
223 std::unordered_map<std::string, armnn::TensorInfo> m_OutputInfos;
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
A Convolution2dDescriptor for the Convolution2dLayer.
flatbuffers::Offset< ConstTensor > CreateConstTensor(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::TensorInfo > info=0, armnnSerializer::ConstTensorData data_type=armnnSerializer::ConstTensorData_NONE, flatbuffers::Offset< void > data=0)
Copyright (c) 2021 ARM Limited and Contributors.
const std::string GetVersion()
std::unique_ptr< onnx::GraphProto > GraphPtr
std::unique_ptr< onnx::ModelProto > ModelPtr
flatbuffers::Offset< ReshapeLayer > CreateReshapeLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ReshapeDescriptor > descriptor=0)
An output connection slot for a layer.
EmptyOptional is used to initialize the Optional class in case we want to have a default value for an Optional.
flatbuffers::Offset< ConstantLayer > CreateConstantLayer(flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset< armnnSerializer::LayerBase > base=0, flatbuffers::Offset< armnnSerializer::ConstTensor > input=0)
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
armnn::BindingPointInfo BindingPointInfo
A Pooling2dDescriptor for the Pooling2dLayer.