11 #include <boost/format.hpp> 12 #include <boost/numeric/conversion/cast.hpp> 14 #include <google/protobuf/text_format.h> 15 #include <google/protobuf/io/zero_copy_stream_impl.h> 19 using namespace armnn;
// NOTE(review): extraction-mangled fragment. CheckValidDataType appears to
// validate an actual ONNX datatype against an allowed list (std::any_of) and
// throw with a formatted message otherwise — TODO confirm against full file;
// many original lines (26, 28, 30-31, 34-38, 41-47, 50) are missing here.
// Code left byte-identical; comments only.
25 void CheckValidDataType(std::initializer_list<onnx::TensorProto::DataType> validInputTypes,
27 const char* validExpr,
29 std::string tensorName,
32 bool isValid = std::any_of(validInputTypes.begin(),
33 validInputTypes.end(),
39 boost::format(
"Datatype %1% is not valid for tensor '%2%' of node '%3%', not in {%4%}. %5%") %
40 onnx::TensorProto::DataType_Name(actualValue) %
// CHECK_VALID_DATATYPE stringifies the allowed-type list via #__VA_ARGS__ so
// the thrown message can show which types were acceptable.
48 #define CHECK_VALID_DATATYPE(NODE, TENSOR, ACTUAL, ...) \ 49 CheckValidDataType({__VA_ARGS__}, ACTUAL, #__VA_ARGS__, NODE, TENSOR, CHECK_LOCATION()) 51 using StrTypeListPair = std::pair<const char*, std::initializer_list<onnx::TensorProto::DataType>>;
// NOTE(review): mangled fragment containing two templated attribute readers.
// The "Mandatory" variant seemingly throws ParseException when the attribute is
// absent (see the "Could not find required attribute" message); the "Optional"
// variant below it silently does nothing when absent — TODO confirm, since the
// loop bodies and closing braces are missing from this chunk.
// Code left byte-identical; comments only.
52 #define STR_LIST(...) StrTypeListPair(#__VA_ARGS__, {__VA_ARGS__}) 54 template <
typename Callable>
55 void ReadMandatoryNodeAttributeImpl(
const onnx::NodeProto& node,
56 const std::string& attribName,
57 onnx::AttributeProto::AttributeType expectedType,
60 auto attribs = node.attribute();
62 while (attriNum < node.attribute_size())
64 if (attribs.Get(attriNum).name() == attribName)
// A matching name with the wrong AttributeType is treated as an error rather
// than being skipped (message below).
66 if (attribs.Get(attriNum).type() == expectedType)
68 callable(attribs.Get(attriNum));
73 "Attribute %1% of node %2% expected to have %3% as onnx::AttributeProto::AttributeType, " 74 "but found %4% instead %5%")
77 % onnx::AttributeProto::AttributeType_Name(expectedType)
78 % onnx::AttributeProto::AttributeType_Name(attribs.Get(attriNum).type())
85 if (attriNum == node.attribute_size())
87 throw ParseException(boost::str(boost::format(
"Could not find required attribute %1% in node %2% %3%")
92 template <
typename Callable>
93 void ReadOptionalNodeAttributeImpl(
const onnx::NodeProto& node,
94 const std::string& attribName,
95 onnx::AttributeProto::AttributeType expectedType,
98 auto attribs = node.attribute();
99 for (
int attriNum = 0; attriNum < node.attribute_size(); ++attriNum)
101 if (attribs.Get(attriNum).name() == attribName)
103 if (attribs.Get(attriNum).type() == expectedType)
105 callable(attribs.Get(attriNum));
110 "Attribute %1% of node %2% expected to have %3% as onnx::AttributeProto::AttributeType, " 111 "but found %4% instead %5%")
114 % onnx::AttributeProto::AttributeType_Name(expectedType)
115 % onnx::AttributeProto::AttributeType_Name(attribs.Get(attriNum).type())
122 int64_t ReadOptionalNodeInt64Attribute(
const onnx::NodeProto& node,
123 const std::string& name,
124 const int64_t defaultValue = 0)
126 int64_t attribValue = defaultValue;
127 ReadOptionalNodeAttributeImpl(node, name, onnx::AttributeProto::INT,
128 [&attribValue](
const onnx::AttributeProto& attrValue)
130 attribValue = attrValue.i();
// NOTE(review): mangled fragment covering three uint32 attribute readers.
// Each seemingly collects INTS/INT attribute values into uint32 results; the
// lambda interiors (the push_back / assignment lines, presumably with range
// checks) are missing from this chunk — TODO confirm against full file.
// Code left byte-identical; comments only.
135 std::vector<uint32_t> ReadMandatoryNodeUint32ListAttribute(
const onnx::NodeProto& node,
136 const std::string& name)
138 std::vector<uint32_t> attriList;
139 ReadMandatoryNodeAttributeImpl(node, name, onnx::AttributeProto::INTS,
140 [&attriList](
const onnx::AttributeProto& attrValue)
142 for (
int attriNum = 0; attriNum < attrValue.ints_size(); ++attriNum)
// Optional scalar variant: returns defaultVal when the attribute is absent.
150 uint32_t ReadOptionalNodeUint32Attribute(
const onnx::NodeProto& node,
151 const std::string& name,
152 const uint32_t defaultVal = 0u)
154 uint32_t attribValue = defaultVal;
155 ReadOptionalNodeAttributeImpl(node, name, onnx::AttributeProto::INT,
156 [&attribValue](
const onnx::AttributeProto& attrValue)
// Optional list variant: returns an empty vector when the attribute is absent.
163 std::vector<uint32_t> ReadOptionalNodeUint32ListAttribute(
const onnx::NodeProto& node,
164 const std::string& name)
166 std::vector<uint32_t> attriList;
167 ReadOptionalNodeAttributeImpl(node, name, onnx::AttributeProto::INTS,
168 [&attriList](
const onnx::AttributeProto& attrValue)
170 for (
int attriNum = 0; attriNum < attrValue.ints_size(); ++attriNum)
179 float ReadOptionalNodeFloatAttribute(
const onnx::NodeProto& node,
180 const std::string& name,
181 const float defaultValue = 0.0f)
183 float attribValue = defaultValue;
184 ReadOptionalNodeAttributeImpl(node, name, onnx::AttributeProto::FLOAT,
185 [&attribValue](
const onnx::AttributeProto& attrValue)
187 attribValue = attrValue.f();
192 std::string ReadOptionalNodeStringAttribute(
const onnx::NodeProto& node,
const std::string& name)
194 std::string attribValue =
"";
195 ReadOptionalNodeAttributeImpl(node, name, onnx::AttributeProto::STRING,
196 [&attribValue](
const onnx::AttributeProto& attrValue)
198 attribValue = attrValue.s();
// NOTE(review): mangled fragment of the datatype-switch inside a ToTensorInfo
// helper plus two ToTensorInfo overloads and TensorInfoAsString. Only FLOAT,
// INT32 and INT64 appear supported (per the error message); the surrounding
// switch scaffolding and function signatures are partly missing — TODO confirm.
// Code left byte-identical; comments only.
208 case onnx::TensorProto::FLOAT:
213 case onnx::TensorProto::INT32:
214 case onnx::TensorProto::INT64:
223 boost::format(
"'%1%' is not a currently supported datatype for tensor %2%." 224 " Supported dataTypes are FLOAT, INT32 and INT64. %3%") %
225 onnx::TensorProto::DataType_Name(
226 static_cast<onnx::TensorProto::DataType>(data_type)) %
// Overload taking a ValueInfoProto: extracts dims from the tensor_type shape.
243 const onnx::TensorShapeProto onnxShape = info.type().tensor_type().shape();
244 std::vector<unsigned int> shapeDims;
245 for (
int i = 0; i < onnxShape.dim_size(); ++i)
// A dimensionless tensor is represented as shape [1].
250 if (shapeDims.empty())
252 shapeDims.push_back(1);
255 return ToTensorInfo(info.name(), shapeDims, info.type().tensor_type().elem_type());
// Overload taking a TensorProto (initializer): dims come straight from dims().
260 std::vector<unsigned int> shapeDims;
262 for (
auto dim: tensor.dims())
267 if (shapeDims.empty())
269 shapeDims.push_back(1);
272 return ToTensorInfo(tensor.name(), shapeDims, tensor.data_type());
// Human-readable tensor description used in error messages.
275 std::string TensorInfoAsString(
const TensorInfo& info,
276 const std::string& name,
280 std::stringstream ss;
281 ss <<
"tensor '" << name <<
"' contains " 282 << onnx::TensorProto::DataType_Name(type)
283 <<
" and has shape [";
287 ss << shape[i] <<
", ";
/// Computes SAME-style convolution/pooling padding for one spatial axis.
///
/// @param inputSize    extent of the input along this axis
/// @param filterSize   kernel extent along this axis
/// @param stride       stride along this axis (must be > 0)
/// @param paddingFront receives the padding added before the data
/// @param paddingBack  receives the padding added after the data
/// @param isUpper      true for SAME_UPPER (odd spare cell goes to the back),
///                     false for SAME_LOWER (spare cell goes to the front)
void CalcPadding(uint32_t inputSize, uint32_t filterSize, uint32_t stride, uint32_t* paddingFront,
                 uint32_t* paddingBack, bool isUpper)
{
    // Output size for SAME padding: ceil(inputSize / stride).
    uint32_t outputSize = (inputSize + stride - 1) / stride;
    uint32_t temp = (outputSize - 1) * stride + filterSize;
    // Fix: when filterSize < stride, temp can be smaller than inputSize and the
    // original unsigned subtraction (temp - inputSize) would wrap around to a
    // huge padding value. Clamp the total padding to zero in that case.
    uint32_t totalPadding = (temp > inputSize) ? (temp - inputSize) : 0;
    *paddingFront = totalPadding / 2;
    *paddingBack = *paddingFront;
    if (totalPadding % 2 == 1)
    {
        if (isUpper)
        {
            *paddingBack += 1;
        }
        else
        {
            *paddingFront += 1;
        }
    }
}
// NOTE(review): mangled fragment of a reshape-target computation. It gathers
// target dims, allows at most one -1 "stretch" dimension, and resolves that
// dimension from the input element count. The signature and several statements
// (lines 316-330, 334, closing braces) are missing — TODO confirm.
// Code left byte-identical; comments only.
315 const std::string& outName)
317 std::vector<int> targetDims;
323 targetDims.push_back(static_cast<int>(inShape[static_cast<uint>(i)]));
327 targetDims.push_back(val);
331 std::vector<unsigned int> outDims(targetDims.begin(), targetDims.end());
332 const auto stretchDim = std::find(targetDims.begin(), targetDims.end(), -1);
333 if (stretchDim != targetDims.end())
// More than one -1 in the target shape is ambiguous -> build an error message.
335 if (std::find(std::next(stretchDim), targetDims.end(), -1) != targetDims.end())
337 std::stringstream ss;
339 for(uint i = 0; i < targetDims.size() - 1; ++i)
341 ss << targetDims[i] <<
", ";
343 ss << targetDims[targetDims.size() - 1] <<
" ]";
346 boost::format(
"Error during creation of reshaped tensor '%1%'. At most one component of shape can be " 347 " -1 and here, shape is %2% %3%")
// accumulate starts at -1 so the single -1 stretch entry cancels out and the
// product is the (positive) number of fixed elements.
353 auto targetNumElements =
boost::numeric_cast<
unsigned int>(std::accumulate(targetDims.begin(), targetDims.end(),
354 -1, std::multiplies<int32_t>()));
355 auto stretchIndex =
static_cast<size_t>(std::distance(targetDims.begin(), stretchDim));
356 outDims[stretchIndex] = inShape.
GetNumElements() / targetNumElements;
// Dispatch table mapping ONNX op_type strings to OnnxParser member parsers.
// NOTE(review): the closing brace/semicolon of the initializer is outside this
// mangled chunk. Code left byte-identical; comments only.
364 const std::map<std::string, OnnxParser::OperationParsingFunction> OnnxParser::m_ParserFunctions = {
365 {
"BatchNormalization", &OnnxParser::ParseBatchNormalization},
366 {
"GlobalAveragePool", &OnnxParser::ParseGlobalAveragePool},
367 {
"AveragePool", &OnnxParser::ParseAveragePool },
368 {
"Clip", &OnnxParser::ParseClip },
369 {
"Constant", &OnnxParser::ParseConstant },
370 {
"MaxPool", &OnnxParser::ParseMaxPool },
371 {
"Reshape", &OnnxParser::ParseReshape },
372 {
"Sigmoid", &OnnxParser::ParseSigmoid },
373 {
"Tanh", &OnnxParser::ParseTanh },
374 {
"Relu", &OnnxParser::ParseRelu },
375 {
"LeakyRelu", &OnnxParser::ParseLeakyRelu },
376 {
"Conv", &OnnxParser::ParseConv },
377 {
"Add", &OnnxParser::ParseAdd },
378 {
"Flatten", &OnnxParser::ParseFlatten},
// NOTE(review): mangled fragment of ValidateInputs (checks every node input's
// recorded dtype against an allowed list) and ComputeOutputInfo (returns cached
// TensorInfo for each output name, inferring shapes only when some output is
// unknown). Many statements are missing — TODO confirm against full file.
// Code left byte-identical; comments only.
381 template<
typename TypePair,
typename Location>
382 void OnnxParser::ValidateInputs(
const onnx::NodeProto& node,
383 TypePair validInputs,
384 const Location& location)
386 for(
auto input : node.input())
388 CheckValidDataType(validInputs.second,
389 m_TensorsInfo[input].m_dtype,
397 #define VALID_INPUTS(NODE, VALID_INPUTS) \ 398 OnnxParser::ValidateInputs(NODE, \ 402 std::vector<TensorInfo> OnnxParser::ComputeOutputInfo(std::vector<std::string> outNames,
404 std::vector<TensorShape> inputShapes)
// Only run shape inference when at least one output has no cached info yet.
407 bool needCompute = std::any_of(outNames.begin(),
409 [
this](std::string name)
411 return (m_TensorsInfo.count(name) == 0 || m_TensorsInfo[name].m_info ==
nullptr);
413 std::vector<TensorInfo> outInfo;
415 std::vector<TensorShape> inferredShapes;
421 for (uint i = 0; i < outNames.size(); ++i)
425 m_TensorsInfo[outNames[i]] = OnnxTensor();
426 m_TensorsInfo[outNames[i]].m_info = std::make_unique<TensorInfo>(
429 outInfo.push_back(*m_TensorsInfo[outNames[i]].m_info);
// NOTE(review): mangled fragment with the constructor, ResetParser (body not
// visible), Cleanup, and CreateConstTensor. CreateConstTensor copies either
// float_data or raw_data into a freshly allocated float buffer whose ownership
// travels with the returned pair. Code left byte-identical; comments only.
449 OnnxParser::OnnxParser()
450 : m_Network(nullptr, nullptr)
454 void OnnxParser::ResetParser()
460 void OnnxParser::Cleanup()
462 m_TensorConnections.clear();
463 m_TensorsInfo.clear();
464 m_OutputsMap.clear();
465 m_OutputsFusedAndUsed.clear();
468 std::pair<ConstTensor, std::unique_ptr<float[]>> OnnxParser::CreateConstTensor(
const std::string name)
470 const TensorInfo tensorInfo = *m_TensorsInfo[name].m_info;
471 onnx::TensorProto onnxTensor = *m_TensorsInfo[name].m_tensor;
473 auto srcData = onnxTensor.float_data().data();
474 std::unique_ptr<float[]> tensorData(
new float[tensorInfo.
GetNumElements()]);
475 const size_t tensorSizeInBytes = tensorInfo.
GetNumBytes();
// Protobuf tensors carry data either as repeated floats or as raw bytes.
477 if (!onnxTensor.has_raw_data())
// Element-count mismatch between the recorded info and the proto is an error.
479 if(tensorInfo.
GetNumElements() !=
static_cast<uint
>(onnxTensor.float_data_size())
482 boost::format(
"The number of data provided (%1%) does not match the tensor '%2%' number of elements" 484 % onnxTensor.float_data_size()
489 ::memcpy(tensorData.get(), srcData, tensorSizeInBytes);
493 ::memcpy(tensorData.get(), onnxTensor.raw_data().c_str(), tensorSizeInBytes);
500 boost::format(
"No tensor data found for Const tensor '%1%' %2%")
// The unique_ptr in the pair keeps the buffer alive as long as the ConstTensor.
504 return std::make_pair(
ConstTensor(tensorInfo, tensorData.get()), std::move(tensorData));
// NOTE(review): mangled fragment of the four network-creation entry points:
// text file, binary file, in-memory string, and the shared CreateNetworkFromModel.
// NOTE(review): no fclose(fd) is visible in this chunk for either fopen —
// presumably it lives on the missing lines; verify against the full file.
// Code left byte-identical; comments only.
509 FILE* fd = fopen(graphFile,
"r");
514 boost::format(
"Invalid (null) filename %1%") %
CHECK_LOCATION().AsString()));
518 ModelPtr modelProto = std::make_unique<onnx::ModelProto>();
519 using google::protobuf::io::FileInputStream;
520 std::unique_ptr<FileInputStream> input = std::make_unique<FileInputStream>(fileno(fd));
521 bool success = google::protobuf::TextFormat::Parse(input.get(), modelProto.get());
526 std::stringstream
error;
527 error <<
"Failed to parse graph file";
529 boost::format(
"%1% %2%") % error.str() %
CHECK_LOCATION().AsString()));
538 return CreateNetworkFromModel(*modelProto);
// Binary (serialized protobuf) variant.
544 FILE* fd = fopen(graphFile,
"rb");
549 boost::format(
"Invalid (null) filename %1%") %
CHECK_LOCATION().AsString()));
553 ModelPtr modelProto = std::make_unique<onnx::ModelProto>();
555 google::protobuf::io::FileInputStream inStream(fileno(fd));
556 google::protobuf::io::CodedInputStream codedStream(&inStream);
// NOTE(review): two-argument SetTotalBytesLimit is deprecated in newer
// protobuf releases (single-argument form since 3.6) — worth revisiting.
557 codedStream.SetTotalBytesLimit(INT_MAX, INT_MAX);
558 bool success = modelProto.get()->ParseFromCodedStream(&codedStream);
563 std::stringstream
error;
564 error <<
"Failed to parse graph file";
566 boost::format(
"%1% %2%") % error.str() %
CHECK_LOCATION().AsString()));
576 return CreateNetworkFromModel(*modelProto);
// In-memory text-format variant.
584 boost::format(
"Invalid (empty) string for model parameter %1%") %
CHECK_LOCATION().AsString()));
587 ModelPtr modelProto = std::make_unique<onnx::ModelProto>();
588 bool success = google::protobuf::TextFormat::ParseFromString(protoText, modelProto.get());
591 std::stringstream
error;
592 error <<
"Failed to parse graph file";
594 boost::format(
"%1% %2%") % error.str() %
CHECK_LOCATION().AsString()));
603 return CreateNetworkFromModel(*modelProto);
// Shared back end: copies the graph out of the model and builds m_Network.
606 INetworkPtr OnnxParser::CreateNetworkFromModel(onnx::ModelProto& model)
608 m_Network = INetwork::Create();
611 m_Graph = std::make_unique<onnx::GraphProto>(*model.mutable_graph());
620 return std::move(m_Network);
// NOTE(review): mangled fragment of LoadGraph and SetupInfo. LoadGraph
// registers tensor info for outputs/inputs/value_info and initializers, runs
// MatMul+Add fusion detection, dispatches each node to its parser, then wires
// recorded output slots to input slots. Several statements and all braces are
// missing from this chunk. Code left byte-identical; comments only.
623 void OnnxParser::LoadGraph()
628 SetupInfo(m_Graph->mutable_output());
629 SetupInfo(m_Graph->mutable_input());
630 SetupInfo(m_Graph->mutable_value_info());
632 for (
auto tensor : m_Graph->initializer())
634 m_TensorsInfo[tensor.name()].m_tensor = std::make_unique<const onnx::TensorProto>(tensor);
635 m_TensorsInfo[tensor.name()].m_info = std::make_unique<TensorInfo>(
ToTensorInfo(tensor));
636 m_TensorsInfo[tensor.name()].m_dtype =
644 DetectFullyConnected();
647 for(
size_t nodeIndex = 0; nodeIndex < static_cast<size_t>(m_Graph->node_size()); nodeIndex++)
649 auto node = m_Graph->node(static_cast<int>(nodeIndex));
650 const std::string& operation = node.op_type();
// A MatMul whose outputs all feed fused Adds is emitted as part of the fused
// FullyConnected instead of standing alone.
653 if (operation ==
"MatMul" )
655 if(m_OutputsFusedAndUsed[nodeIndex].inputForNodes != m_OutputsFusedAndUsed[nodeIndex].fusedWithNodes.size())
658 AddFullyConnected(node);
661 else if (!(m_OutputsFusedAndUsed[nodeIndex].fusedWithNodes.empty()) && operation ==
"Add")
663 int matmulIndex =
static_cast<int> (m_OutputsFusedAndUsed[nodeIndex].fusedWithNodes[0]);
664 AddFullyConnected(m_Graph->node(matmulIndex), &node);
666 else if (m_OutputsFusedAndUsed[nodeIndex].fusedWithNodes.empty())
668 auto it = m_ParserFunctions.find(operation);
669 if (it != m_ParserFunctions.end())
671 auto func = it->second;
677 boost::format(
"Unsupported operation %1% for node '%2%' %3%")
// Connect every recorded output slot to all of its pending input slots.
686 for (
const auto& tensorCon : m_TensorConnections)
688 if (tensorCon.second.outputSlot !=
nullptr)
690 for (
size_t inputSlotIdx = 0; inputSlotIdx < tensorCon.second.inputSlots.size(); ++inputSlotIdx)
692 tensorCon.second.outputSlot->Connect(*(tensorCon.second.inputSlots[inputSlotIdx]));
// Registers a fresh OnnxTensor (info + dtype) for every ValueInfoProto.
698 void OnnxParser::SetupInfo(
const google::protobuf::RepeatedPtrField<onnx::ValueInfoProto >* list)
700 for (
auto tensor : *list)
702 m_TensorsInfo[tensor.name()] = OnnxTensor();
703 m_TensorsInfo[tensor.name()].m_info = std::make_unique<TensorInfo>(
ToTensorInfo(tensor));
704 m_TensorsInfo[tensor.name()].m_dtype =
// NOTE(review): mangled fragment. DetectFullyConnected builds m_OutputsMap
// (tensor name -> producing node/index), counts consumers per node, and marks
// Add nodes fed by a MatMul plus a constant as fusable into FullyConnected.
// Graph outputs also count as consumers so a MatMul feeding an output is not
// silently swallowed by fusion. Code left byte-identical; comments only.
709 void OnnxParser::DetectFullyConnected()
711 m_OutputsFusedAndUsed = std::vector<UsageSummary> (
static_cast<size_t>(m_Graph->node_size()), UsageSummary());
// Predicate: true when matmulInput is produced by a MatMul node and
// constInput is a constant initializer; reports the MatMul's index.
712 auto matmulAndConstant = [&](
const std::string& constInput,
713 const std::string& matmulInput,
716 auto matmulIt = m_OutputsMap.find(matmulInput);
717 if(matmulIt != m_OutputsMap.end() && matmulIt->second.first->op_type() ==
"MatMul" 718 && m_TensorsInfo[constInput].isConstant())
720 nodeIndex = matmulIt->second.second;
726 for(
int nodeIndex = 0; nodeIndex < m_Graph->node_size(); nodeIndex++)
728 const onnx::NodeProto* node = &m_Graph->node(nodeIndex);
729 for (
const std::string& output : node->output())
731 m_OutputsMap[output] = std::make_pair(node, nodeIndex);
734 for (
const std::string& input : node->input())
736 auto matmulIt = m_OutputsMap.find(input);
737 if(matmulIt != m_OutputsMap.end()){
738 ++m_OutputsFusedAndUsed[
static_cast<size_t>(matmulIt->second.second)].inputForNodes;
742 if (node->op_type() ==
"Add")
// Either operand order (const, matmul) is accepted.
745 if (matmulAndConstant(node->input(0), node->input(1), matmulIndex) ||
746 matmulAndConstant(node->input(1), node->input(0), matmulIndex))
// Record the fusion in both directions: MatMul <-> Add.
749 m_OutputsFusedAndUsed[
static_cast<size_t>(matmulIndex)].fusedWithNodes
750 .push_back(static_cast<size_t>(nodeIndex));
752 m_OutputsFusedAndUsed[
static_cast<size_t>(nodeIndex)].fusedWithNodes
753 .push_back(static_cast<size_t>(matmulIndex));
758 for (
auto output: m_Graph->output()) {
759 auto matmulIt = m_OutputsMap.find(output.name());
760 if(matmulIt != m_OutputsMap.end()){
761 ++m_OutputsFusedAndUsed[
static_cast<size_t>(matmulIt->second.second)].inputForNodes;
// NOTE(review): mangled fragment of GetInputAndParam (splits a two-input node
// into its variable input and its constant parameter, throwing when neither is
// constant) and To1DTensor (collapses a [1,...,1,X] shape to [X]). Several
// statements and the cstIndex declaration are missing — TODO confirm.
// Code left byte-identical; comments only.
766 template<
typename Location>
767 void OnnxParser::GetInputAndParam(
const onnx::NodeProto& node,
768 std::string* inputName,
769 std::string* constName,
770 const Location& location)
773 if (m_TensorsInfo[node.input(0)].isConstant())
777 else if (m_TensorsInfo[node.input(1)].isConstant())
784 boost::format(
"One of the input tensors ('%1%' or '%2%') should be constant in node '%3%' %4%")
788 % location.AsString()));
// cstIndex selects the constant operand; !cstIndex is the other one.
792 *constName = node.input(cstIndex);
796 *inputName = node.input(!cstIndex);
800 template<
typename Location>
801 void OnnxParser::To1DTensor(
const std::string& name,
const Location& location)
803 TensorShape shape = m_TensorsInfo[name].m_info->GetShape();
804 std::vector<uint32_t> newShape;
810 boost::format(
"Only tensors with shape [1, ..., 1, X] can be converted to 1D and %1% %2%")
811 % TensorInfoAsString(*m_TensorsInfo[name].m_info, name, m_TensorsInfo[name].m_dtype)
812 % location.AsString()));
817 m_TensorsInfo[name].m_info->SetShape(
TensorShape(static_cast<unsigned int>(newShape.size()), newShape.data()));
// NOTE(review): mangled fragment of AddConvLayerWithDepthwiseConv (rewrites a
// grouped Conv as a DepthwiseConvolution2d, swapping weight dims) and
// AddFullyConnected (emits a FullyConnected layer from a MatMul node,
// optionally fused with a following Add supplying the bias). Many statements,
// including the descriptor setup, are missing. Code left byte-identical;
// comments only.
820 void OnnxParser::AddConvLayerWithDepthwiseConv(
const onnx::NodeProto& node,
const Convolution2dDescriptor& convDesc)
834 auto weightTensor = CreateConstTensor(node.input(1));
// Depthwise weights expect the channel multiplier in a different position, so
// dims 0 and 1 are seemingly being swapped here — confirm against full file.
835 TensorShape& weightShape = weightTensor.first.GetShape();
836 weightShape[1] = weightShape[0];
838 m_TensorsInfo[node.input(1)].m_info->SetShape(weightShape);
840 if (node.input_size() == 3)
842 if(!m_TensorsInfo[node.input(2)].isConstant())
845 boost::format(
"Bias '%1%' should be constant in Conv layer '%2%' %3%")
850 desc.m_BiasEnabled =
true;
851 auto biasTensor = CreateConstTensor(node.input(2));
852 layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
855 node.name().c_str());
859 layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
862 node.name().c_str());
866 auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
867 { m_TensorsInfo[node.input(0)].m_info->GetShape(),
868 m_TensorsInfo[node.input(1)].m_info->GetShape() });
874 RegisterInputSlots(layer, {node.input(0)});
877 RegisterOutputSlots(layer, {node.output(0)});
// MatMul (+ optional fused Add bias) -> FullyConnected.
880 void OnnxParser::AddFullyConnected(
const onnx::NodeProto& matmulNode,
const onnx::NodeProto* addNode)
884 std::string weightName;
885 std::string inputName;
890 GetInputAndParam(matmulNode, &inputName, &weightName,
CHECK_LOCATION());
899 std::string biasName;
908 TensorInfo weightInfo = *m_TensorsInfo[weightName].m_info;
909 TensorInfo biasInfo = *m_TensorsInfo[biasName].m_info;
// Bias must be 1D and match the weights' output dimension.
914 boost::format(
"Shape of weights '%1%' and bias of following Add node '%2%' do not match : %3%" 915 " and %4% ( /!\\ bias should be a 1D tensor) %5%")
918 % TensorInfoAsString(*m_TensorsInfo[weightName].m_info,
920 m_TensorsInfo[weightName].m_dtype)
921 % TensorInfoAsString(*m_TensorsInfo[biasName].m_info, biasName,
922 m_TensorsInfo[biasName].m_dtype )
925 layer = m_Network->AddFullyConnectedLayer(desc,
926 CreateConstTensor(weightName).first,
928 matmulNode.name().c_str());
// Fused path: the Add node's output becomes the layer output.
931 auto outputInfo = ComputeOutputInfo({addNode->output(0)}, layer,
932 {m_TensorsInfo[inputName].m_info->GetShape(),
933 m_TensorsInfo[weightName].m_info->GetShape()});
937 RegisterInputSlots(layer, {inputName});
938 RegisterOutputSlots(layer, {addNode->output(0)});
// Un-fused path: plain MatMul without bias.
942 layer = m_Network->AddFullyConnectedLayer(desc,
943 CreateConstTensor(weightName).first,
945 matmulNode.name().c_str());
948 auto outputInfo = ComputeOutputInfo({matmulNode.output(0)}, layer,
949 {m_TensorsInfo[inputName].m_info->GetShape(),
950 m_TensorsInfo[weightName].m_info->GetShape()});
953 RegisterInputSlots(layer, {inputName});
954 RegisterOutputSlots(layer, {matmulNode.output(0)});
// NOTE(review): mangled fragment, starts mid-way through a pooling-layer
// builder (signature not visible) and ends with AddPrepareBroadcast. The
// pooling part reads kernel/stride/pad attributes, honours auto_pad
// (SAME_UPPER / SAME_LOWER / VALID), and registers the layer's slots.
// Code left byte-identical; comments only.
966 std::vector<uint32_t> kernel_shape = ReadMandatoryNodeUint32ListAttribute(node,
"kernel_shape");
967 std::vector<uint32_t> strides = ReadOptionalNodeUint32ListAttribute(node,
"strides");
968 std::vector<uint32_t> pads = ReadOptionalNodeUint32ListAttribute(node,
"pads");
// Explicit pads take precedence; auto_pad only matters when it is not VALID /
// empty / NOTSET.
989 std::string paddingString = ReadOptionalNodeStringAttribute(node,
"auto_pad");
990 if(paddingString !=
"VALID" && paddingString !=
"" && paddingString !=
"NOTSET")
993 if( paddingString ==
"SAME_LOWER")
997 else if (paddingString ==
"SAME_UPPER")
1004 boost::format(
"Invalid auto_pad attribute for node %1%. " 1005 "Only SAME_UPPER, SAME_LOWER or VALID supported and found %2% %3%")
// Shape layout is NCHW: [2] = height, [3] = width.
1010 auto inputInfo = *m_TensorsInfo[node.input(0)].m_info;
1011 uint32_t inputHeight = inputInfo.GetShape()[2];
1012 uint32_t inputWidth = inputInfo.GetShape()[3];
1025 IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, node.name().c_str());
1028 auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {m_TensorsInfo[node.input(0)].m_info->GetShape()});
1033 RegisterInputSlots(layer, {node.input(0)});
1036 RegisterOutputSlots(layer, {node.output(0)});
// Prepends reshapes so both Add operands have the same rank; returns the
// (possibly substituted) input names.
1039 std::pair<std::string, std::string> OnnxParser::AddPrepareBroadcast(
const std::string& input0,
1040 const std::string& input1)
1042 std::pair<std::string, std::string> inputs = std::make_pair(input0, input1);
1044 TensorShape input0Shape = m_TensorsInfo[input0].m_info->GetShape();
1045 TensorShape input1Shape = m_TensorsInfo[input1].m_info->GetShape();
1049 auto outputName = boost::str(boost::format(
"reshape_output_%1%") % input1);
1050 PrependForBroadcast(outputName, input1, input0);
1051 inputs.second = outputName;
1055 auto outputName = boost::str(boost::format(
"reshape_output_%1%") % input0);
1056 PrependForBroadcast(outputName, input0, input1);
1057 inputs.first = outputName;
// NOTE(review): mangled fragment of two small layer factories. Lines 1065,
// 1067 and 1074-1084 (incl. SetTensorInfo and the descriptor setup) are
// missing from this chunk — TODO confirm. Code left byte-identical;
// comments only.
1062 void OnnxParser::CreateConstantLayer(
const std::string& tensorName,
const std::string& layerName)
1064 auto armnnTensor = CreateConstTensor(tensorName);
1066 IConnectableLayer* layer = m_Network->AddConstantLayer(armnnTensor.first, layerName.c_str());
1068 RegisterOutputSlots(layer, {tensorName});
// Emits a Reshape layer from inputName to the info recorded for outputName.
1071 void OnnxParser::CreateReshapeLayer(
const std::string& inputName,
1072 const std::string& outputName,
1073 const std::string& layerName)
1075 const TensorInfo outputTensorInfo = *m_TensorsInfo[outputName].m_info;
1079 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1085 RegisterInputSlots(layer, {inputName});
1088 RegisterOutputSlots(layer, {outputName});
// NOTE(review): mangled tail of ParseActivation (its signature is outside this
// chunk). For Clip/BoundedReLu, max (m_A) comes from input 2 and min (m_B)
// from input 1, defaulting to +FLT_MAX / lowest when those inputs are empty.
// Code left byte-identical; comments only.
1101 if (func == ActivationFunction::BoundedReLu)
1103 desc.
m_A = node.input(2).empty() ? std::numeric_limits<float>::max() : std::stof(node.input(2));
1104 desc.
m_B = node.input(1).empty() ? std::numeric_limits<float>::lowest() : std::stof(node.input(1));
1107 IConnectableLayer*
const layer = m_Network->AddActivationLayer(desc, node.name().c_str());
1110 auto outputInfo = ComputeOutputInfo({ node.output(0)}, layer, {m_TensorsInfo[node.input(0)].m_info->GetShape()});
1111 layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
1115 RegisterInputSlots(layer, {node.input(0)});
1118 RegisterOutputSlots(layer, {node.output(0)});
1121 void OnnxParser::ParseClip(
const onnx::NodeProto& node)
1123 ParseActivation(node, ActivationFunction::BoundedReLu);
1126 void OnnxParser::ParseSigmoid(
const onnx::NodeProto& node)
1128 ParseActivation(node, ActivationFunction::Sigmoid);
1131 void OnnxParser::ParseTanh(
const onnx::NodeProto& node)
1133 ParseActivation(node, ActivationFunction::TanH);
1136 void OnnxParser::ParseRelu(
const onnx::NodeProto& node)
1138 ParseActivation(node, ActivationFunction::ReLu);
1141 void OnnxParser::ParseLeakyRelu(
const onnx::NodeProto& node)
1143 ParseActivation(node, ActivationFunction::LeakyReLu);
// NOTE(review): mangled fragment of ParseAdd, ParseAveragePool,
// ParseBatchNormalization and ParseConstant. Many statements (descriptor
// setup, layer creation for Add, closing braces) are missing.
// Code left byte-identical; comments only.
1146 void OnnxParser::ParseAdd(
const onnx::NodeProto& node)
1157 auto inputs = AddPrepareBroadcast(node.input(0), node.input(1));
1158 auto input0 = *m_TensorsInfo[inputs.first].m_info;
1159 auto input1 = *m_TensorsInfo[inputs.second].m_info;
1160 ARMNN_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
// Per-dimension broadcast check: dims must match or one of them must be 1.
1162 unsigned int numDims = input0.GetNumDimensions();
1163 for (
unsigned int i = 0; i < numDims; i++)
1165 unsigned int dim0 = input0.GetShape()[i];
1166 unsigned int dim1 = input1.GetShape()[i];
1167 if (dim0 != dim1 && dim0 != 1 && dim1 != 1)
1170 boost::format(
"Broadcast is only supported for scalar or 1D tensors in Add node '%1%'. " 1171 "Input dimensions should either match or one should be of size 1 and here, " 1174 % TensorInfoAsString(*m_TensorsInfo[inputs.first].m_info, inputs.first,
1175 m_TensorsInfo[inputs.first].m_dtype)
1176 % TensorInfoAsString(*m_TensorsInfo[inputs.second].m_info, inputs.second,
1177 m_TensorsInfo[inputs.second].m_dtype)
1186 auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
1187 { m_TensorsInfo[inputs.first].m_info->GetShape(),
1188 m_TensorsInfo[inputs.second].m_info->GetShape() });
// Constant operands are materialized as Constant layers feeding the Add.
1192 if(m_TensorsInfo[inputs.first].isConstant()) {
1193 CreateConstantLayer(inputs.first, boost::str(boost::format(
"Add:constant_of_%1%") % node.input(0)));
1195 if(m_TensorsInfo[inputs.second].isConstant()) {
1196 CreateConstantLayer(inputs.second, boost::str(boost::format(
"Add:constant_of_%1%") % node.input(1)));
1198 RegisterInputSlots(layer, {inputs.first, inputs.second});
1201 RegisterOutputSlots(layer, {node.output(0)});
// AveragePool: count_include_pad toggles the padding-exclusion mode.
1204 void OnnxParser::ParseAveragePool(
const onnx::NodeProto& node)
1209 uint32_t count_include_pad = 0;
1210 count_include_pad = ReadOptionalNodeUint32Attribute(node,
"count_include_pad");
1211 if(count_include_pad) {
1214 AddPoolingLayer(node, desc);
// BatchNormalization: inputs 1-4 (scale, bias, mean, var) must be constant.
1217 void OnnxParser::ParseBatchNormalization(
const onnx::NodeProto& node)
1225 for(
int ind = 1; ind < node.input_size(); ++ind)
1227 auto tensor = node.input(ind);
1228 if(! m_TensorsInfo[tensor].isConstant())
1231 boost::format(
"Input tensor '%1%' should be constant in BatchNormalization node '%2%' %3%")
1238 float epsilon = ReadOptionalNodeFloatAttribute(node,
"epsilon", 1e-5f);
1240 desc.
m_Eps = epsilon;
1242 auto scaleTensor = CreateConstTensor(node.input(1));
1243 auto biasTensor = CreateConstTensor(node.input(2));
1244 auto meanTensor = CreateConstTensor(node.input(3));
1245 auto varTensor = CreateConstTensor(node.input(4));
1252 node.name().c_str());
1255 auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {m_TensorsInfo[node.input(0)].m_info->GetShape()});
1256 layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
1258 RegisterInputSlots(layer, {node.input(0)});
1261 RegisterOutputSlots(layer, {node.output(0)});
// Constant: stores the embedded TensorProto and emits a Constant layer.
1264 void OnnxParser::ParseConstant(
const onnx::NodeProto& node)
1267 if (!node.attribute(0).has_t())
1270 boost::format(
"Value not found for Constant node '%1%' %2%")
1274 const onnx::TensorProto& onnxTensor = node.attribute(0).t();
1281 m_TensorsInfo[node.output(0)].m_tensor = std::make_unique<const onnx::TensorProto>(onnxTensor);
1282 m_TensorsInfo[node.output(0)].m_info = std::make_unique<TensorInfo>(
ToTensorInfo(onnxTensor));
1285 CreateConstantLayer(node.output(0), node.name());
// NOTE(review): mangled fragment of ParseConv. It enforces: 4D (2D conv)
// input, constant weights, dilations of [1,1] only, and group either 1 or
// equal to the input channel count (the latter delegating to the depthwise
// path). Many statements (descriptor fields, CalcPadding calls) are missing.
// Code left byte-identical; comments only.
1288 void OnnxParser::ParseConv(
const onnx::NodeProto& node)
1295 if(m_TensorsInfo[node.input(0)].m_info->GetNumDimensions() != 4)
1298 boost::format(
"ArmNN only supports 2D convolution and Conv layer '%1%' input %2% %3%")
1300 % TensorInfoAsString(*m_TensorsInfo[node.input(0)].m_info, node.input(0),
1301 m_TensorsInfo[node.input(0)].m_dtype)
1305 if(!m_TensorsInfo[node.input(1)].isConstant())
1308 boost::format(
"Weights '%1%' should be constant in Conv layer '%2%' %3%")
1314 auto inputInfo = *m_TensorsInfo[node.input(0)].m_info;
1316 std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(node,
"dilations");
1317 if (!dilations.empty())
1319 std::stringstream ss;
1321 for (
auto dilation : dilations)
1323 ss << dilation <<
", ";
1328 boost::format(
"ArmNN only supports Convolution layers with dilations [1,1], and node '%1%' " 1329 "has dilatation %2% %3%")
1340 std::vector<uint32_t> strides = ReadOptionalNodeUint32ListAttribute(node,
"strides");
1352 std::vector<uint32_t> pads = ReadOptionalNodeUint32ListAttribute(node,
"pads");
// auto_pad handling mirrors the pooling path: SAME_LOWER / SAME_UPPER / VALID.
1357 std::string paddingString = ReadOptionalNodeStringAttribute(node,
"auto_pad");
1358 if(paddingString !=
"VALID" && paddingString !=
"" && paddingString !=
"NOTSET")
1361 if( paddingString ==
"SAME_LOWER")
1365 else if (paddingString ==
"SAME_UPPER")
1372 boost::format(
"Invalid auto_pad attribute for node %1%. " 1373 "Only SAME_UPPER, SAME_LOWER or VALID supported and found %2% %3%")
// NCHW: [2] = height, [3] = width.
1378 uint32_t inputHeight = inputInfo.GetShape()[2];
1379 uint32_t inputWidth = inputInfo.GetShape()[3];
// Kernel size falls back to the weight tensor's spatial dims when the
// kernel_shape attribute is absent.
1381 uint32_t weightHeight;
1382 uint32_t weightWidth;
1383 std::vector<uint32_t> kernel_shape = ReadOptionalNodeUint32ListAttribute(node,
"kernel_shape");
1384 if (kernel_shape.empty())
1386 const TensorInfo weightTensorInfo = *m_TensorsInfo[node.input(1)].m_info;
1387 weightHeight = weightTensorInfo.
GetShape()[2];
1388 weightWidth = weightTensorInfo.
GetShape()[3];
1392 weightHeight = kernel_shape[0];
1393 weightWidth = kernel_shape[1];
1407 uint32_t group = ReadOptionalNodeUint32Attribute(node,
"group", 1);
1410 if (group > inputInfo.GetShape()[1])
"Error parsing Convolution node: %1%. " 1416 "The 'group'=%2% parameter cannot be larger than the " 1417 "channel of the input shape=%3% (in NCHW format). %4%") %
1420 inputInfo.GetShape()[1] %
// group == channels -> depthwise convolution path.
1423 else if (group == inputInfo.GetShape()[1])
1427 AddConvLayerWithDepthwiseConv(node, desc);
1435 boost::format(
"Error parsing Convolution node: %1%. " 1436 "The 'group'=%2% parameter should be 1 or be equal to the " 1437 "channel of the input shape=%3% (in NCHW format). %4%") %
1440 inputInfo.GetShape()[1] %
1446 auto weightTensor = CreateConstTensor(node.input(1));
1448 if (node.input_size() == 3)
1450 if(!m_TensorsInfo[node.input(2)].isConstant())
1453 boost::format(
"Bias '%1%' should be constant in Conv layer '%2%' %3%")
1459 auto biasTensor = CreateConstTensor(node.input(2));
1460 layer = m_Network->AddConvolution2dLayer(desc,
1463 node.name().c_str());
1467 layer = m_Network->AddConvolution2dLayer(desc,
1470 node.name().c_str());
1474 auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
1475 { m_TensorsInfo[node.input(0)].m_info->GetShape(),
1476 m_TensorsInfo[node.input(1)].m_info->GetShape() });
1481 RegisterInputSlots(layer, {node.input(0)});
1484 RegisterOutputSlots(layer, {node.output(0)});
// NOTE(review): mangled fragment of ParseFlatten, ParseGlobalAveragePool,
// ParseMaxPool and ParseReshape. Descriptor setup lines and most braces are
// missing. Code left byte-identical; comments only.
1487 void OnnxParser::ParseFlatten(
const onnx::NodeProto& node)
1493 m_TensorsInfo[node.input(0)].m_dtype,
1494 onnx::TensorProto::FLOAT);
// Flatten collapses dims [0, axis) into the first output dim and
// [axis, rank) into the second.
1496 int64_t axis = ReadOptionalNodeInt64Attribute(node,
"axis", 1);
1497 TensorShape inputShape = m_TensorsInfo[node.input(0)].m_info->GetShape();
1509 boost::format(
"Axis '%1%' invalid. Tensor has '%2%' dimensions in FlattenLayer '%3%'")
1519 for (i = 0; i < axis; i++){
1520 dimension1 *= inputShape[i];
1525 dimension2 *= inputShape[i];
1530 auto outInfo = ComputeReshapeInfo(outputShape, inputShape, node.output(0));
1531 m_TensorsInfo[node.output(0)].m_info = std::make_unique<TensorInfo>(outInfo);
1532 CreateReshapeLayer(node.input(0), node.output(0), node.name());
// GlobalAveragePool: pooling over the whole spatial extent (descriptor setup
// lines not visible here).
1535 void OnnxParser::ParseGlobalAveragePool(
const onnx::NodeProto& node)
1541 TensorShape inputShape = m_TensorsInfo[node.input(0)].m_info->GetShape();
1545 IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, node.name().c_str());
1548 auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {inputShape});
1553 RegisterInputSlots(layer, {node.input(0)});
1556 RegisterOutputSlots(layer, {node.output(0)});
1559 void OnnxParser::ParseMaxPool(
const onnx::NodeProto& node)
1564 AddPoolingLayer(node, desc);
// Reshape: input 0 is FLOAT data, input 1 the INT64 target shape, which must
// be a constant so the shape can be resolved at parse time.
1567 void OnnxParser::ParseReshape(
const onnx::NodeProto& node)
1573 m_TensorsInfo[node.input(0)].m_dtype,
1574 onnx::TensorProto::FLOAT);
1576 m_TensorsInfo[node.input(1)].m_dtype,
1577 onnx::TensorProto::INT64);
1579 if(!m_TensorsInfo[node.input(1)].isConstant())
1582 boost::format(
"Shape '%1%' should be constant in Reshape layer '%2%' %3%")
// A constant data input propagates constness to the output tensor.
1588 if(m_TensorsInfo[node.input(0)].isConstant())
1591 if(m_TensorsInfo.count(node.output(0)) == 0)
1593 m_TensorsInfo[node.output(0)] = OnnxTensor();
1595 m_TensorsInfo[node.output(0)].m_tensor =
1596 std::make_unique<onnx::TensorProto>(*m_TensorsInfo[node.input(0)].m_tensor);
1600 TensorShape inputShape = m_TensorsInfo[node.input(0)].m_info->GetShape();
1602 if(m_TensorsInfo.count(node.output(0)) == 0 || m_TensorsInfo[node.output(0)].m_info ==
nullptr)
1604 uint64_t dims =
static_cast<uint64_t
>(m_TensorsInfo[node.input(1)].m_tensor->int64_data_size());
1605 TensorShape targetShape{
static_cast<unsigned int>(dims), 1};
1607 for(uint i = 0; i < dims; i++)
1609 int val =
CHECKED_INT32(m_TensorsInfo[node.input(1)].m_tensor->int64_data(static_cast<int>(i)));
1610 targetShape[i]=
static_cast<unsigned int>(val);
1613 auto outInfo = ComputeReshapeInfo(targetShape, inputShape, node.output(0));
1614 m_TensorsInfo[node.output(0)].m_info = std::make_unique<TensorInfo>(outInfo);
1617 CreateReshapeLayer(node.input(0), node.output(0), node.name());
// NOTE(review): mangled fragment of PrependForBroadcast (pads input0's shape
// with leading 1s up to input1's rank, emitting a Reshape for non-constant
// inputs or just rewriting the recorded info for constants), SetupInputLayers
// and SetupOutputLayers. Several statements/braces are missing.
// Code left byte-identical; comments only.
1621 void OnnxParser::PrependForBroadcast(
const std::string& outputName,
1622 const std::string& input0,
1623 const std::string& input1)
1628 TensorShape input0Shape = m_TensorsInfo[input0].m_info->GetShape();
1629 TensorShape input1Shape = m_TensorsInfo[input1].m_info->GetShape();
1632 std::vector<uint32_t> newShape;
1635 newShape.push_back(1);
1640 newShape.push_back(input0Shape[dim]);
1642 outputTensorInfo.
SetShape(
TensorShape(static_cast<unsigned int>(newShape.size()), newShape.data()));
1645 m_TensorsInfo[outputName] = OnnxTensor();
1646 m_TensorsInfo[outputName].m_info = std::make_unique<TensorInfo>(outputTensorInfo);
// Non-constant inputs need an actual Reshape layer; constants just carry their
// proto forward under the new name.
1649 if( ! m_TensorsInfo[input0].isConstant())
1651 CreateReshapeLayer(input0, outputName, boost::str(boost::format(
"Add:reshapeOf%1%") % input0));
1655 m_TensorsInfo[outputName].m_tensor = std::make_unique<onnx::TensorProto>(*m_TensorsInfo[input0].m_tensor);
// Graph inputs that are not initializers become InputLayers bound by index.
1660 void OnnxParser::SetupInputLayers()
1663 for(
int inputIndex = 0; inputIndex < m_Graph->input_size(); ++inputIndex)
1665 auto input = m_Graph->input(inputIndex);
1666 if (! m_TensorsInfo[input.name()].isConstant())
1669 m_Network->AddInputLayer(static_cast<armnn::LayerBindingId>(inputIndex), input.name().c_str());
1673 RegisterOutputSlots(layer,{ input.name() });
// Every graph output becomes an OutputLayer; a model without outputs is
// rejected outright.
1678 void OnnxParser::SetupOutputLayers()
1680 if(m_Graph->output_size() == 0)
1682 throw ParseException(boost::str(boost::format(
"The given model does not have any outputs %1%")
1686 for(
int outputIndex = 0; outputIndex < m_Graph->output_size(); ++outputIndex)
1689 m_Network->AddOutputLayer(static_cast<armnn::LayerBindingId>(outputIndex),
1690 m_Graph->output(outputIndex).name().c_str());
1692 RegisterInputSlots(layer, { m_Graph->output(outputIndex).name() });
1696 void OnnxParser::RegisterInputSlots(
IConnectableLayer* layer,
const std::vector<std::string>& tensorIds)
1702 boost::str(boost::format(
"The number of tensor inputs (%1%) does not match the number expected (%2%) %3%") %
1707 for (
unsigned int slotIndex = 0; slotIndex < layer->
GetNumInputSlots(); ++slotIndex)
1709 std::string tensorId = tensorIds[slotIndex];
1712 auto it = m_TensorConnections.find(tensorId);
1714 if (it == m_TensorConnections.end())
1717 m_TensorConnections[tensorId] = TensorSlots();
1719 m_TensorConnections[tensorId].inputSlots.push_back(slot);
1723 void OnnxParser::RegisterOutputSlots(
IConnectableLayer* layer,
const std::vector<std::string>& tensorIds)
1729 boost::str(boost::format(
"The number of tensor outputs (%1%) does not match the number expected (%2%) %3% ")
1735 for (
unsigned int slotIndex = 0; slotIndex < layer->
GetNumOutputSlots(); ++slotIndex)
1737 std::string tensorId = tensorIds[slotIndex];
1740 auto it = m_TensorConnections.find(tensorId);
1742 if (it == m_TensorConnections.end())
1745 m_TensorConnections[tensorId] = TensorSlots();
1748 TensorSlots& tensorSlots = m_TensorConnections[tensorId];
1751 if (tensorSlots.outputSlot !=
nullptr)
1754 boost::format(
"Another layer has already registered itself as the producer of " 1759 tensorSlots.outputSlot = slot;
1765 for(
int i = 0; i < m_Graph->input_size(); ++i)
1767 auto input = m_Graph->input(i);
1768 if(input.name() == name)
1770 return std::make_pair(static_cast<armnn::LayerBindingId>(i),
ToTensorInfo(input));
1779 for(
int i = 0; i < m_Graph->output_size(); ++i)
1781 auto output = m_Graph->output(i);
1782 if(output.name() == name)
1784 return std::make_pair(static_cast<armnn::LayerBindingId>(i),
ToTensorInfo(output));
1793 if(model ==
nullptr) {
1795 boost::format(
"The given model cannot be null %1%")
1799 std::vector<std::string> inputNames;
1800 std::map<std::string, bool> isConstant;
1801 for(
auto tensor : model->graph().initializer())
1803 isConstant[tensor.name()] =
true;
1805 for(
auto input : model->graph().input())
1807 auto it = isConstant.find(input.name());
1808 if(it == isConstant.end())
1810 inputNames.push_back(input.name());
1818 if(model ==
nullptr) {
1820 boost::format(
"The given model cannot be null %1%")
1824 std::vector<std::string> outputNames;
1825 for(
auto output : model->graph().output())
1827 outputNames.push_back(output.name());
unsigned int GetNumElements() const
Function that calculates the tensor elements by multiplying all dimension size which are Specified...
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
virtual unsigned int GetNumInputSlots() const =0
Returns the number of connectable input slots.
const TensorShape & GetShape() const
uint32_t m_PadLeft
Padding left value in the width dimension.
std::string AsString() const
A ReshapeDescriptor for the ReshapeLayer.
virtual BindingPointInfo GetNetworkOutputBindingInfo(const std::string &name) const override
Retrieve binding info (layer id and tensor info) for the network output identified by the given layer...
uint32_t m_PoolWidth
Pooling width value.
virtual armnn::INetworkPtr CreateNetworkFromTextFile(const char *graphFile) override
Create the network from a protobuf text file on disk.
A Convolution2dDescriptor for the Convolution2dLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
unsigned int GetNumBytes() const
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
uint32_t m_PadTop
Padding top value in the height dimension.
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t &outPadHead, uint32_t &outPadTail, bool samePadding)
uint32_t m_PadRight
Padding right value in the width dimension.
#define VALID_INPUTS(NODE, VALID_INPUTS)
Copyright (c) 2020 ARM Limited.
static ModelPtr LoadModelFromBinaryFile(const char *fileName)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
void SetShape(const TensorShape &newShape)
static ModelPtr LoadModelFromTextFile(const char *fileName)
virtual armnn::INetworkPtr CreateNetworkFromBinaryFile(const char *graphFile) override
Create the network from a protobuf binary file on disk.
TensorShape m_TargetShape
Target shape value.
static std::vector< std::string > GetInputs(ModelPtr &model)
Retrieve inputs names.
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_PadTop
Padding top value in the height dimension.
static std::vector< std::string > GetOutputs(ModelPtr &model)
Retrieve outputs names.
armnn::TensorInfo ToTensorInfo(Deserializer::TensorRawPtr tensorPtr)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
std::unique_ptr< onnx::ModelProto > ModelPtr
virtual BindingPointInfo GetNetworkInputBindingInfo(const std::string &name) const override
Retrieve binding info (layer id and tensor info) for the network input identified by the given layer ...
uint32_t m_PadRight
Padding right value in the width dimension.
An output connection slot for a layer.
A FullyConnectedDescriptor for the FullyConnectedLayer.
bool m_BiasEnabled
Enable/disable bias.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
#define CHECK_VALID_SIZE(ACTUAL,...)
#define CHECKED_NON_NEGATIVE(VALUE)
#define ARMNN_ASSERT(COND)
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
An ActivationDescriptor for the ActivationLayer.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max. Average, L2).
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual armnn::INetworkPtr CreateNetworkFromString(const std::string &protoText) override
Create the network directly from protobuf text in a string. Useful for debugging/testing.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
static ModelPtr LoadModelFromString(const std::string &inputString)
#define CHECK_VALID_DATATYPE(NODE, TENSOR, ACTUAL,...)
armnn::BindingPointInfo BindingPointInfo
virtual std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const =0
Infer the shape of the output(s) based on the provided input shape(s)
#define CHECKED_INT32(VALUE)
A Pooling2dDescriptor for the Pooling2dLayer.
std::unique_ptr< IOnnxParser, void(*)(IOnnxParser *parser)> IOnnxParserPtr
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
unsigned int GetNumElements() const