ArmNN 21.02 - TfParser.cpp
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "TfParser.hpp"
7 
9 
10 #include <armnn/TypesUtils.hpp>
11 #include <armnn/Descriptors.hpp>
12 
13 #include <armnnUtils/Permute.hpp>
14 #include <armnnUtils/DataLayoutIndexed.hpp>
15 #include <armnnUtils/Transpose.hpp>
16 #include <armnn/utility/IgnoreUnused.hpp>
17 #include <armnn/utility/NumericCast.hpp>
18 #include <armnn/utility/PolymorphicDowncast.hpp>
19 
20 #include <GraphTopologicalSort.hpp>
21 #include <ParserHelper.hpp>
22 
23 #include <google/protobuf/io/zero_copy_stream_impl.h>
24 #include <google/protobuf/text_format.h>
25 
26 #include <tensorflow/core/framework/graph.pb.h>
27 
28 #include <fmt/core.h>
29 #include <fmt/format.h>
30 #include <iostream>
31 #include <numeric>
32 
33 using namespace armnnUtils;
34 using namespace armnn;
35 
36 namespace armnnTfParser
37 {
38 
39 ITfParser::ITfParser() : pTfParserImpl(new ITfParser::TfParserImpl()){}
40 
41 ITfParser::~ITfParser() = default;
42 
43 ITfParser *ITfParser::CreateRaw()
44 {
45  return new ITfParser();
46 }
47 
48 ITfParserPtr ITfParser::Create()
49 {
50  return ITfParserPtr(CreateRaw(), &ITfParser::Destroy);
51 }
52 
53 void ITfParser::Destroy(ITfParser *parser)
54 {
55  delete parser;
56 }
57 
58 armnn::INetworkPtr ITfParser::CreateNetworkFromTextFile(const char* graphFile,
59  const std::map<std::string, armnn::TensorShape>& inputShapes,
60  const std::vector<std::string>& requestedOutputs)
61 {
62  return pTfParserImpl->CreateNetworkFromTextFile(graphFile, inputShapes, requestedOutputs);
63 }
64 
65 armnn::INetworkPtr ITfParser::CreateNetworkFromBinaryFile(const char* graphFile,
66  const std::map<std::string, armnn::TensorShape>& inputShapes,
67  const std::vector<std::string>& requestedOutputs)
68 {
69  return pTfParserImpl->CreateNetworkFromBinaryFile(graphFile, inputShapes, requestedOutputs);
70 }
71 
72 armnn::INetworkPtr ITfParser::CreateNetworkFromString(const char* protoText,
73  const std::map<std::string, armnn::TensorShape>& inputShapes,
74  const std::vector<std::string>& requestedOutputs)
75 {
76  return pTfParserImpl->CreateNetworkFromString(protoText, inputShapes, requestedOutputs);
77 }
78 
79 BindingPointInfo ITfParser::GetNetworkInputBindingInfo(const std::string& name) const
80 {
81  return pTfParserImpl->GetNetworkInputBindingInfo(name);
82 }
83 
84 BindingPointInfo ITfParser::GetNetworkOutputBindingInfo(const std::string& name) const
85 {
86  return pTfParserImpl->GetNetworkOutputBindingInfo(name);
87 }
88 namespace
89 {
90 
91 const PermutationVector NHWCToArmNN = { 0, 2, 3, 1 };
92 const PermutationVector ArmNNToNHWC = { 0, 3, 1, 2 };
93 
94 
95 template <typename Callable>
96 void ReadMandatoryNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
97  const std::string& attribName,
98  tensorflow::AttrValue::ValueCase expectedValueCase,
99  Callable callable)
100 {
101  auto iter = nodeDef.attr().find(attribName);
102  if (iter != nodeDef.attr().end())
103  {
104  const auto& attrValue = iter->second;
105  if (attrValue.value_case() == expectedValueCase)
106  {
107  callable(attrValue);
108  }
109  else
110  {
111  throw ParseException(
112  fmt::format("Attribute {} of node {} expected to have {} as tensorflow::AttrValue::ValueCase, "
113  "but found {} instead {}",
114  attribName,
115  nodeDef.name(),
116  static_cast<int>(expectedValueCase),
117  static_cast<int>(attrValue.value_case()),
118  CHECK_LOCATION().AsString()));
119  }
120  }
121  else
122  {
123  throw ParseException(
124  fmt::format("Could not find required attribute {} in node {} {}",
125  attribName,
126  nodeDef.name(),
127  CHECK_LOCATION().AsString()));
128  }
129 }
130 
131 template <typename Callable>
132 void ReadOptionalNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
133  const std::string& attribName,
134  tensorflow::AttrValue::ValueCase expectedValueCase,
135  Callable callable)
136 {
137  auto iter = nodeDef.attr().find(attribName);
138  if (iter != nodeDef.attr().end())
139  {
140  const auto& attrValue = iter->second;
141  if (attrValue.value_case() == expectedValueCase)
142  {
143  callable(attrValue);
144  }
145  else
146  {
147  throw ParseException(
148  fmt::format("Attribute {} of node {} expected to have {} as tensorflow::AttrValue::ValueCase, "
149  "but found {} instead {}",
150  attribName,
151  nodeDef.name(),
152  static_cast<int>(expectedValueCase),
153  static_cast<int>(attrValue.value_case()),
154  CHECK_LOCATION().AsString()));
155  }
156  }
157 }
158 
159 float ReadMandatoryNodeFloatAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
160 {
161  float attribValue = 0.0f;
162  ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kF,
163  [&attribValue](const tensorflow::AttrValue& attrValue)
164  {
165  attribValue = attrValue.f();
166  });
167  return attribValue;
168 }
169 
170 int32_t ReadMandatoryNodeInt32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
171 {
172  int32_t attribValue = 0;
173  ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
174  [&attribValue](const tensorflow::AttrValue& attrValue)
175  {
176  attribValue = static_cast<int32_t>(attrValue.i());
177  });
178  return attribValue;
179 }
180 
181 bool ReadMandatoryNodeBoolAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
182 {
183  bool attribValue = false;
184  ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
185  [&attribValue](const tensorflow::AttrValue& attrValue)
186  {
187  attribValue = static_cast<bool>(attrValue.b());
188  });
189  return attribValue;
190 }
191 
192 uint32_t ReadMandatoryNodeUint32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
193 {
194  uint32_t attribValue = 0u;
195  ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
196  [&attribValue](const tensorflow::AttrValue& attrValue)
197  {
198  attribValue = static_cast<uint32_t>(attrValue.i());
199  });
200  return attribValue;
201 }
202 
203 std::string ReadMandatoryNodeStringAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
204 {
205  std::string attribValue = "";
206  ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
207  [&attribValue](const tensorflow::AttrValue& attrValue)
208  {
209  attribValue = attrValue.s();
210  });
211  return attribValue;
212 }
213 
214 std::vector<uint32_t> ReadMandatoryNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
215  const std::string& name)
216 {
217  std::vector<uint32_t> attriList;
218  ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
219  [&attriList](const tensorflow::AttrValue& attrValue)
220  {
221  for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
222  {
223  attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
224  }
225  });
226 
227  return attriList;
228 }
229 
230 std::vector<uint32_t> ReadOptionalNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
231  const std::string& name)
232 {
233  std::vector<uint32_t> attriList;
234  ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
235  [&attriList](const tensorflow::AttrValue& attrValue)
236  {
237  for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
238  {
239  attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
240  }
241  });
242 
243  return attriList;
244 }
245 
246 std::string ReadOptionalNodeStringAttribute(const tensorflow::NodeDef& nodeDef,
247  const std::string& name,
248  const std::string& defaultValue = "")
249 {
250  std::string attribValue = defaultValue;
251  ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
252  [&attribValue](const tensorflow::AttrValue& attrValue)
253  {
254  attribValue = attrValue.s();
255  });
256  return attribValue;
257 }
258 
259 bool ReadOptionalNodeBoolAttribute(const tensorflow::NodeDef& nodeDef,
260  const std::string& name,
261  bool defaultValue = false)
262 {
263  bool attribValue = defaultValue;
264  ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
265  [&attribValue](const tensorflow::AttrValue& attrValue)
266  {
267  attribValue = attrValue.b();
268  });
269  return attribValue;
270 }
271 
272 tensorflow::DataType ReadMandatoryNodeTypeAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
273 {
274  tensorflow::DataType attribValue = tensorflow::DT_INVALID;
275  ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kType,
276  [&attribValue](const tensorflow::AttrValue& attrValue)
277  {
278  attribValue = attrValue.type();
279  });
280  return attribValue;
281 }
282 
283 TensorInfo PrepareReshape(const TensorInfo& input, const std::vector<int32_t>& targetDims)
284 {
285  std::vector<unsigned int> outDims(targetDims.begin(), targetDims.end());
286  const auto stretchDim = std::find(targetDims.begin(), targetDims.end(), -1);
287 
288  if (stretchDim != targetDims.end())
289  {
290  if (std::find(std::next(stretchDim), targetDims.end(), -1) != targetDims.end())
291  {
292  throw ParseException(
293  fmt::format("At most one component of shape can be -1 {}",
294  CHECK_LOCATION().AsString()));
295  }
296 
297  auto targetNumElements =
298  armnn::numeric_cast<unsigned int>(
299  std::accumulate(targetDims.begin(), targetDims.end(), -1, std::multiplies<int32_t>()));
300  auto stretchIndex = static_cast<size_t>(std::distance(targetDims.begin(), stretchDim));
301  outDims[stretchIndex] = input.GetNumElements() / targetNumElements;
302  }
303 
304  TensorInfo reshapeInfo = input;
305  reshapeInfo.SetShape(TensorShape{ static_cast<unsigned int>(outDims.size()), outDims.data() });
306 
307  return reshapeInfo;
308 }
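// For illustration, a minimal worked example of the -1 (stretch) handling above: with an input of
// 24 elements (e.g. shape [2,3,4]) and targetDims {-1, 6}, the accumulate over {-1, 6} seeded with -1
// gives (-1 * -1 * 6) = 6, so the stretched dimension becomes 24 / 6 = 4 and the result shape is [4, 6].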
309 
310 // We need the input0Slot to guide the reshape for input1Slot.
311 IOutputSlot* AddBroadcastReshapeLayer(IOutputSlot* input0Slot, IOutputSlot* input1Slot, bool isNHWC,
312  INetwork& m_Network, const tensorflow::NodeDef& nodeDef)
313 {
314  const TensorInfo& input1Info = input1Slot->GetTensorInfo();
315  const TensorInfo inputTensorInfo = input0Slot->GetTensorInfo();
316  const unsigned int matchDim = inputTensorInfo.GetNumDimensions() - (isNHWC ? 1 : 3);
317  std::array<unsigned int, MaxNumOfTensorDimensions> reshapedDimensions;
318  std::fill_n(reshapedDimensions.begin(), inputTensorInfo.GetNumDimensions(), 1);
319  reshapedDimensions[matchDim] = input1Info.GetShape()[0];
320 
321  armnn::TensorInfo reshapedInfo = input1Info;
322  reshapedInfo.SetShape(TensorShape{ inputTensorInfo.GetNumDimensions(), reshapedDimensions.data() });
323 
324  const std::string reshapeLayerName = "reshape_for-" + nodeDef.name();
325  ReshapeDescriptor reshapeDesc;
326  reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
327  IConnectableLayer* const reshapeLayer = m_Network.AddReshapeLayer(reshapeDesc, reshapeLayerName.c_str());
328 
329  input1Slot->Connect(reshapeLayer->GetInputSlot(0));
330  reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
331 
332  input1Slot = &reshapeLayer->GetOutputSlot(0);
333 
334  return input1Slot;
335 }
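// For illustration: with an NHWC input0 of shape [1, 16, 16, 3] and a 1-D input1 of shape [3],
// matchDim = 4 - 1 = 3, so input1 is reshaped to [1, 1, 1, 3] before the broadcasted operation.
// For NCHW the channel dimension (index 1) is matched instead, giving a reshape to [1, 3, 1, 1].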
336 
337 OutputId ParseOutputId(const std::string & name)
338 {
339  unsigned int outputNum = 0;
340  size_t colonPos = name.find_last_of(":");
341  if (colonPos != std::string::npos)
342  {
343  int n = std::stoi(name.substr(colonPos+1));
344  if (n<0 || n>100)
345  {
346  throw ParseException(
347  fmt::format("Output tensor id is out of range for {} {}",
348  name,
349  CHECK_LOCATION().AsString()));
350  }
351  outputNum = static_cast<unsigned int>(n);
352  }
353  return OutputId(name.substr(0,colonPos),outputNum);
354 }
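// For illustration: "conv1/Relu:1" parses to OutputId("conv1/Relu", 1), while a plain node name
// such as "conv1/Relu" (no ':') parses to OutputId("conv1/Relu", 0).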
355 
356 #define CHECK_DATA_FORMAT(NODE_DEF, FORMAT, NODE_TYPE) \
357  if( FORMAT != "NHWC" && FORMAT != "NCHW" ) \
358  { \
359  throw ParseException( \
360  fmt::format("Unsupported data format {} passed for {} node {}. " \
361  "Only NHWC and NCHW supported {}", \
362  FORMAT, \
363  NODE_TYPE, \
364  NODE_DEF.name(), \
365  CHECK_LOCATION().AsString())); \
366  }
367 
368 #define CHECK_PADDING_TYPE(NODE_DEF, PADDING) \
369  if(PADDING != "SAME" && PADDING != "VALID" ) \
370  { \
371  throw ParseException( \
372  fmt::format("Only 'SAME' and 'VALID' padding supported. Got {} for {} {}", \
373  PADDING, \
374  NODE_DEF.name(), \
375  CHECK_LOCATION().AsString())); \
376  } \
377 
378 } // namespace
379 
380 const std::map<std::string, ITfParser::TfParserImpl::OperationParsingFunction>
381  ITfParser::TfParserImpl::ms_OperationNameToParsingFunctions = {
382  { "Const", &TfParserImpl::ParseConst },
383  { "Add", &TfParserImpl::ParseAdd },
384  { "AddN", &TfParserImpl::ParseAddN },
385  { "BiasAdd", &TfParserImpl::ParseBiasAdd },
386  { "Identity", &TfParserImpl::ParseIdentity },
387  { "Conv2D", &TfParserImpl::ParseConv2D },
388  { "DepthwiseConv2dNative", &TfParserImpl::ParseDepthwiseConv2D },
389  { "ExpandDims", &TfParserImpl::ParseExpandDims },
390  { "FusedBatchNorm", &TfParserImpl::ParseFusedBatchNorm },
391  { "Gather", &TfParserImpl::ParseGather},
392  { "Greater", &TfParserImpl::ParseGreater},
393  { "ConcatV2", &TfParserImpl::ParseConcat },
394  { "LRN", &TfParserImpl::ParseLrn },
395  { "MatMul", &TfParserImpl::ParseMatMul },
396  { "Mean", &TfParserImpl::ParseMean },
397  { "Mul", &TfParserImpl::ParseMul },
398  { "Placeholder", &TfParserImpl::ParsePlaceholder },
399  { "RealDiv", &TfParserImpl::ParseRealDiv },
400  { "Relu", &TfParserImpl::ParseRelu },
401  { "Relu6", &TfParserImpl::ParseRelu6 },
402  { "Reshape", &TfParserImpl::ParseReshape },
403  { "ResizeBilinear", &TfParserImpl::ParseResizeBilinear },
404  { "Rsqrt", &TfParserImpl::ParseRsqrt },
405  { "Shape", &TfParserImpl::ParseShape },
406  { "Squeeze", &TfParserImpl::ParseSqueeze },
407  { "Sigmoid", &TfParserImpl::ParseSigmoid },
408  { "Softmax", &TfParserImpl::ParseSoftmax },
409  { "Softplus", &TfParserImpl::ParseSoftplus },
410  { "Split", &TfParserImpl::ParseSplit },
411  { "StridedSlice", &TfParserImpl::ParseStridedSlice },
412  { "Tanh", &TfParserImpl::ParseTanh },
413  { "MaxPool", &TfParserImpl::ParseMaxPool },
414  { "AvgPool", &TfParserImpl::ParseAvgPool },
415  { "Maximum", &TfParserImpl::ParseMaximum },
416  { "Minimum", &TfParserImpl::ParseMinimum },
417  { "Equal", &TfParserImpl::ParseEqual },
418  { "Pad", &TfParserImpl::ParsePad },
419  { "Sub", &TfParserImpl::ParseSub },
420  { "Pack" , &TfParserImpl::ParseStack },
421  { "Stack", &TfParserImpl::ParseStack },
422  { "Transpose", &TfParserImpl::ParseTranspose },
423 };
424 
425 const std::list<std::string> ITfParser::TfParserImpl::m_ControlInputs = {
426  "Assert"
427 };
428 
429 void CalcPadding(uint32_t inputSize,
430  uint32_t filterSize,
431  uint32_t stride,
432  uint32_t dilation,
433  uint32_t& paddingFront,
434  uint32_t& paddingBack,
435  bool samePadding)
436 {
437  paddingFront = 0;
438  paddingBack = 0;
439  if (samePadding)
440  {
441  uint32_t outputSize = (inputSize + stride - 1) / stride;
442  uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
443  uint32_t temp = (outputSize - 1) * stride + dilatedSize;
444  if (temp > inputSize)
445  {
446  paddingFront = (temp - inputSize) / 2;
447  paddingBack = (temp - inputSize) - paddingFront;
448  }
449  }
450 }
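// Worked example of the SAME-padding arithmetic above (for illustration): inputSize = 7,
// filterSize = 3, stride = 2, dilation = 1 gives outputSize = (7 + 2 - 1) / 2 = 4, dilatedSize = 3 and
// temp = (4 - 1) * 2 + 3 = 9, so paddingFront = (9 - 7) / 2 = 1 and paddingBack = 1.
// With samePadding == false (VALID) both paddings stay 0.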
451 
452 /// An abstract base class which represents a single TensorFlow operation (node)
453 /// that has been (potentially partially) converted to ArmNN.
454 /// It may not yet have been fully converted into actual ArmNN layers.
455 class ParsedTfOperation
456 {
457 public:
458  ParsedTfOperation(ITfParser::TfParserImpl* parser, const tensorflow::NodeDef& node)
459  : m_Parser(parser)
460  , m_Node(node)
461  {
462  }
463 
464  virtual ~ParsedTfOperation() {};
465 
466  const tensorflow::NodeDef& GetNode() const { return m_Node; }
467 
468  /// Gets the ArmNN IOutputSlot corresponding to the given output index of the Tensorflow operation.
469  /// This may result in the creation of Armnn layers if this was deferred (e.g. see ParsedConstTfOperation).
470  virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) = 0;
471 
472 /// If this operation is an Identity then this will follow it and return the 'parent' operation (recursively).
473  virtual ParsedTfOperation* ResolveIdentityOperations()
474  {
475  return this;
476  }
477 
478 protected:
479  ITfParser::TfParserImpl* m_Parser;
480  const tensorflow::NodeDef& m_Node;
481 };
482 
483 /// A ParsedTfOperation where the ArmNN equivalent is a single layer,
484 /// with output slots that correspond directly to the Tf node outputs.
485 class SingleLayerParsedTfOperation : public ParsedTfOperation
486 {
487 public:
488  SingleLayerParsedTfOperation(ITfParser::TfParserImpl* parser,
489  const tensorflow::NodeDef& node,
490  IConnectableLayer* layer)
491  : ParsedTfOperation(parser, node)
492  , m_Layer(layer)
493  {
494  }
495 
496  IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
497  {
498  ARMNN_ASSERT(m_Layer);
499  // Assumes one-to-one mapping between Tf and armnn output slots.
500  unsigned int armnnOutputSlotIdx = tfOutputIndex;
501  if (armnnOutputSlotIdx >= m_Layer->GetNumOutputSlots())
502  {
503  throw ParseException(
504  fmt::format("The requested output slot #{} "
505  "for {} does not exist {}",
506  armnnOutputSlotIdx,
507  m_Layer->GetName(),
508  CHECK_LOCATION().AsString()));
509  }
510  return m_Layer->GetOutputSlot(armnnOutputSlotIdx);
511  }
512 
513 protected:
514  IConnectableLayer* m_Layer;
515 };
516 
517 /// A SingleLayerParsedTfOperation for deferred layer creation.
518 class DeferredSingleLayerParsedTfOperation : public SingleLayerParsedTfOperation
519 {
520 public:
521  DeferredSingleLayerParsedTfOperation(ITfParser::TfParserImpl* parser, const tensorflow::NodeDef& node)
522  : SingleLayerParsedTfOperation(parser, node, nullptr)
523  {
524  }
525 
526  IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
527  {
528  if (!m_Layer)
529  {
530  CreateLayerDeferred();
531  }
532  return SingleLayerParsedTfOperation::ResolveArmnnOutputSlot(tfOutputIndex);
533  }
534 
535 private:
536  virtual void CreateLayerDeferred() = 0;
537 };
538 
539 
540 ITfParser::TfParserImpl::TfParserImpl()
541  : m_Network(nullptr, nullptr)
542 {
543 }
544 
545 
546 const tensorflow::NodeDef* ITfParser::TfParserImpl::ResolveIdentityNode(const tensorflow::NodeDef* nodeDef)
547 {
548  if (nodeDef->op() != "Identity")
549  {
550  return nodeDef;
551  }
552 
553  if (nodeDef->input_size() != 1)
554  {
555  throw ParseException(
556  fmt::format("Identity node should have a single input! {} has {} inputs {}",
557  nodeDef->name(),
558  nodeDef->input_size(),
559  CHECK_LOCATION().AsString()));
560  }
561 
562  auto it = m_NodesByName.find(nodeDef->input(0));
563  if (it != m_NodesByName.end())
564  {
565  const tensorflow::NodeDef* inputNode = it->second;
566  return ResolveIdentityNode(inputNode);
567  }
568  else
569  {
570  throw ParseException(
571  fmt::format("Cannot find what the Identity node {} is linked to! {}",
572  nodeDef->name(),
573  CHECK_LOCATION().AsString()));
574  }
575 }
576 
577 std::vector<OutputOfConstNodeDef>
578 ITfParser::TfParserImpl::GetTfInputNodes(const tensorflow::NodeDef& nodeDef) const
579 {
580  std::vector<OutputOfConstNodeDef> ret;
581 
582  if (nodeDef.op() == "Const")
583  {
584  // For some reason a Const node can have "Control Inputs". We ignore them for now.
585  return ret;
586  }
587 
588  ret.reserve(armnn::numeric_cast<size_t>(nodeDef.input_size()));
589  for (int j = 0; j < nodeDef.input_size(); ++j)
590  {
591  OutputId outputId = ParseOutputId(nodeDef.input(j));
592 
593  if (nodeDef.input(j)[0] == '^') // I couldn't find a better test for control inputs.
594  {
595  // We currently allow Control Input from TensorFlow graph but we ignore them from ArmNN graph.
596  continue;
597  }
598 
599  auto inputIt = m_NodesByName.find(outputId.m_IndexedValue);
600  if (inputIt == m_NodesByName.end())
601  {
602  throw ParseException(
603  fmt::format("Can't find node '{}', which is listed as an input of '{}' {}",
604  nodeDef.input(j),
605  nodeDef.name(),
606  CHECK_LOCATION().AsString()));
607  }
608  ret.push_back(OutputOfConstNodeDef(inputIt->second,outputId.m_Index));
609  }
610 
611  return ret;
612 }
613 
614 std::vector<OutputOfParsedTfOperation>
615 ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(const tensorflow::NodeDef& nodeDef,
616  std::size_t expectedNumInputs)
617 {
618  // Fetches the tensorflow nodes connected as inputs and validate the size.
619  std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
620  const std::size_t numInputs = nodes.size();
621  if (numInputs != expectedNumInputs)
622  {
623  throw ParseException(
624  fmt::format("Unexpected number of inputs for node {}. Expected {}, found {} {}",
625  nodeDef.name(),
626  expectedNumInputs,
627  numInputs,
628  CHECK_LOCATION().AsString()));
629  }
630  // Fetches the corresponding ParsedTfOperation operations
631  std::vector<OutputOfParsedTfOperation> result;
632  for (auto&& node : nodes)
633  {
634  auto it = m_ParsedTfOperations.find(node.m_IndexedValue->name());
635  if (it == m_ParsedTfOperations.end())
636  {
637  throw ParseException(
638  fmt::format("Node with name '{}' has not been parsed {}",
639  node.m_IndexedValue->name(),
640  CHECK_LOCATION().AsString()));
641  }
642  ParsedTfOperation* parsedOp = it->second.get();
643  // Transparently 'skip' any Identity operations. This simplifies the logic inside the ParseXXX() functions.
644  parsedOp = parsedOp->ResolveIdentityOperations();
645  result.push_back(OutputOfParsedTfOperation(parsedOp,node.m_Index));
646  }
647  return result;
648 }
649 
650 IConnectableLayer* ITfParser::TfParserImpl::CreateAdditionLayer(
651  const tensorflow::NodeDef& nodeDef,
652  IOutputSlot* input0Slot,
653  IOutputSlot* input1Slot,
654  const std::string& layerName)
655 {
656  const TensorInfo& input0Info = input0Slot->GetTensorInfo();
657  const TensorInfo& input1Info = input1Slot->GetTensorInfo();
658 
659  const unsigned int input0Dim = input0Info.GetNumDimensions();
660  const unsigned int input1Dim = input1Info.GetNumDimensions();
661  if (input0Dim != input1Dim)
662  {
663  // broadcasting where input0 and input1 have different number of dimensions
664  // is only supported for 1D and 4D tensors pair
665  if (input0Dim == 1 && input1Dim == 4)
666  {
667  input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, true, *m_Network, nodeDef);
668  }
669  else if (input0Dim == 4 && input1Dim == 1)
670  {
671  input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, true, *m_Network, nodeDef);
672  }
673  else
674  {
675  throw ParseException(
676  fmt::format("Unsupported broadcast configuration for {} operation {} {}",
677  layerName,
678  nodeDef.name(),
679  CHECK_LOCATION().AsString()));
680  }
681  }
682  IConnectableLayer* const layer = m_Network->AddAdditionLayer(layerName.c_str());
683 
684  input0Slot->Connect(layer->GetInputSlot(0));
685  input1Slot->Connect(layer->GetInputSlot(1));
686 
687  // Ensure the output tensor has the correct dimensions even if a broadcast has been done
688  TensorInfo outputInfo = input0Slot->GetTensorInfo();
689  std::vector<unsigned int> outputShape;
690 
691  const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
692  const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
693 
694  for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
695  {
696  outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
697  }
698 
699  outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
700  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
701 
702  return layer;
703 }
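// For illustration: adding a [1, 4, 4, 3] tensor to a [3] constant first reshapes the 1-D input to
// [1, 1, 1, 3] via AddBroadcastReshapeLayer above; the output shape is then the element-wise maximum
// of the two (possibly reshaped) input shapes, i.e. [1, 4, 4, 3].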
704 
705 IConnectableLayer* ITfParser::TfParserImpl::CreateAdditionLayer(
706  const tensorflow::NodeDef& nodeDef,
707  IConnectableLayer* layerOne,
708  IConnectableLayer* layerTwo,
709  unsigned int numberOfAddition,
710  unsigned long numberOfLayersToConnect,
711  bool isOdd)
712 {
713  IOutputSlot* input0Slot = &layerOne->GetOutputSlot(0);
714  IOutputSlot* input1Slot = &layerTwo->GetOutputSlot(0);
715  std::string layerName(nodeDef.name());
716  if (isOdd || numberOfLayersToConnect != 2)
717  {
718  // we are not connecting the final layer
719  layerName.append("_addN_").append(std::to_string(numberOfAddition));
720  }
721  return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
722 }
723 
724 IConnectableLayer* ITfParser::TfParserImpl::CreateAdditionLayer(
725  const tensorflow::NodeDef& nodeDef,
726  const OutputOfParsedTfOperation& opOne,
727  const OutputOfParsedTfOperation& opTwo,
728  unsigned int numberOfAddition)
729 {
730  IOutputSlot* input0Slot = &opOne.m_IndexedValue->ResolveArmnnOutputSlot(opOne.m_Index);
731  IOutputSlot* input1Slot = &opTwo.m_IndexedValue->ResolveArmnnOutputSlot(opTwo.m_Index);
732  std::string layerName(nodeDef.name());
733  layerName.append("_addN_").append(std::to_string(numberOfAddition));
734  return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
735 }
736 
737 IConnectableLayer* ITfParser::TfParserImpl::CreateAdditionLayer(
738  const tensorflow::NodeDef& nodeDef,
739  const OutputOfParsedTfOperation& op,
740  IConnectableLayer* layer)
741 {
742  IOutputSlot* input0Slot = &op.m_IndexedValue->ResolveArmnnOutputSlot(op.m_Index);
743  IOutputSlot* input1Slot = &layer->GetOutputSlot(0);
744  return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, nodeDef.name());
745 }
746 
747 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseAddN(const tensorflow::NodeDef& nodeDef,
748  const tensorflow::GraphDef& graphDef)
749 {
750  IgnoreUnused(graphDef);
751  uint32_t numberOfInputs = ReadMandatoryNodeUint32Attribute(nodeDef, "N");
752  if (numberOfInputs < 2)
753  {
754  // should never happen
755  throw ParseException(
756  fmt::format("AddN Node with name '{}' has less than two ({}) inputs {}",
757  nodeDef.name(),
758  std::to_string(numberOfInputs),
759  CHECK_LOCATION().AsString()));
760  }
761  else if (numberOfInputs == 2)
762  {
763  //this is the same as a simple Add operation
764  return AddAdditionLayer(nodeDef, false);
765  }
766  else
767  {
768  // build a binary tree of Add layers and return the final Add as the return from the function
769  // if we have an odd number of inputs then the final Add will consist of a layer connecting to an
770  // OutputOfParsedTfOperation, otherwise it will be two layers being added together
771  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numberOfInputs);
772  unsigned int numberOfAdditions = 0;
773  std::vector<IConnectableLayer*> layers;
774  // NOTE: at this point we will have a minimum of three inputs
775  for (unsigned int i = 0; i < numberOfInputs; ++i)
776  {
777  // every time i is odd we have two inputs to process.
778  bool onSecondItem = i % 2;
779  if (onSecondItem)
780  {
781  ++numberOfAdditions;
782  IConnectableLayer* newLayer = CreateAdditionLayer(
783  nodeDef, inputs[i - 1], inputs[i], numberOfAdditions);
784  layers.push_back(newLayer);
785  }
786  }
787 
788  std::vector<IConnectableLayer*> layersToConnect(layers);
789  unsigned long numberOfLayersToConnect = layersToConnect.size();
790  bool isOdd = numberOfInputs % 2;
791 
792  while (numberOfLayersToConnect > 1)
793  {
794  layers.clear();
795  for (unsigned long i = 0; i < numberOfLayersToConnect; ++i) {
796  bool onSecondItem = i % 2;
797  if (onSecondItem) {
798  ++numberOfAdditions;
799  IConnectableLayer* newLayer = CreateAdditionLayer(
800  nodeDef,
801  layersToConnect[i - 1],
802  layersToConnect[i],
803  numberOfAdditions,
804  numberOfLayersToConnect,
805  isOdd);
806  layers.push_back(newLayer);
807  }
808  }
809  //OK... need to go again... maybe
810  layersToConnect = layers;
811  numberOfLayersToConnect = layersToConnect.size();
812  }
813  IConnectableLayer* finalLayer = layersToConnect[0];
814  // if we had an odd number of inputs we need to connect the final layer to the
815  // last OutputOfParsedTfOperation in order to create the last Add layer we will
816  // be handing back.
817  if (isOdd)
818  {
819  // connect the final layer to the last op
820  finalLayer = CreateAdditionLayer(nodeDef, inputs[numberOfInputs - 1], finalLayer);
821  }
822  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, finalLayer);
823  }
824 }
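// For illustration, how the binary tree above unrolls for N = 5 inputs (a, b, c, d, e): the first loop
// creates (a + b) and (c + d), the while loop then adds those two partial sums together, and because
// N is odd the final Add connects the remaining input e, giving ((a + b) + (c + d)) + e.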
825 
826 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseAdd(const tensorflow::NodeDef& nodeDef,
827  const tensorflow::GraphDef& graphDef)
828 {
829  IgnoreUnused(graphDef);
830  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
831 
832  // If one of the inputs is a MatMul and the other is a const, then we handle both nodes
833  // together as FullyConnected.
834  if (inputs[0].m_IndexedValue->GetNode().op() == "MatMul" &&
835  HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
836  {
837  IConnectableLayer* layer =
838  AddFullyConnectedLayer(inputs[0].m_IndexedValue->GetNode(),
839  &nodeDef,nodeDef.name().c_str());
840  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
841  }
842  else if (HasParsedConstTensor<float>(inputs[0].m_IndexedValue->GetNode().name()) &&
843  inputs[1].m_IndexedValue->GetNode().op() == "MatMul")
844  {
845  IConnectableLayer* layer =
846  AddFullyConnectedLayer(inputs[1].m_IndexedValue->GetNode(),
847  &nodeDef,nodeDef.name().c_str());
848  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
849  }
850  else
851  {
852  // Otherwise it's just a regular addition.
853  return AddAdditionLayer(nodeDef);
854  }
855 }
856 
857 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseBiasAdd(const tensorflow::NodeDef& nodeDef,
858  const tensorflow::GraphDef& graphDef)
859 {
860  IgnoreUnused(graphDef);
861  return AddAdditionLayer(nodeDef, true);
862 }
863 
864 /// A ParsedTfOperation which forwards to another (used for Identity nodes).
865 class ParsedIdentityTfOperation : public ParsedTfOperation
866 {
867 public:
868  ParsedIdentityTfOperation(ITfParser::TfParserImpl* parser,
869  const tensorflow::NodeDef& node,
870  ParsedTfOperation* representative)
871  : ParsedTfOperation(parser, node)
872  , m_Representative(representative)
873  {
874  }
875 
876  virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
877  {
878  ARMNN_ASSERT(m_Representative);
879  return m_Representative->ResolveArmnnOutputSlot(tfOutputIndex);
880  }
881 
882  virtual ParsedTfOperation* ResolveIdentityOperations() override
883  {
884  return m_Representative->ResolveIdentityOperations();
885  }
886 
887 private:
888  ParsedTfOperation* m_Representative;
889 };
890 
891 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseIdentity(const tensorflow::NodeDef& nodeDef,
892  const tensorflow::GraphDef& graphDef)
893 {
894  IgnoreUnused(graphDef);
895  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
896  // Any requests for the output slots of this node should be forwarded to the node connected as input.
897  return std::make_unique<ParsedIdentityTfOperation>(this, nodeDef, inputs[0].m_IndexedValue);
898 }
899 
900 /// A ParsedTfOperation for a Const node.
901 /// Creation of the armnn ConstLayer is deferred until it is actually needed, because Const nodes are mostly used
902 /// for weight inputs to MatMul/Conv2D nodes and in these cases armnn doesn't need a ConstLayer.
903 template <typename T>
904 class ParsedConstTfOperation : public DeferredSingleLayerParsedTfOperation
905 {
906 public:
907  ParsedConstTfOperation(ITfParser::TfParserImpl* parser, const tensorflow::NodeDef& node,
908  const T* tensorData, const TensorInfo& tensorInfo)
909  : DeferredSingleLayerParsedTfOperation(parser, node),
910  m_Storage(tensorData, tensorData + tensorInfo.GetNumElements()),
911  m_TensorInfo(tensorInfo)
912  {
913  ARMNN_ASSERT(GetDataTypeSize(tensorInfo.GetDataType()) == sizeof(T));
914  }
915 
916  void CreateLayerDeferred() override
917  {
918  ARMNN_ASSERT(m_Layer == nullptr);
919  m_Layer = m_Parser->m_Network->AddConstantLayer(ConstTensor(m_TensorInfo, m_Storage),
920  m_Node.name().c_str());
921  m_Layer->GetOutputSlot(0).SetTensorInfo(m_TensorInfo);
922  }
923 
924  ConstTensor GetConstTensor(std::vector<T>& outputTensorData) const
925  {
926  outputTensorData.resize(m_TensorInfo.GetNumElements());
927 
928  memcpy(outputTensorData.data(), m_Storage.data(), m_TensorInfo.GetNumBytes());
929 
930  // Updates the result to point to the user provided storage.
931  ConstTensor constTensor(m_TensorInfo, outputTensorData);
932  return constTensor;
933  }
934 
935  const T* GetStorage() const
936  {
937  return m_Storage.data();
938  }
939 
940  const TensorInfo& GetTensorInfo() const
941  {
942  return m_TensorInfo;
943  }
944 
945 private:
946  ///< Manages the lifetime of the tensor data.
947  std::vector<T> m_Storage;
948  ///< Describes the layout of the tensor and points to the data in m_Storage.
949  TensorInfo m_TensorInfo;
950 };
951 
952 DataType ConvertTfTensorDataType(const tensorflow::DataType tfDataType,
953  const tensorflow::NodeDef& nodeDef)
954 {
955  switch (tfDataType)
956  {
957  case tensorflow::DT_FLOAT:
958  return DataType::Float32;
959  break;
960  case tensorflow::DT_INT32:
961  return DataType::Signed32;
962  break;
963  default:
964  throw ParseException(
965  fmt::format("Unknown DataType {} for node {} {}",
966  tensorflow::DataType_Name(tfDataType),
967  nodeDef.name(),
968  CHECK_LOCATION().AsString()));
969  }
970 }
971 
972 struct ParseTfTensorValueList
973 {
974  template<typename DataType>
975  static void Parse(
976  const tensorflow::TensorProto& tfTensor,
977  unsigned int dstElements,
978  std::vector<int8_t>& outputData);
979 
980  template <typename DataType>
981  static void ReadData(const void* srcData, unsigned int numSrcElements,
982  std::vector<int8_t>& dstData, unsigned int numDstElements)
983  {
984  // If there are no entries in the list, perform no action.
985  if (numSrcElements == 0)
986  {
987  return;
988  }
989 
990  // If no size was provided, use the length of the value list.
991  if (numDstElements == 0)
992  {
993  numDstElements = numSrcElements;
994  }
995 
996  // Allocates memory.
997  dstData.resize(std::max(numSrcElements, numDstElements) * sizeof(DataType));
998 
999  const DataType* srcTensor = reinterpret_cast<const DataType*>(srcData);
1000  DataType* dstTensor = reinterpret_cast<DataType*>(dstData.data());
1001 
1002  // Copies the value list entries into the destination.
1003  std::copy(srcTensor, srcTensor + numSrcElements, dstTensor);
1004 
1005  if (numDstElements > numSrcElements)
1006  {
1007  // Uses the last element in the list to fill the remaining entries.
1008  std::fill(dstTensor + numSrcElements, dstTensor + numDstElements, srcTensor[numSrcElements - 1]);
1009  }
1010  }
1011 
1012 };
1013 
1014 template <>
1015 void ParseTfTensorValueList::Parse<float>(const tensorflow::TensorProto& tfTensor,
1016  unsigned int dstElements, std::vector<int8_t>& outputData)
1017 {
1018  ReadData<float>(tfTensor.float_val().data(), static_cast<unsigned int>(tfTensor.float_val_size()),
1019  outputData, dstElements);
1020 }
1021 
1022 template <>
1023 void ParseTfTensorValueList::Parse<int32_t>(const tensorflow::TensorProto& tfTensor,
1024  unsigned int dstElements, std::vector<int8_t>& outputData)
1025 {
1026  ReadData<int32_t>(tfTensor.int_val().data(), static_cast<unsigned int>(tfTensor.int_val_size()),
1027  outputData, dstElements);
1028 }
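// For illustration: a float value list [1.0f, 2.0f] read with numDstElements = 4 yields the buffer
// [1.0f, 2.0f, 2.0f, 2.0f], i.e. ReadData() pads the destination by repeating the last list entry.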
1029 
1030 template <template<typename> class OperatorType, typename T = int8_t>
1031 struct MakeTfOperation
1032 {
1033  template<typename DataType, class... Args>
1034  inline static std::unique_ptr<OperatorType<DataType>> Parse(ITfParser::TfParserImpl* parser,
1035  const tensorflow::NodeDef& node,
1036  Args&&... args)
1037  {
1038  return std::make_unique<OperatorType<DataType>>(parser, node, std::forward<Args>(args)...);
1039  }
1040 };
1041 
1042 template <>
1043 struct MakeTfOperation<ParsedConstTfOperation>
1044 {
1045  template<typename DataType, class... Args>
1046  inline static std::unique_ptr<ParsedConstTfOperation<DataType>> Parse(ITfParser::TfParserImpl* parser,
1047  const tensorflow::NodeDef& node, const std::vector<int8_t>& tensorData, const TensorInfo& tensorInfo)
1048  {
1049  return std::make_unique<ParsedConstTfOperation<DataType>>(parser, node,
1050  reinterpret_cast<const DataType*>(tensorData.data()), tensorInfo);
1051  }
1052 };
1053 
1054 template <class FuncType>
1055 struct InvokeParseFunction
1056 {
1057  template<class ResType, class... Args>
1058  inline static ResType Result(DataType dataType, Args&&... args)
1059  {
1060  if (dataType == DataType::Float32)
1061  {
1062  return FuncType::template Parse<float>(std::forward<Args>(args)...);
1063  }
1064  else if (dataType == DataType::Signed32)
1065  {
1066  return FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
1067  }
1068 
1069  return ResType();
1070  }
1071 
1072  template<class... Args>
1073  inline static void Result(DataType dataType, Args&&... args)
1074  {
1075  if (dataType == DataType::Float32)
1076  {
1077  FuncType::template Parse<float>(std::forward<Args>(args)...);
1078  }
1079  else if (dataType == DataType::Signed32)
1080  {
1081  FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
1082  }
1083  }
1084 };
1085 
1086 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseConst(const tensorflow::NodeDef& nodeDef,
1087  const tensorflow::GraphDef& graphDef)
1088 {
1089  IgnoreUnused(graphDef);
1090  ARMNN_ASSERT(nodeDef.op() == "Const");
1091 
1092  if (nodeDef.attr().count("value") == 0)
1093  {
1094  throw ParseException(
1095  fmt::format("Value not found for Const node - {} {}",
1096  nodeDef.name(),
1097  CHECK_LOCATION().AsString()));
1098  }
1099 
1100  const tensorflow::TensorProto& tfTensor = nodeDef.attr().at("value").tensor();
1101  const tensorflow::TensorShapeProto& tfTensorShape = tfTensor.tensor_shape();
1102  const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "dtype");
1103 
1104  const auto GetDimensionSize = [](auto& d) { return d.size(); };
1105 
1106  std::vector<unsigned int> dimensionSizes;
1107  std::transform(tfTensorShape.dim().begin(), tfTensorShape.dim().end(),
1108  std::back_inserter(dimensionSizes), GetDimensionSize);
1109 
1110  // Calculates number of elements.
1111  const DataType dataType = ConvertTfTensorDataType(tfDataType, nodeDef);
1112  unsigned int numElements = 0U;
1113 
1114  if (!dimensionSizes.empty())
1115  {
1116  numElements = std::accumulate(dimensionSizes.begin(), dimensionSizes.end(),
1117  1U, std::multiplies<unsigned int>());
1118  }
1119 
1120  std::vector<int8_t> tensorData;
1121 
1122  // Get tensor data from the list of values attribute.
1123  if (tfTensor.tensor_content().empty())
1124  {
1125  InvokeParseFunction<ParseTfTensorValueList>::Result<void>(dataType, tfTensor, numElements, tensorData);
1126 
1127  // If the tensor shape is not defined, but there is a value list, then interpret the data as a 1D
1128  // tensor of the provided number of elements.
1129  if (numElements == 0)
1130  {
1131  const unsigned int tfNumElements =
1132  static_cast<unsigned int>(tensorData.size()) / GetDataTypeSize(dataType);
1133  dimensionSizes.push_back(tfNumElements);
1134  }
1135  }
1136  // Gets tensor data from tensor content attribute.
1137  else
1138  {
1139  tensorData.assign(tfTensor.tensor_content().begin(), tfTensor.tensor_content().end());
1140 
1141  // Checks if a tensor shape is defined for the tensor content.
1142  if (numElements == 0)
1143  {
1144  throw ParseException(
1145  fmt::format("No tensor shape found for Const node - {} {}",
1146  nodeDef.name(),
1147  CHECK_LOCATION().AsString()));
1148  }
1149  }
1150 
1151  // Const node requires at least a list of values or a content attribute.
1152  if (tensorData.empty())
1153  {
1154  throw ParseException(
1155  fmt::format("No tensor data found for Const node - {} {}",
1156  nodeDef.name(),
1157  CHECK_LOCATION().AsString()));
1158  }
1159 
1160  const TensorInfo tensorInfo(static_cast<unsigned int>(dimensionSizes.size()),
1161  dimensionSizes.data(),
1162  dataType);
1163 
1164  // If we have a list of values, then the length of the list must be
1165  // less than or equal to the number of elements implied by the shape argument.
1166  if (tensorData.size() > tensorInfo.GetNumBytes())
1167  {
1168  throw ParseException(
1169  fmt::format("Number of elements ({}) should be less than or equal "
1170  "to the number of elements implied by the shape argument ({}) for Const node - {} {}",
1171  (tensorData.size() / GetDataTypeSize(dataType)),
1172  tensorInfo.GetNumElements(),
1173  nodeDef.name(),
1174  CHECK_LOCATION().AsString()));
1175  }
1176 
1177  return InvokeParseFunction<MakeTfOperation<ParsedConstTfOperation>>::Result<ParsedTfOperationPtr>(
1178  dataType, this, nodeDef, tensorData, tensorInfo);
1179 }
1180 
1181 template<typename Type>
1182 bool ITfParser::TfParserImpl::HasParsedConstTensor(const std::string & nodeName) const
1183 {
1184  auto it = m_ParsedTfOperations.find(nodeName);
1185  if (it == m_ParsedTfOperations.end())
1186  {
1187  return false;
1188  }
1189  return dynamic_cast<ParsedConstTfOperation<Type>*>(it->second.get()) != nullptr;
1190 }
1191 
1192 template<typename Type>
1193 bool ITfParser::TfParserImpl::HasParsedConstTensor(ParsedTfOperation* parsedTfOpPtr) const
1194 {
1195  return dynamic_cast<ParsedConstTfOperation<Type>*>(parsedTfOpPtr) != nullptr;
1196 }
1197 
1198 unsigned int ITfParser::TfParserImpl::GetConstInputIndex(const std::vector<OutputOfParsedTfOperation>& inputs)
1199 {
1200  for (unsigned int i = 0; i < inputs.size(); i++)
1201  {
1202  if (HasParsedConstTensor<int32_t>(inputs[i].m_IndexedValue->GetNode().name()))
1203  {
1204  return i;
1205  }
1206  }
1207  throw ParseException(
1208  fmt::format("ArmNN only supports operators with constant axis. {}",
1209  CHECK_LOCATION().AsString()));
1210 
1211 }
1212 
1213 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseConv2D(const tensorflow::NodeDef& nodeDef,
1214  const tensorflow::GraphDef& graphDef)
1215 {
1216  IgnoreUnused(graphDef);
1217  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1218  IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1219  TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
1220 
1221  if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1222  {
1223  throw ParseException(
1224  fmt::format("ArmNN only supports Convolution layers with constant weights for {}, input {} {}",
1225  nodeDef.name(),
1226  inputs[1].m_IndexedValue->GetNode().name(),
1227  CHECK_LOCATION().AsString()));
1228  }
1229  ParsedConstTfOperation<float>* weightNode =
1230  PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
1231 
1232  std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
1233  std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
1234  std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
1235 
1236  Convolution2dDescriptor desc;
1237  desc.m_BiasEnabled = false;
1238 
1239  CHECK_DATA_FORMAT(nodeDef, dataFormat, "Conv2D");
1240 
1241  DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
1242 
1243  desc.m_DataLayout = dataLayout;
1244 
1245  DataLayoutIndexed dataLayoutIndexed(dataLayout);
1246 
1247  desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
1248  desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];
1249 
1250  std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(nodeDef, "dilations");
1251  if (!dilations.empty())
1252  {
1253  desc.m_DilationX = dilations[dataLayoutIndexed.GetWidthIndex()];
1254  desc.m_DilationY = dilations[dataLayoutIndexed.GetHeightIndex()];
1255  }
1256 
1257  uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
1258  uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];
1259 
1260  // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
1261  // Tensorflow weights are [H, W, In, Out].
1262  // ArmNN weights have to be [Out, H, W, In] when the data layout is NHWC,
1263  // and [Out, In, H, W] when the data layout is NCHW.
1264  PermutationVector permutationVector =
1265  dataLayout == DataLayout::NHWC ?
1266  std::initializer_list<unsigned int>{ 1, 2, 3, 0 } : // NHWC: [H, W, In, Out] -> [Out, H, W, In]
1267  std::initializer_list<unsigned int>{ 2, 3, 1, 0 }; // NCHW: [H, W, In, Out] -> [Out, In, H, W]
1268 
1269  // Swizzle the tensor using the given permutation vector.
1270  const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
1271  const TensorInfo weightTensorSwizzledInfo = armnnUtils::Permuted(weightTensorInfo, permutationVector);
1272 
1273  // Swizzles the content of the tensor's permanent storage into a local storage.
1274  std::vector<float> weightTensorSwizzledData(weightTensorInfo.GetNumElements());
1275  armnnUtils::Permute(weightTensorSwizzledInfo.GetShape(), permutationVector,
1276  weightNode->GetStorage(), weightTensorSwizzledData.data(), sizeof(float));
1277 
1278  // Create a weight tensor with the newly swizzled data.
1279  ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);
1280 
1281  uint32_t weightHeight = weightTensor.GetShape()[dataLayoutIndexed.GetHeightIndex()];
1282  uint32_t weightWidth = weightTensor.GetShape()[dataLayoutIndexed.GetWidthIndex()];
1283 
1284  bool padding = false;
1285  TensorInfo outputInfo;
1286  unsigned int outputHeight = 0;
1287  unsigned int outputWidth = 0;
1288 
1289  CHECK_PADDING_TYPE(nodeDef, paddingString);
1290 
1291  if (paddingString == "SAME")
1292  {
1293  padding = true;
1294  }
1295  else if (paddingString == "VALID")
1296  {
1297  padding = false;
1298  }
1299 
1300  CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, padding);
1301  CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, padding);
1302 
1303  // Calculate output height and width
1304  unsigned int dilatedFilterWidth = weightWidth + (desc.m_DilationX - 1) * (weightWidth - 1);
1305  unsigned int readWidth = (inputWidth + desc.m_PadLeft + desc.m_PadRight) - dilatedFilterWidth;
1306  outputWidth = 1 + (readWidth / desc.m_StrideX);
1307 
1308  unsigned int dilatedFilterHeight = weightHeight + (desc.m_DilationY - 1) * (weightHeight - 1);
1309  unsigned int readHeight = (inputHeight + desc.m_PadTop + desc.m_PadBottom) - dilatedFilterHeight;
1310  outputHeight = 1 + (readHeight / desc.m_StrideY);
1311 
1312  switch (dataLayout)
1313  {
1314  case DataLayout::NHWC:
1315  outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
1316  outputHeight,
1317  outputWidth,
1318  weightTensor.GetShape()[0] },
1319  DataType::Float32);
1320  break;
1321  case DataLayout::NCHW:
1322  default:
1323  outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
1324  weightTensor.GetShape()[0],
1325  outputHeight,
1326  outputWidth },
1327  DataType::Float32);
1328  break;
1329  }
1330 
1331  IConnectableLayer* layer = m_Network->AddConvolution2dLayer(desc,
1332  weightTensor,
1333  EmptyOptional(),
1334  nodeDef.name().c_str());
1335  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1336  inputSlot.Connect(layer->GetInputSlot(0));
1337 
1338  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1339 }
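// Worked example of the output-size calculation above (for illustration): a VALID 3x3 Conv2D with
// stride 1 and dilation 2 over a 7x7 NHWC input gives dilatedFilterWidth = 3 + (2 - 1) * (3 - 1) = 5,
// readWidth = 7 - 5 = 2 and outputWidth = 1 + 2 / 1 = 3, i.e. an output of shape [N, 3, 3, outChannels].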
1340 
1341 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseDepthwiseConv2D(const tensorflow::NodeDef& nodeDef,
1342  const tensorflow::GraphDef& graphDef)
1343 {
1344  IgnoreUnused(graphDef);
1345  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1346  IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1347  TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
1348 
1349  if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1350  {
1351  throw ParseException(
1352  fmt::format("ArmNN only supports Depthwise Convolution layer with constant weights. "
1353  "Non const input found {} for node {} {}",
1354  inputs[1].m_IndexedValue->GetNode().name(),
1355  nodeDef.name(),
1356  CHECK_LOCATION().AsString()));
1357  }
1358 
1359  ParsedConstTfOperation<float>* weightNode =
1360  PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
1361 
1362  std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
1363  std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
1364  std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
1365 
1366  DepthwiseConvolution2dDescriptor desc;
1367  desc.m_BiasEnabled = false;
1368 
1369  CHECK_DATA_FORMAT(nodeDef, dataFormat, "DepthwiseConv2dNative");
1370 
1371  DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
1372 
1373  desc.m_DataLayout = dataLayout;
1374 
1375  DataLayoutIndexed dataLayoutIndexed(dataLayout);
1376 
1377  desc.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
1378  desc.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];
1379  std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(nodeDef, "dilations");
1380  if (!dilations.empty())
1381  {
1382  desc.m_DilationX = dilations[dataLayoutIndexed.GetWidthIndex()];
1383  desc.m_DilationY = dilations[dataLayoutIndexed.GetHeightIndex()];
1384  }
1385 
1386  uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
1387  uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];
1388 
1389  // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
1390  // Tensorflow weights come in the format [H, W, I, M].
1391  // ArmNN weights have to be [M, I, H, W].
1392  PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]
1393 
1394  // Swizzle the tensor using the given permutation vector.
1395  const TensorInfo& weightTensorInfo = weightNode->GetTensorInfo();
1396  const TensorInfo weightTensorSwizzledInfo = armnnUtils::Permuted(weightTensorInfo, permutationVector);
1397 
1398  // Swizzles the content of the tensor's permanent storage into a local storage.
1399  std::vector<float> weightTensorSwizzledData(weightTensorInfo.GetNumElements());
1400  armnnUtils::Permute(weightTensorSwizzledInfo.GetShape(), permutationVector,
1401  weightNode->GetStorage(), weightTensorSwizzledData.data(), sizeof(float));
1402 
1403  // Create a weight tensor with the newly swizzled data.
1404  ConstTensor weightTensor(weightTensorSwizzledInfo, weightTensorSwizzledData);
1405 
1406  uint32_t weightHeight = weightTensor.GetShape()[2];
1407  uint32_t weightWidth = weightTensor.GetShape()[3];
1408 
1409  bool padding = false;
1410  TensorInfo outputInfo;
1411  unsigned int outputHeight = 0;
1412  unsigned int outputWidth = 0;
1413 
1414  CHECK_PADDING_TYPE(nodeDef, paddingString);
1415 
1416  if (paddingString == "SAME")
1417  {
1418  padding = true;
1419  }
1420  else if (paddingString == "VALID")
1421  {
1422  padding = false;
1423  }
1424 
1425  CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, padding);
1426  CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, padding);
1427 
1428  // Calculate output height and width
1429  unsigned int dilatedFilterWidth = weightWidth + (desc.m_DilationX - 1) * (weightWidth - 1);
1430  unsigned int readWidth = (inputWidth + desc.m_PadLeft + desc.m_PadRight) - dilatedFilterWidth;
1431  outputWidth = 1 + (readWidth / desc.m_StrideX);
1432 
1433  unsigned int dilatedFilterHeight = weightHeight + (desc.m_DilationY - 1) * (weightHeight - 1);
1434  unsigned int readHeight = (inputHeight + desc.m_PadTop + desc.m_PadBottom) - dilatedFilterHeight;
1435  outputHeight = 1 + (readHeight / desc.m_StrideY);
1436 
1437  switch (dataLayout)
1438  {
1439  case DataLayout::NHWC:
1440  outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
1441  outputHeight,
1442  outputWidth,
1443  weightTensor.GetShape()[0] * weightTensor.GetShape()[1]},
1444  DataType::Float32);
1445  break;
1446  case DataLayout::NCHW:
1447  default:
1448  outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
1449  weightTensor.GetShape()[0] * weightTensor.GetShape()[1],
1450  outputHeight,
1451  outputWidth },
1452  DataType::Float32);
1453  break;
1454  }
1455 
1456  IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
1457  weightTensor,
1458  EmptyOptional(),
1459  nodeDef.name().c_str());
1460  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1461  inputSlot.Connect(layer->GetInputSlot(0));
1462 
1463  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1464 }
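// Note (for illustration): after the [H, W, I, M] -> [M, I, H, W] swizzle the output channel count is
// weightTensor.GetShape()[0] * weightTensor.GetShape()[1] = M * I, e.g. a depthwise filter of shape
// [3, 3, 16, 2] (depth multiplier 2 over 16 input channels) produces 32 output channels.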
1465 
1466 TensorInfo OutputShapeOfExpandDims(const tensorflow::NodeDef& nodeDef,
1467  TensorInfo inputTensorInfo,
1468  std::int32_t expandDim)
1469 {
1470  ARMNN_ASSERT(nodeDef.op() == "ExpandDims");
1471 
1472  if (inputTensorInfo.GetNumDimensions() > 4) {
1473  throw ParseException(
1474  fmt::format("Unsupported number of dimensions: {} for input shape for ExpandDims {} {}",
1475  inputTensorInfo.GetNumDimensions(),
1476  nodeDef.name(),
1477  CHECK_LOCATION().AsString()));
1478  }
1479 
1480  std::int32_t inputDimSize = armnn::numeric_cast<int32_t>(inputTensorInfo.GetNumDimensions());
1481  std::vector<uint32_t> outputDims;
1482 
1483  // expandDim operation requires: -1-input.dims() <= dim <= input.dims()
1484  if (expandDim >= -1 - inputDimSize && expandDim <= inputDimSize)
1485  {
1486  // add current input shape to outputDims
1487  for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); ++i) {
1488  auto currentDimension = inputTensorInfo.GetShape()[i];
1489  outputDims.push_back(currentDimension);
1490  }
1491 
1492  // insert a dimension of 1 at index 'expandDim' of inputs shape
1493  if (expandDim >= 0)
1494  {
1495  auto getPosition = std::next(outputDims.begin() + 0, expandDim);
1496  outputDims.insert(getPosition, 1);
1497  }
1498 
1499  // if negative number for 'expandDim' then count backwards from the last element
1500  // and insert 1 dimension at index 'expandDim'
1501  if (expandDim < 0)
1502  {
1503  int outputDimSize = armnn::numeric_cast<int>(outputDims.size() + 1);
1504  auto getPosition = std::next(outputDims.begin() + outputDimSize, expandDim);
1505  outputDims.insert(getPosition, 1);
1506  }
1507  }
1508  else
1509  {
1511  fmt::format("Cannot expand dimension {} in input tensor with {} dimension {}",
1512  expandDim,
1513  inputDimSize,
1514  CHECK_LOCATION().AsString()));
1515  }
1516 
1517  if (outputDims.size() > 4)
1518  {
1519  throw ParseException(
1520  fmt::format("Unsupported number of dimensions: {} for output shape for ExpandDims {} {}",
1521  outputDims.size(),
1522  nodeDef.name(),
1523  CHECK_LOCATION().AsString()));
1524  }
1525 
1526  TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1527  outputDims.data());
1528 
1529  TensorInfo outTensorInfo = inputTensorInfo;
1530  outTensorInfo.SetShape(outShape);
1531 
1532  return outTensorInfo;
1533 }
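// For illustration: for an input of shape [2, 3], expandDim = 1 gives [2, 1, 3] and expandDim = -1
// gives [2, 3, 1]; the accepted range here is -1 - input.dims() <= expandDim <= input.dims(), i.e. [-3, 2].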
1534 
1535 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseExpandDims(const tensorflow::NodeDef& nodeDef,
1536  const tensorflow::GraphDef& graphDef)
1537 {
1538  IgnoreUnused(graphDef);
1539 
1540  // Number of inputs can either
1541  // be 1 - that indicates that the axis parameter is passed as an attribute of the operation
1542  // or 2 - which means that the axis parameter is passed as a second input
1543  std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
1544  const std::size_t numInputs = nodes.size();
1545  std::vector<OutputOfParsedTfOperation> inputs;
1546  std::int32_t expandDim; // axis or dim parameter. Describes which dimension to expand.
1547  if (numInputs == 1)
1548  {
1549  inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1550  expandDim = ReadMandatoryNodeInt32Attribute(nodeDef, "Tdim");
1551  }
1552  else
1553  {
1554  inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1555 
1556  // make sure data type is int32
1557  IOutputSlot& prevLayerOutputSlot = inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1558  TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1559 
1560  if (inputTensorInfo.GetDataType()!=armnn::DataType::Signed32)
1561  {
1562  throw ParseException(
1563  fmt::format("The axis parameter of ExpandDims operation given as second input is not of type int32."
1564  " Input {0} Node {1} {2}",
1565  inputs[1].m_IndexedValue->GetNode().name(),
1566  nodeDef.name(),
1567  CHECK_LOCATION().AsString()));
1568  }
1569 
1570  // ensure the second input is a constant value
1571  if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
1572  {
1573  throw ParseException(
1574  fmt::format("ArmNN only supports ExpandDims layers with constant axis/dim parameter. "
1575  "Input {0} Node {1} {2}",
1576  inputs[1].m_IndexedValue->GetNode().name(),
1577  nodeDef.name(),
1578  CHECK_LOCATION().AsString()));
1579  }
1580 
1581  // make sure the second input is scalar or contains only a single value
1582  // (we don't support expand dims for multiple axes but we don't care what shape the
1583  // given tensor has as long as there is only a single value in it
1584  // e.g. a tensor like this [[[1]]] is completely fine)
1585  if (inputTensorInfo.GetNumElements() != 1)
1586  {
1587  throw ParseException(
1588  fmt::format("The axis parameter of ExpandDims operation given as second input is not "
1589  "allowed to hold more than one value. "
1590  "Input {0} Node {1} {2}",
1591  inputs[1].m_IndexedValue->GetNode().name(),
1592  nodeDef.name(),
1593  CHECK_LOCATION().AsString()));
1594  }
1595 
1596  ParsedConstTfOperation<int32_t>* expandDimsNode =
1597  PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
1598 
1599  memcpy(&expandDim, expandDimsNode->GetStorage(), sizeof(expandDim));
1600  }
1601 
1602  // First input is the vector that should be expanded by another dimension
1603  IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1604  TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1605 
1606  TensorInfo outputInfo;
1607  outputInfo = OutputShapeOfExpandDims(nodeDef, inputTensorInfo, expandDim);
1608 
1609  ReshapeDescriptor reshapeDesc;
1610  reshapeDesc.m_TargetShape = outputInfo.GetShape();
1611  IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
1612  prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
1613  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1614 
1615  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1616 }
1617 
1618 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseFusedBatchNorm(const tensorflow::NodeDef& nodeDef,
1619  const tensorflow::GraphDef& graphDef)
1620 {
1621  IgnoreUnused(graphDef);
1622  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 5);
1623 
1624  if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1625  {
1626  throw ParseException(
1627  fmt::format("ArmNN only supports FusedBatchNormalization layers with constant scale. "
1628  "Input {}. Node {} {}",
1629  inputs[1].m_IndexedValue->GetNode().name(),
1630  nodeDef.name(),
1631  CHECK_LOCATION().AsString()));
1632  }
1633  ParsedConstTfOperation<float>* scaleNode =
1634  PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
1635 
1636  if (!HasParsedConstTensor<float>(inputs[2].m_IndexedValue->GetNode().name()))
1637  {
1638  throw ParseException(
1639  fmt::format("ArmNN only supports FusedBatchNormalization layers with constant offset. "
1640  "Input {}. Node {} {}",
1641  inputs[2].m_IndexedValue->GetNode().name(),
1642  nodeDef.name(),
1643  CHECK_LOCATION().AsString()));
1644  }
1645  ParsedConstTfOperation<float>* offsetNode =
1646  PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[2].m_IndexedValue);
1647 
1648  if (!HasParsedConstTensor<float>(inputs[3].m_IndexedValue->GetNode().name()))
1649  {
1650  throw ParseException(
1651  fmt::format("ArmNN only supports FusedBatchNormalization layers with constant mean. "
1652  "Input {}. Node {} {}",
1653  inputs[3].m_IndexedValue->GetNode().name(),
1654  nodeDef.name(),
1655  CHECK_LOCATION().AsString()));
1656  }
1657  ParsedConstTfOperation<float>* meanNode =
1658  PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[3].m_IndexedValue);
1659 
1660  if (!HasParsedConstTensor<float>(inputs[4].m_IndexedValue->GetNode().name()))
1661  {
1662  throw ParseException(
1663  fmt::format("ArmNN only supports FusedBatchNormalization layers with constant variance. "
1664  "Input {}. Node {} {}",
1665  inputs[4].m_IndexedValue->GetNode().name(),
1666  nodeDef.name(),
1667  CHECK_LOCATION().AsString()));
1668  }
1669  ParsedConstTfOperation<float>* varianceNode =
1670  PolymorphicDowncast<ParsedConstTfOperation<float> *>(inputs[4].m_IndexedValue);
1671 
1672  const std::string dataFormat = ReadOptionalNodeStringAttribute(nodeDef, "data_format", "NHWC");
1673  CHECK_DATA_FORMAT(nodeDef, dataFormat, "FusedBatchNorm");
1674 
1675  // The descriptor only has the epsilon attribute.
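     // At inference time a fused batch norm computes, per channel:
     //   y = scale * (x - mean) / sqrt(variance + epsilon) + offset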
1676  BatchNormalizationDescriptor desc;
1677  desc.m_Eps = ReadMandatoryNodeFloatAttribute(nodeDef, "epsilon");
1678  desc.m_DataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
1679 
1680  // Data for the parsed tensor args (scale, offset, mean, variance) must be stored
1681  // locally until the layer is added.
1682  std::vector<float> scaleTensorData;
1683  ConstTensor scaleTensor = scaleNode->GetConstTensor(scaleTensorData);
1684 
1685  std::vector<float> offsetTensorData;
1686  ConstTensor offsetTensor = offsetNode->GetConstTensor(offsetTensorData);
1687 
1688  std::vector<float> meanTensorData;
1689  ConstTensor meanTensor = meanNode->GetConstTensor(meanTensorData);
1690 
1691  std::vector<float> varianceTensorData;
1692  ConstTensor varianceTensor = varianceNode->GetConstTensor(varianceTensorData);
1693 
1694  IConnectableLayer* layer = m_Network->AddBatchNormalizationLayer(desc,
1695  meanTensor,
1696  varianceTensor,
1697  offsetTensor,
1698  scaleTensor,
1699  nodeDef.name().c_str());
1700 
1701  IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1702 
1703  layer->GetOutputSlot(0).SetTensorInfo(inputSlot.GetTensorInfo());
1704  inputSlot.Connect(layer->GetInputSlot(0));
1705 
1706  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1707 }
1708 
1709 bool ITfParser::TfParserImpl::IsSupportedLeakyReluPattern(const tensorflow::NodeDef& mulNodeDef,
1710  size_t alphaLayerIndex,
1711  const OutputOfParsedTfOperation& otherOp,
1712  armnn::IOutputSlot** outputOfLeakyRelu,
1713  armnn::ActivationDescriptor & desc)
1714 {
1715  const tensorflow::NodeDef& otherNodeDef = otherOp.m_IndexedValue->GetNode();
1716 
1717  // Verifying all these assumptions hold:
1718  //
1719  // 1, the mulNodeDef is an elementwise multiplication node "Mul"
1720  // 2, the alphaLayerIndex selects a constant node from the inputs of the "Mul" node
1721  // 3, the inputLayerIndex selects a layer which has the same name as otherNodeDef
1722  //
1723 
1724  if (mulNodeDef.op() == "Mul")
1725  {
1726  size_t otherLayerIndex = (alphaLayerIndex == 0 ? 1 : 0);
1727  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(mulNodeDef, 2);
1728 
1729  ARMNN_ASSERT(inputs.size() == 2);
1730  ARMNN_ASSERT((otherLayerIndex == 0 || alphaLayerIndex == 0));
1731  ARMNN_ASSERT((otherLayerIndex == 1 || alphaLayerIndex == 1));
1732  ARMNN_ASSERT(((otherLayerIndex + alphaLayerIndex) == 1));
1733 
1734  if (inputs[otherLayerIndex].m_IndexedValue->GetNode().name() == otherNodeDef.name())
1735  {
1736  if (HasParsedConstTensor<float>(inputs[alphaLayerIndex].m_IndexedValue->GetNode().name()))
1737  {
1738  ParsedConstTfOperation<float>* alpha =
1739  PolymorphicDowncast<ParsedConstTfOperation<float> *>(
1740  inputs[alphaLayerIndex].m_IndexedValue);
1741 
1742  std::vector<float> const_data;
1743  ConstTensor const_tensor = alpha->GetConstTensor(const_data);
1744 
1745  if (const_data.size() == 1)
1746  {
1747  desc.m_Function = ActivationFunction::LeakyReLu;
1748  desc.m_A = const_data[0];
1749 
1750  *outputOfLeakyRelu = &(otherOp.m_IndexedValue->ResolveArmnnOutputSlot(otherOp.m_Index));
1751  return true;
1752  }
1753  }
1754  }
1755  }
1756  return false;
1757 }
1758 
1759 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseMaximum(const tensorflow::NodeDef& nodeDef,
1760  const tensorflow::GraphDef& graphDef)
1761 {
1762  IgnoreUnused(graphDef);
1763  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1764  if (inputs.size() != 2)
1765  {
1766  throw ParseException(
1767  fmt::format("Maximum expects two inputs. Got {} for Node {} {}",
1768  inputs.size(),
1769  nodeDef.name(),
1770  CHECK_LOCATION().AsString()));
1771  }
1772 
1773  auto inputNode0 = inputs[0].m_IndexedValue->GetNode();
1774  auto inputNode1 = inputs[1].m_IndexedValue->GetNode();
1775  IOutputSlot* outputOfLeakyRelu = nullptr;
1776 
1777  ActivationDescriptor desc;
1778 
1779  // A max node may be part of a LeakyRelu, with one input as a multiplication with a scalar constant,
1780  // i.e. one of the four possible scenarios:
1781  // 1, max(mul(a, x), x)
1782  // 2, max(mul(x, a), x)
1783  // 3, max(x, mul(a, x))
1784  // 4, max(x, mul(x, a))
1785  // These are handled by an activation layer.
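     // For example, max(mul(0.01, x), x) is equivalent to LeakyReLU(x) with alpha = 0.01,
     // so it is mapped to a single activation layer with m_Function = LeakyReLu and m_A = 0.01.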
1786 
1787  if (IsSupportedLeakyReluPattern(inputNode0, 0, inputs[1], &outputOfLeakyRelu, desc) ||
1788  IsSupportedLeakyReluPattern(inputNode0, 1, inputs[1], &outputOfLeakyRelu, desc) ||
1789  IsSupportedLeakyReluPattern(inputNode1, 0, inputs[0], &outputOfLeakyRelu, desc) ||
1790  IsSupportedLeakyReluPattern(inputNode1, 1, inputs[0], &outputOfLeakyRelu, desc))
1791  {
1792  ARMNN_ASSERT(outputOfLeakyRelu != nullptr);
1793 
1794  IConnectableLayer* const layer = m_Network->AddActivationLayer(desc, nodeDef.name().c_str());
1795  outputOfLeakyRelu->Connect(layer->GetInputSlot(0));
1796  layer->GetOutputSlot(0).SetTensorInfo(outputOfLeakyRelu->GetTensorInfo());
1797  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1798  }
1799  else
1800  {
1801  // Anything else is just a maximum layer.
1802 
1803  return AddMaximumLayer(nodeDef);
1804  }
1805 }
1806 
1807 std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> ITfParser::TfParserImpl::ProcessElementwiseInputSlots(
1808  const tensorflow::NodeDef& nodeDef, const std::string& layerName)
1809 {
1810  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1811 
1812  IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1813  IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1814  const unsigned int input0Dim = input0Slot->GetTensorInfo().GetNumDimensions();
1815  const unsigned int input1Dim = input1Slot->GetTensorInfo().GetNumDimensions();
1816 
1817  if (input0Dim != input1Dim)
1818  {
1819  // broadcasting where input0 and input1 have different number of dimensions
1820  // is only supported for 1D and 4D tensors pair
1821  if (input0Dim == 1 && input1Dim == 4)
1822  {
1823  input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, true, *m_Network, nodeDef);
1824  }
1825  else if (input0Dim == 4 && input1Dim == 1)
1826  {
1827  input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, true, *m_Network, nodeDef);
1828  }
1829  else
1830  {
1831  throw ParseException(
1832  fmt::format("Unsupported broadcast configuration for {} operation {} {}",
1833  layerName,
1834  nodeDef.name(),
1835  CHECK_LOCATION().AsString()));
1836  }
1837  }
1838  return {input0Slot, input1Slot};
1839 }
1840 
1841 ParsedTfOperationPtr ITfParser::TfParserImpl::ProcessComparisonLayer(
1842  IOutputSlot* input0Slot,
1843  IOutputSlot* input1Slot,
1844  IConnectableLayer* const layer,
1845  const tensorflow::NodeDef& nodeDef)
1846 {
1847  input0Slot->Connect(layer->GetInputSlot(0));
1848  input1Slot->Connect(layer->GetInputSlot(1));
1849 
1850  TensorInfo outputInfo = input0Slot->GetTensorInfo();
1851  outputInfo.SetDataType(DataType::Boolean);
1852  std::vector<unsigned int> outputShape;
1853 
1854  const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
1855  const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
1856 
1857  for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
1858  {
1859  outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
1860  }
1861 
1862  outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
1863  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1864 
1865  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1866 }
1867 
1868 ParsedTfOperationPtr ITfParser::TfParserImpl::ProcessElementwiseLayer(
1869  IOutputSlot* input0Slot,
1870  IOutputSlot* input1Slot,
1871  IConnectableLayer* const layer,
1872  const tensorflow::NodeDef& nodeDef)
1873 {
1874  input0Slot->Connect(layer->GetInputSlot(0));
1875  input1Slot->Connect(layer->GetInputSlot(1));
1876 
1877  TensorInfo outputInfo = input0Slot->GetTensorInfo();
1878  std::vector<unsigned int> outputShape;
1879 
1880  const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
1881  const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
1882 
1883  for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
1884  {
1885  outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
1886  }
1887 
1888  outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
1889  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1890 
1891  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1892 }
1893 
1894 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseGather(const tensorflow::NodeDef& nodeDef,
1895  const tensorflow::GraphDef& graphDef)
1896 {
1897  IgnoreUnused(graphDef);
1898  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1899  IOutputSlot& params = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1900  IOutputSlot& indices = inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1901  GatherDescriptor descriptor;
1902  descriptor.m_Axis = ReadMandatoryNodeInt32Attribute(nodeDef, "axis");
1903 
1904  // Infer shape of output tensor
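     // For example, params of shape [10, 4] gathered with indices of shape [2, 3] (axis 0)
     // produce an output of shape [2, 3, 4]: the indices shape followed by params dims 1..N-1.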
1905  unsigned int paramsDim = params.GetTensorInfo().GetNumDimensions();
1906  unsigned int indicesDim = indices.GetTensorInfo().GetNumDimensions();
1907  unsigned int outputDim = paramsDim - 1 + indicesDim;
1908 
1909  std::vector<unsigned int> dimSizes;
1910 
1911  for (unsigned int i = 0; i < indicesDim; ++i)
1912  {
1913  dimSizes.push_back(indices.GetTensorInfo().GetShape()[i]);
1914  }
1915  for (unsigned int i = 1; i < paramsDim; ++i)
1916  {
1917  dimSizes.push_back(params.GetTensorInfo().GetShape()[i]);
1918  }
1919 
1920  const TensorShape& inferredShape = TensorShape(outputDim, dimSizes.data());
1921 
1922  const TensorInfo inferredOutputInfo(inferredShape, params.GetTensorInfo().GetDataType());
1923 
1924  IConnectableLayer* const layer = m_Network->AddGatherLayer(descriptor, nodeDef.name().c_str());
1925  layer->GetOutputSlot(0).SetTensorInfo(inferredOutputInfo);
1926 
1927  params.Connect(layer->GetInputSlot(0));
1928  indices.Connect(layer->GetInputSlot(1));
1929 
1930  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1931 }
1932 
1933 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseGreater(const tensorflow::NodeDef& nodeDef,
1934  const tensorflow::GraphDef& graphDef)
1935 {
1936  IgnoreUnused(graphDef);
1937  std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Greater");
1938  IOutputSlot* input0Slot = inputLayers.first;
1939  IOutputSlot* input1Slot = inputLayers.second;
1940 
1941  ComparisonDescriptor descriptor(ComparisonOperation::Greater);
1942  IConnectableLayer* const layer = m_Network->AddComparisonLayer(descriptor, nodeDef.name().c_str());
1943 
1944  return ProcessComparisonLayer(input0Slot, input1Slot, layer, nodeDef);
1945 }
1946 
1947 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseEqual(const tensorflow::NodeDef& nodeDef,
1948  const tensorflow::GraphDef& graphDef)
1949 {
1950  IgnoreUnused(graphDef);
1951  std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Equal");
1952  IOutputSlot* input0Slot = inputLayers.first;
1953  IOutputSlot* input1Slot = inputLayers.second;
1954 
1955  ComparisonDescriptor descriptor(ComparisonOperation::Equal);
1956  IConnectableLayer* const layer = m_Network->AddComparisonLayer(descriptor, nodeDef.name().c_str());
1957 
1958  return ProcessComparisonLayer(input0Slot, input1Slot, layer, nodeDef);
1959 }
1960 
1961 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseMinimum(const tensorflow::NodeDef& nodeDef,
1962  const tensorflow::GraphDef& graphDef)
1963 {
1964  IgnoreUnused(graphDef);
1965  std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Minimum");
1966  IOutputSlot* input0Slot = inputLayers.first;
1967  IOutputSlot* input1Slot = inputLayers.second;
1968 
1969  IConnectableLayer* const layer = m_Network->AddMinimumLayer(nodeDef.name().c_str());
1970 
1971  return ProcessElementwiseLayer(input0Slot, input1Slot, layer, nodeDef);
1972 }
1973 
1974 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseSub(const tensorflow::NodeDef& nodeDef,
1975  const tensorflow::GraphDef& graphDef)
1976 {
1977  IgnoreUnused(graphDef);
1978  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1979 
1980  IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1981  IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
1982 
1983  const TensorInfo& input0Info = input0Slot->GetTensorInfo();
1984  const TensorInfo& input1Info = input1Slot->GetTensorInfo();
1985 
1986  if (input0Info.GetNumDimensions() == 1)
1987  {
1988  const bool isNHWC = true;
1989  input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
1990  }
1991 
1992  if (input1Info.GetNumDimensions() == 1)
1993  {
1994  const bool isNHWC = true;
1995  input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
1996  }
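     // For example, a 1D operand of shape [C] is reshaped via AddBroadcastReshapeLayer so it can
     // broadcast against the other 4D NHWC operand of shape [N, H, W, C] before the subtraction
     // layer is added.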
1997 
1998  IConnectableLayer* const layer = m_Network->AddSubtractionLayer(nodeDef.name().c_str());
1999 
2000  input0Slot->Connect(layer->GetInputSlot(0));
2001  input1Slot->Connect(layer->GetInputSlot(1));
2002 
2003  if (input0Info.GetNumDimensions() == 1)
2004  {
2005  layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
2006  }
2007  else
2008  {
2009  layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
2010  }
2011 
2012  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2013 }
2014 
2015 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseStack(const tensorflow::NodeDef& nodeDef,
2016  const tensorflow::GraphDef& graphDef)
2017 {
2018  IgnoreUnused(graphDef);
2019  std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
2020 
2021  unsigned int numInputs = static_cast<unsigned int>(nodes.size());
2022  if (numInputs < 1)
2023  {
2024  throw ParseException(
2025  fmt::format("Pack/Stack expects at least one input. Got {} for Node {} {}",
2026  numInputs,
2027  nodeDef.name(),
2028  CHECK_LOCATION().AsString()));
2029  }
2030 
2031  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2032  // Use the tensor shape of the first input as the "correct" input shape in the descriptor
2033  IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2034  const TensorInfo& inputTensorInfo = input0Slot->GetTensorInfo();
2035  auto numDimensions = inputTensorInfo.GetShape().GetNumDimensions();
2036 
2037  // validate axis
2038  int32_t axis = ReadMandatoryNodeInt32Attribute(nodeDef, "axis");
2039  const int sNumDimensions = (static_cast<int>(numDimensions) + 1);
2040  if (!(axis < sNumDimensions && axis >= -sNumDimensions))
2041  {
2042  throw ParseException(
2043  fmt::format("Axis index is not in range. Got {} for Node {} {}",
2044  axis,
2045  nodeDef.name(),
2046  CHECK_LOCATION().AsString()));
2047  }
2048 
2049  if (axis < 0)
2050  {
2051  axis = static_cast<int32_t>(numDimensions) + axis + 1;
2052  }
2053 
2054  StackDescriptor stackDescriptor;
2055  stackDescriptor.m_Axis = static_cast<uint32_t>(axis);
2056  stackDescriptor.m_NumInputs = static_cast<uint32_t>(numInputs);
2057  stackDescriptor.m_InputShape = inputTensorInfo.GetShape();
2058 
2059  const unsigned int supportedNumDims = 4;
2060  for (unsigned int viewIndex = 0; viewIndex < numInputs; ++viewIndex)
2061  {
2062  IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2063  TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2064 
2065  // Double check dimensions of the tensors
2066  if (inputTensorInfo.GetNumDimensions() >= supportedNumDims)
2067  {
2068  throw armnn::ParseException(
2069  fmt::format("Unsupported number of dimensions: {} for input tensors of the "
2070  "Pack/Stack op. The number of dimensions should be less than {} {}",
2071  inputTensorInfo.GetNumDimensions(),
2072  supportedNumDims,
2073  CHECK_LOCATION().AsString()));
2074  }
2075  }
2076 
2077  std::vector<unsigned int> outputDimensions;
2078  for (unsigned int i = 0; i < stackDescriptor.m_InputShape.GetNumDimensions(); ++i)
2079  {
2080  outputDimensions.push_back(stackDescriptor.m_InputShape[i]);
2081  }
2082  outputDimensions.insert(outputDimensions.begin() + axis, numInputs);
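     // For example, stacking three inputs of shape [4, 5] along axis 1 gives
     // outputDimensions [4, 3, 5].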
2083 
2084  // add Stack Layer
2085  IConnectableLayer* const layer = m_Network->AddStackLayer(stackDescriptor, nodeDef.name().c_str());
2086 
2087  for (unsigned int viewIndex = 0; viewIndex < numInputs; ++viewIndex)
2088  {
2089  IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2090  inputSlot.Connect(layer->GetInputSlot(viewIndex));
2091  }
2092 
2093  layer->GetOutputSlot(0).SetTensorInfo(
2094  armnn::TensorInfo(static_cast<uint32_t>(outputDimensions.size()),
2095  outputDimensions.data(),
2096  inputTensorInfo.GetDataType()));
2097 
2098  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2099 }
2100 
2101 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseTranspose(const tensorflow::NodeDef& nodeDef,
2102  const tensorflow::GraphDef& graphDef)
2103 {
2104  IgnoreUnused(graphDef);
2105 
2106  auto inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2107  const auto inputCount = inputs.size();
2108 
2109  if (inputCount != 2)
2110  {
2111  throw ParseException(
2112  fmt::format("The number of given inputs is {}. It should be two for the Transpose op. "
2113  "Node {} {}",
2114  inputCount,
2115  nodeDef.name(),
2116  CHECK_LOCATION().AsString()));
2117  }
2118 
2119  auto* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2120 
2121  const auto constInput = inputs[GetConstInputIndex(inputs)];
2122  auto* permuteVectorInput =
2123  PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(constInput.m_IndexedValue);
2124  const auto& permuteVectorInfo = permuteVectorInput->GetTensorInfo();
2125 
2126  std::vector<int32_t> permuteVectorData;
2127  permuteVectorInput->GetConstTensor(permuteVectorData);
2128 
2129  std::vector<unsigned int> armnnPermuteVectorData(permuteVectorData.begin(), permuteVectorData.end());
2130 
2131  const auto permutationVector = PermutationVector(armnnPermuteVectorData.data(), permuteVectorInfo.GetNumElements());
2132  const auto desc = TransposeDescriptor(permutationVector);
2133 
2134  auto* layer = m_Network->AddTransposeLayer(desc, nodeDef.name().c_str());
2135  ARMNN_ASSERT(layer);
2136 
2137  input0Slot->Connect(layer->GetInputSlot(0));
2138 
2139  const auto& input0Info = input0Slot->GetTensorInfo();
2140  armnn::TensorInfo outputInfo {input0Info};
2141  outputInfo.SetShape(armnnUtils::TransposeTensorShape(input0Info.GetShape(), desc.m_DimMappings));
2142  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2143 
2144  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2145 }
2146 
2147 unsigned int CheckPaddingTensor(const ConstTensor& paddingTensor,
2148  const TensorInfo& inputTensorInfo,
2149  const std::string& nodeName)
2150 {
2151  unsigned int rank = paddingTensor.GetShape()[0];
2152  unsigned int expectedRank = inputTensorInfo.GetNumDimensions();
2153  if (rank != expectedRank)
2154  {
2155  throw ParseException(
2156  fmt::format("Expected the padding tensor to be of rank {} not {} on Node {} {}.",
2157  expectedRank,
2158  rank,
2159  nodeName,
2160  CHECK_LOCATION().AsString()));
2161  }
2162  unsigned int second = paddingTensor.GetShape()[1];
2163  if (second != 2)
2164  {
2165  throw ParseException(
2166  fmt::format("Expected the padding tensor to be of dimensions "
2167  "[{0}, 2] not [{0}, {1}] on Node {2} {3}.",
2168  rank,
2169  second,
2170  nodeName,
2171  CHECK_LOCATION().AsString()));
2172  }
2173  return rank;
2174 }
2175 
2176 TensorInfo CalculatePaddedOutputTensorInfo(const TensorInfo& inputTensorInfo,
2177  const std::vector<std::pair<unsigned int, unsigned int>>& padList)
2178 {
2179  unsigned int numDims = inputTensorInfo.GetNumDimensions();
2180  std::vector<unsigned int> outDims;
2181  for (unsigned int i = 0; i < numDims; ++i)
2182  {
2183  unsigned int dimSize = inputTensorInfo.GetShape()[i];
2184  const std::pair<unsigned int, unsigned int>& dimPadding = padList[i];
2185  dimSize += dimPadding.first;
2186  dimSize += dimPadding.second;
2187  outDims.push_back(dimSize);
2188  }
2189  TensorInfo paddedTensorInfo = inputTensorInfo;
2190  unsigned int outDimsSize = static_cast<unsigned int>(outDims.size());
2191  paddedTensorInfo.SetShape(TensorShape{ outDimsSize, outDims.data() });
2192  return paddedTensorInfo;
2193 }
2194 
2195 ParsedTfOperationPtr ITfParser::TfParserImpl::ParsePad(const tensorflow::NodeDef& nodeDef,
2196  const tensorflow::GraphDef& graphDef)
2197 {
2198  IgnoreUnused(graphDef);
2199  // input consists of:
2200  // input[0] the tensor which will be padded
2201  // input[1] the tensor holding the padding values
2202  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2203  IOutputSlot& previousLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2204  TensorInfo inputTensorInfo = previousLayerOutputSlot.GetTensorInfo();
2205  if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue))
2206  {
2207  throw ParseException(
2208  fmt::format("ArmNN only supports Pad with constant padding. "
2209  "Input {}. Node {} {}",
2210  inputs[1].m_IndexedValue->GetNode().name(),
2211  nodeDef.name(),
2212  CHECK_LOCATION().AsString()));
2213 
2214  }
2215  ParsedConstTfOperation<int32_t>* paddingTensorOp =
2216  PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2217 
2218  std::vector<int32_t> paddingTensorData;
2219  ConstTensor paddingTensor = paddingTensorOp->GetConstTensor(paddingTensorData);
2220  // paddings is an integer tensor with shape [n, 2], where n is the rank of tensor
2221  // and should match the rank of the input tensor that is being padded.
2222  // For each dimension D of input, paddings[D, 0] indicates how many values to add
2223  // before the contents of tensor in that dimension, and paddings[D, 1] indicates how
2224  // many values to add after the contents of tensor in that dimension
2225  // This needs to be translated into a padList for ACL
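     // For example, a paddings tensor [[0, 0], [1, 1], [2, 2], [0, 0]] for an NHWC input becomes
     // padList {{0,0}, {1,1}, {2,2}, {0,0}}: one row of padding above and below, two columns left and right.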
2226  std::vector<std::pair<unsigned int, unsigned int>> padList;
2227  unsigned int rank = CheckPaddingTensor(paddingTensor, inputTensorInfo, nodeDef.name());
2228  for (unsigned int i = 0; i < rank; ++i)
2229  {
2230  std::pair<unsigned int, unsigned int> paddingForDim;
2231  for (unsigned int j = 0; j < 2; j++)
2232  {
2233  unsigned int index = (i * 2) + j;
2234  int paddingAmount = paddingTensorData[index];
2235  // make sure we can cast to an unsigned value
2236  if (paddingAmount < 0)
2237  {
2238  throw ParseException(
2239  fmt::format("Negative amount {} specified at [{}, {}] of padding tensor on Node {} {}.",
2240  paddingAmount,
2241  i,
2242  j,
2243  nodeDef.name(),
2244  CHECK_LOCATION().AsString()));
2245  }
2246  if (j == 0)
2247  {
2248  paddingForDim.first = static_cast<unsigned int>(paddingAmount);
2249  }
2250  else
2251  {
2252  paddingForDim.second = static_cast<unsigned int>(paddingAmount);
2253  }
2254  }
2255  padList.push_back(paddingForDim);
2256  }
2257  PadDescriptor padDescriptor(padList);
2258  IConnectableLayer* layer = m_Network->AddPadLayer(padDescriptor, nodeDef.name().c_str());
2259  previousLayerOutputSlot.Connect(layer->GetInputSlot(0));
2260  // Use the padding to calculate the new output tensor shape
2261  TensorInfo outputTensorInfo = CalculatePaddedOutputTensorInfo(inputTensorInfo, padList);
2262  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2263  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2264 }
2265 
2266 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseConcat(const tensorflow::NodeDef& nodeDef,
2267  const tensorflow::GraphDef& graphDef)
2268 {
2269  IgnoreUnused(graphDef);
2270  std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
2271 
2272  // In tensorflow, we have the last input of the Concat layer as the axis for concatenation.
2273  unsigned int numInputs = static_cast<unsigned int>(nodes.size());
2274 
2275  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2276 
2277  // Constant tensor index
2278  unsigned int index = GetConstInputIndex(inputs);
2279  // Get the axis tensor data
2280  ParsedConstTfOperation<int32_t>* shapeNode =
2281  PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[index].m_IndexedValue);
2282 
2283  std::vector<int32_t> axisTensorData;
2284  shapeNode->GetConstTensor(axisTensorData);
2285 
2286  // The concatDim indicates the concatenation axis: 3 for the NHWC data format, 1 for NCHW.
2287  const unsigned int concatDim = static_cast<unsigned int>(axisTensorData[0]);
2288 
2289  // Armnn supports concatenation along the channel dimension for data formats NHWC and NCHW.
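     // For example, concatenating two NHWC tensors of shape [1, 7, 7, 64] along axis 3
     // produces an output of shape [1, 7, 7, 128].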
2290  if (concatDim == 0 || concatDim == 2)
2291  {
2292  throw ParseException(
2293  fmt::format("Dimension {} for concatenation is not supported by Armnn. "
2294  "Node {} {}",
2295  concatDim,
2296  nodeDef.name(),
2297  CHECK_LOCATION().AsString()));
2298  }
2299 
2300  const unsigned int supportedNumDims = 4;
2301  unsigned int numConcatViews = numInputs - 1;
2302  OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatViews), supportedNumDims);
2303  concatDescriptor.SetConcatAxis(concatDim);
2304  TensorShape mergeDims(supportedNumDims);
2305  unsigned int mergeDim = 0;
2306  for (unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
2307  {
2308  // Need to double check whether it should be
2309  IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2310  TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2311 
2312  // Double check dimensions of the tensors
2313  if (inputTensorInfo.GetNumDimensions() != supportedNumDims)
2314  {
2315  throw armnn::ParseException(
2316  fmt::format("The number of dimensions: {} for input tensors of the "
2317  "concatenation op should be {} {}",
2318  inputTensorInfo.GetNumDimensions(),
2319  supportedNumDims,
2320  CHECK_LOCATION().AsString()));
2321  }
2322 
2323  // Copy the input tensor shape to mergeDimSizes and initialize the view origin coordinates for the current input
2324  mergeDims = inputTensorInfo.GetShape();
2325  unsigned int* viewOrigin = const_cast<unsigned int*>(concatDescriptor.GetViewOrigin(viewIndex));
2326  std::fill(viewOrigin, viewOrigin + supportedNumDims, 0);
2327 
2328  // Update the view origin coordinates and the merge dimension value
2329  concatDescriptor.SetViewOriginCoord(viewIndex, concatDim, mergeDim);
2330  mergeDim += mergeDims[concatDim];
2331  }
2332 
2333  // Update the output shape
2334  mergeDims[concatDim] = mergeDim;
2335  armnn::IConnectableLayer *layer = m_Network->AddConcatLayer(concatDescriptor, nodeDef.name().c_str());
2336 
2337  layer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(mergeDims, DataType::Float32));
2338 
2339  for (unsigned int viewIndex = 0; viewIndex < numConcatViews; ++viewIndex)
2340  {
2341  IOutputSlot& inputSlot = inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
2342  inputSlot.Connect(layer->GetInputSlot(viewIndex));
2343  }
2344 
2345  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2346 }
2347 
2348 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseShape(const tensorflow::NodeDef& nodeDef,
2349  const tensorflow::GraphDef& graphDef)
2350 {
2351  IgnoreUnused(graphDef);
2352  // Note: the Shape layer is handled in a special way, because:
2353  // 1. ArmNN doesn't support the int32 tensors which Shape outputs.
2354  // 2. ARMNN works with statically shaped tensors which are known at parse time.
2355  // 3. because of 1. and 2. we treat the output of Shape as a temporary const int32
2356  // tensor which may be used as an input to other ops, most likely a Reshape.
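     // For example, if the previous layer outputs a tensor of shape [1, 224, 224, 3], this op is
     // parsed as a constant int32 tensor of shape [4] holding the values { 1, 224, 224, 3 }.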
2357 
2358  const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "out_type");
2359  if (tfDataType != tensorflow::DT_INT32)
2360  {
2361  throw ParseException(
2362  fmt::format("Armnn only supports DT_INT32 as out_type. Got {} for Node {} {}",
2363  tensorflow::DataType_Name(tfDataType),
2364  nodeDef.name(),
2365  CHECK_LOCATION().AsString()));
2366  }
2367 
2368  const std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2369  IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2370  const TensorInfo& prevLayerTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2371  unsigned int prevLayerDimensions = prevLayerTensorInfo.GetNumDimensions();
2372 
2373  std::vector<int32_t> shapeTensorData;
2374  shapeTensorData.reserve(prevLayerDimensions);
2375 
2376  for (unsigned int i=0; i<prevLayerDimensions; ++i)
2377  {
2378  shapeTensorData.push_back(static_cast<int32_t>(prevLayerTensorInfo.GetShape()[i]));
2379  }
2380 
2381  TensorInfo shapeTensorInfo(1, &prevLayerDimensions, DataType::Signed32);
2382 
2383  return std::make_unique<ParsedConstTfOperation<int32_t>>(this,
2384  nodeDef,
2385  &shapeTensorData[0],
2386  shapeTensorInfo);
2387 }
2388 
2389 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseReshape(const tensorflow::NodeDef& nodeDef,
2390  const tensorflow::GraphDef& graphDef)
2391 {
2392  IgnoreUnused(graphDef);
2393  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2394  ParsedTfOperation* inputNode = inputs[0].m_IndexedValue;
2395 
2396  if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
2397  {
2398  throw ParseException(
2399  fmt::format("ArmNN only supports Reshape layers with constant shapes. "
2400  "Input {} Node {} {}",
2401  inputs[1].m_IndexedValue->GetNode().name(),
2402  nodeDef.name(),
2403  CHECK_LOCATION().AsString()));
2404  }
2405  ParsedConstTfOperation<int32_t>* shapeNode =
2406  PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2407 
2408  armnn::IOutputSlot& prevLayerOutputSlot = inputNode->ResolveArmnnOutputSlot(inputs[0].m_Index);
2409  TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2410 
2411  std::vector<int32_t> shapeTensorData;
2412  ConstTensor shapeTensor = shapeNode->GetConstTensor(shapeTensorData);
2413  const TensorInfo outputTensorInfo = PrepareReshape(inputTensorInfo, shapeTensorData);
2414 
2415  TensorShape targetShape = outputTensorInfo.GetShape();
2416  ReshapeDescriptor reshapeDesc;
2417  reshapeDesc.m_TargetShape = targetShape;
2418 
2419  IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
2420  prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2421  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2422 
2423  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2424 }
2425 
2426 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseResizeBilinear(const tensorflow::NodeDef& nodeDef,
2427  const tensorflow::GraphDef& graphDef)
2428 {
2429  IgnoreUnused(graphDef);
2430  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2431 
2432  if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
2433  {
2434  throw ParseException(
2435  fmt::format("ArmNN only supports ResizeBilinear layers with constant sizes. "
2436  "Input {}. Node {} {}",
2437  inputs[1].m_IndexedValue->GetNode().name(),
2438  nodeDef.name(),
2439  CHECK_LOCATION().AsString()));
2440  }
2441  ParsedConstTfOperation<int32_t>* sizeNode =
2442  PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2443 
2444  // Checks the align_corners attribute is not set.
2445  if (ReadOptionalNodeBoolAttribute(nodeDef, "align_corners", false))
2446  {
2447  throw ParseException(
2448  fmt::format("ArmNN only supports ResizeBilinear layers with align_corners set to false. "
2449  "Node {} {}",
2450  nodeDef.name(),
2451  CHECK_LOCATION().AsString()));
2452  }
2453 
2454  // Data for the parsed tensor args (size) must be stored locally.
2455  std::vector<int32_t> sizeTensorData;
2456  ConstTensor sizeTensor = sizeNode->GetConstTensor(sizeTensorData);
2457 
2458  // The descriptor only has target height and width attributes, which we get from the size tensor.
2459  ResizeDescriptor desc;
2460  desc.m_Method = ResizeMethod::Bilinear;
2461  desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
2462  desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
2463  desc.m_DataLayout = DataLayout::NHWC;
2464 
2465  IConnectableLayer* layer = m_Network->AddResizeLayer(desc, nodeDef.name().c_str());
2466 
2467  IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2468  TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2469  // The input shape is always in BHWC format; this will be swizzled below. For now,
2470  // get the batch and channels to make up the ArmNN output shape with the target size.
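     // For example, an input of shape [1, 32, 32, 3] with size = { 64, 64 } produces an output
     // of shape [1, 64, 64, 3].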
2471  unsigned int outBatch = inputTensorInfo.GetShape()[0];
2472  unsigned int outChannels = inputTensorInfo.GetShape()[3];
2473  unsigned int outHeight = desc.m_TargetHeight;
2474  unsigned int outWidth = desc.m_TargetWidth;
2475  TensorShape outShape({outBatch, outHeight, outWidth, outChannels });
2476  // The output DataType is always Float32, regardless of the input DataType.
2477  const TensorInfo outputTensorInfo(outShape, armnn::DataType::Float32);
2478  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2479 
2480  inputSlot.Connect(layer->GetInputSlot(0));
2481 
2482  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2483 }
2484 
2485 TensorInfo OutputShapeOfSqueeze(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
2486 {
2487  ARMNN_ASSERT(nodeDef.op() == "Squeeze");
2488  tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "T");
2489 
2490  DataType type;
2491  if (tfDataType == tensorflow::DT_FLOAT)
2492  {
2493  type = DataType::Float32;
2494  }
2495  else if (tfDataType == tensorflow::DT_INT32)
2496  {
2497  type = DataType::Signed32;
2498  }
2499  else
2500  {
2501  throw ParseException(
2502  fmt::format("Unsupported DataType {} for Squeeze operation {} {}",
2503  tensorflow::DataType_Name(tfDataType),
2504  nodeDef.name(),
2505  CHECK_LOCATION().AsString()));
2506  }
2507 
2508 
2509  if (inputTensorInfo.GetNumDimensions() > 4)
2510  {
2511  throw ParseException(
2512  fmt::format("Unsupported number of dimensions: {} for input shape for Squeeze {} {}",
2513  inputTensorInfo.GetNumDimensions(),
2514  nodeDef.name(),
2515  CHECK_LOCATION().AsString()));
2516  }
2517 
2518  std::vector<uint32_t> squeezeDims = ReadOptionalNodeUint32ListAttribute(nodeDef, "squeeze_dims");
2519  static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2520 
2521  if (squeezeDims.empty())
2522  {
2523  squeezeDims.assign(dimensionSequence,
2524  dimensionSequence+inputTensorInfo.GetNumDimensions());
2525  }
2526 
2527  std::vector<uint32_t> outputDims;
2528  for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
2529  {
2530  bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
2531  auto currentDimension = inputTensorInfo.GetShape()[i];
2532  if (skipSqueeze || currentDimension != 1)
2533  {
2534  outputDims.push_back(currentDimension);
2535  }
2536  }
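     // For example, an input of shape [1, 7, 7, 1] with squeeze_dims = { 0 } gives outputDims [7, 7, 1];
     // with no squeeze_dims attribute every dimension of size 1 is removed, giving [7, 7].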
2537 
2538  if (outputDims.size() > 4)
2539  {
2540  throw ParseException(
2541  fmt::format("Unsupported number of dimensions: {} for output shape for Squeeze {} {}",
2542  outputDims.size(),
2543  nodeDef.name(),
2544  CHECK_LOCATION().AsString()));
2545  }
2546 
2547  TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
2548  outputDims.data());
2549 
2550  TensorInfo outTensorInfo = inputTensorInfo;
2551  outTensorInfo.SetShape(outShape);
2552  outTensorInfo.SetDataType(type);
2553 
2554  return outTensorInfo;
2555 }
2556 
2557 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseSqueeze(const tensorflow::NodeDef& nodeDef,
2558  const tensorflow::GraphDef& graphDef)
2559 {
2560  IgnoreUnused(graphDef);
2561  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2562 
2563  IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2564  TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
2565 
2566  TensorInfo outputInfo;
2567  outputInfo = OutputShapeOfSqueeze(nodeDef, inputTensorInfo);
2568 
2569  ReshapeDescriptor reshapeDesc;
2570  reshapeDesc.m_TargetShape = outputInfo.GetShape();
2571  IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
2572  prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2573  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2574 
2575  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2576 }
2577 
2578 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseLrn(const tensorflow::NodeDef& nodeDef,
2579  const tensorflow::GraphDef& graphDef)
2580 {
2581  IgnoreUnused(graphDef);
2582  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2583 
2584  NormalizationDescriptor normalizationDescriptor;
2585  normalizationDescriptor.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
2586  normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across;
2587  normalizationDescriptor.m_Alpha = ReadMandatoryNodeFloatAttribute(nodeDef, "alpha");
2588  normalizationDescriptor.m_Beta = ReadMandatoryNodeFloatAttribute(nodeDef, "beta");
2589  normalizationDescriptor.m_K = ReadMandatoryNodeFloatAttribute(nodeDef, "bias");
2590  normalizationDescriptor.m_NormSize = ReadMandatoryNodeUint32Attribute(nodeDef, "depth_radius");
2591  normalizationDescriptor.m_DataLayout = armnn::DataLayout::NHWC;
2592 
2593  // The window size must be an odd value. For a window size of (2 * n + 1), TensorFlow defines depth_radius = n.
2594  normalizationDescriptor.m_NormSize = normalizationDescriptor.m_NormSize * 2 + 1;
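     // For example, a TensorFlow depth_radius of 2 becomes an ArmNN m_NormSize of 5 (a 5-element window).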
2595 
2596  IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2597  IConnectableLayer* layer = m_Network->AddNormalizationLayer(normalizationDescriptor,
2598  nodeDef.name().c_str());
2599  prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2600  layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
2601 
2602  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2603 }
2604 
2605 /// A ParsedTfOperation for a MatMul node.
2606 /// Creation of the armnn FullyConnected layer is deferred until it is actually needed, because
2607 /// MatMul nodes are often used for the first part of a biased FullyConnected (MatMul followed
2608 /// by Add) and in these cases armnn doesn't need a separate layer for the MatMul.
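 /// For example, the pattern MatMul -> BiasAdd is typically parsed into a single FullyConnected
 /// layer with a bias tensor instead of a separate MatMul layer followed by an addition.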
2609 ///
2610 class ParsedMatMulTfOperation : public DeferredSingleLayerParsedTfOperation
2611 {
2612 public:
2613  ParsedMatMulTfOperation(ITfParser::TfParserImpl* parser, const tensorflow::NodeDef& node)
2614  : DeferredSingleLayerParsedTfOperation(parser, node)
2615  {
2616  }
2617 
2618  void CreateLayerDeferred() override
2619  {
2620  ARMNN_ASSERT(m_Layer == nullptr);
2621  m_Layer = m_Parser->AddFullyConnectedLayer(m_Node, nullptr, m_Node.name().c_str());
2622  }
2623 };
2624 
2625 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseMatMul(const tensorflow::NodeDef& nodeDef,
2626  const tensorflow::GraphDef& graphDef)
2627 {
2628  IgnoreUnused(graphDef);
2629 
2630  // Defers the creation of the layer (see ParsedMatMulTfOperation).
2631  return std::make_unique<ParsedMatMulTfOperation>(this, nodeDef);
2632 }
2633 
2634 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseMean(const tensorflow::NodeDef& nodeDef,
2635  const tensorflow::GraphDef& graphDef)
2636 {
2637  IgnoreUnused(graphDef);
2638  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2639  IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2640  TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2641 
2642  if (inputs.size() != 2)
2643  {
2644  throw ParseException(
2645  fmt::format("Mean expects two inputs. Got {} for Node {} {}",
2646  inputs.size(),
2647  nodeDef.name(),
2648  CHECK_LOCATION().AsString()));
2649  }
2650 
2651  bool keepDims = ReadMandatoryNodeBoolAttribute(nodeDef, "keep_dims");
2652 
2653  ParsedConstTfOperation<int32_t>* axisNode =
2654  PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
2655 
2656  const TensorInfo& axisTensorInfo = axisNode->GetTensorInfo();
2657 
2658  ConstTensor axisTensor(axisTensorInfo, axisNode->GetStorage());
2659  const int* axisData = static_cast<const int*>(axisTensor.GetMemoryArea());
2660 
2661  TensorInfo outputTensorInfo;
2662  MeanDescriptor meanDescriptor;
2663  meanDescriptor.m_KeepDims = keepDims;
2664 
2665  // Negative axis values are supported; they need to be converted
2666  // into the corresponding positive ones.
2667  // Duplicate values are also removed.
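     // For example, for a 4D input the raw axis values { -1, 3 } both map to 3 and are
     // collapsed into a single reduction axis by the set insertion below.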
2668  std::vector<int> rawAxisVector(axisData, axisData + axisTensorInfo.GetNumElements());
2669  std::set<unsigned int> positiveAxisSet;
2670  int rank = static_cast<int>(inputTensorInfo.GetNumDimensions());
2671 
2672  std::transform(rawAxisVector.begin(), rawAxisVector.end(),
2673  std::inserter(positiveAxisSet, positiveAxisSet.begin()),
2674  [rank](int i) -> unsigned int { return static_cast<unsigned int>((i + rank) % rank); });
2675 
2676  CalculateReducedOutputTensoInfo(inputTensorInfo, positiveAxisSet, keepDims, outputTensorInfo);
2677 
2678  if (inputTensorInfo.GetNumDimensions() > positiveAxisSet.size())
2679  {
2680  meanDescriptor.m_Axis.assign(positiveAxisSet.begin(), positiveAxisSet.end());
2681  }
2682 
2683  IConnectableLayer* layer = m_Network->AddMeanLayer(meanDescriptor, nodeDef.name().c_str());
2684  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2685  inputSlot.Connect(layer->GetInputSlot(0));
2686 
2687  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2688 }
2689 
2690 /// A ParsedTfOperation for a Mul node.
2691 /// Creation of the armnn Mul layer is deferred until it is actually needed, because Mul nodes
2692 /// are also used for the first part of a leaky relu activation function (Mul followed by Maximum)
2693 /// and in these cases armnn doesn't need a separate layer for the Mul.
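 /// For example, a Mul that multiplies an input by a constant scalar and feeds a Maximum is
 /// folded into a LeakyReLu activation (see IsSupportedLeakyReluPattern above), so no standalone
 /// multiplication layer is created for it.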
2694 ///
2695 class ParsedMulTfOperation : public DeferredSingleLayerParsedTfOperation
2696 {
2697 public:
2698  ParsedMulTfOperation(ITfParser::TfParserImpl* parser, const tensorflow::NodeDef& node)
2699  : DeferredSingleLayerParsedTfOperation(parser, node)
2700  {
2701  }
2702 
2703  void CreateLayerDeferred() override
2704  {
2705  ARMNN_ASSERT(m_Layer == nullptr);
2706  m_Layer = m_Parser->AddMultiplicationLayer(m_Node);
2707  }
2708 };
2709 
2710 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseMul(const tensorflow::NodeDef& nodeDef,
2711  const tensorflow::GraphDef& graphDef)
2712 {
2713  IgnoreUnused(graphDef);
2714 
2715  return std::make_unique<ParsedMulTfOperation>(this, nodeDef);
2716 }
2717 
2718 ParsedTfOperationPtr ITfParser::TfParserImpl::ParsePlaceholder(const tensorflow::NodeDef& nodeDef,
2719  const tensorflow::GraphDef& graphDef)
2720 {
2721  IgnoreUnused(graphDef);
2722 
2723  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 0);
2724 
2725  const LayerBindingId layerId = armnn::numeric_cast<LayerBindingId>(m_NetworkInputsBindingInfo.size());
2726 
2727  auto it = m_InputShapes.find(nodeDef.name());
2728  if (it == m_InputShapes.end())
2729  {
2730  throw ParseException(
2731  fmt::format("Missing input shape for Placeholder '{}' {}",
2732  nodeDef.name(),
2733  CHECK_LOCATION().AsString()));
2734  }
2735  TensorInfo tensorInfo(it->second, DataType::Float32);
2736 
2737  IConnectableLayer* const layer = m_Network->AddInputLayer(layerId, nodeDef.name().c_str());
2738 
2739  layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
2740 
2741  TrackInputBinding(layer, layerId, tensorInfo);
2742 
2743  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2744 }
2745 
2746 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseRealDiv(const tensorflow::NodeDef& nodeDef,
2747  const tensorflow::GraphDef& graphDef)
2748 {
2749  IgnoreUnused(graphDef);
2750  return AddRealDivLayer(nodeDef);
2751 }
2752 
2753 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseRelu(const tensorflow::NodeDef& nodeDef,
2754  const tensorflow::GraphDef& graphDef)
2755 {
2756  IgnoreUnused(graphDef);
2757 
2758  ActivationDescriptor activationDesc;
2759  activationDesc.m_Function = ActivationFunction::ReLu;
2760  return AddActivationLayer(nodeDef, activationDesc);
2761 }
2762 
2763 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseRelu6(const tensorflow::NodeDef& nodeDef,
2764  const tensorflow::GraphDef& graphDef)
2765 {
2766  IgnoreUnused(graphDef);
2767 
2768  ActivationDescriptor activationDesc;
2769  activationDesc.m_Function = ActivationFunction::BoundedReLu;
2770  activationDesc.m_A = 6.0f;
2771  activationDesc.m_B = 0.0f;
2772 
2773  return AddActivationLayer(nodeDef, activationDesc);
2774 }
2775 
2776 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseSigmoid(const tensorflow::NodeDef& nodeDef,
2777  const tensorflow::GraphDef& graphDef)
2778 {
2779  IgnoreUnused(graphDef);
2780 
2781  ActivationDescriptor activationDesc;
2782  activationDesc.m_Function = ActivationFunction::Sigmoid;
2783 
2784  return AddActivationLayer(nodeDef, activationDesc);
2785 }
2786 
2787 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseRsqrt(const tensorflow::NodeDef &nodeDef,
2788  const tensorflow::GraphDef &graphDef)
2789 {
2790  IgnoreUnused(graphDef);
2791 
2792  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2793 
2794  ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt);
2795  IConnectableLayer* const layer = m_Network->AddElementwiseUnaryLayer(descriptor, nodeDef.name().c_str());
2796 
2797  IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2798  prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2799  layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
2800 
2801  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2802 }
2803 
2804 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseSoftmax(const tensorflow::NodeDef& nodeDef,
2805  const tensorflow::GraphDef& graphDef)
2806 {
2807  IgnoreUnused(graphDef);
2808 
2809  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2810 
2811  SoftmaxDescriptor softmaxDescriptor;
2812  IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(softmaxDescriptor, nodeDef.name().c_str());
2813 
2814  IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2815  prevLayerSlot.Connect(layer->GetInputSlot(0));
2816  layer->GetOutputSlot(0).SetTensorInfo(prevLayerSlot.GetTensorInfo());
2817 
2818  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2819 }
2820 
2821 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseSplit(const tensorflow::NodeDef& nodeDef,
2822  const tensorflow::GraphDef& graphDef)
2823 {
2824  IgnoreUnused(graphDef);
2825 
2826  std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
2827  unsigned int numInputs = static_cast<unsigned int>(nodes.size());
2828  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2829 
2830  // Constant tensor index
2831  unsigned int index = GetConstInputIndex(inputs);
2832  // Get the axis tensor data
2833  ParsedConstTfOperation<int32_t>* shapeNode =
2834  PolymorphicDowncast<ParsedConstTfOperation<int32_t>*>(inputs[index].m_IndexedValue);
2835 
2836  std::vector<int32_t> axisTensorData;
2837  shapeNode->GetConstTensor(axisTensorData);
2838 
2839  // The splitDim indicates the split axis: 3 for the NHWC data format, 1 for NCHW.
2840  const unsigned int splitDim = static_cast<unsigned int>(axisTensorData[0]);
2841 
2842  // Armnn supports split along the channel dimension for data formats NHWC and NCHW.
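     // For example, splitting an NHWC tensor of shape [1, 7, 7, 64] along axis 3 with
     // num_split = 2 produces two outputs of shape [1, 7, 7, 32].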
2843  if (splitDim == 0 || splitDim == 2)
2844  {
2845  throw armnn::ParseException(
2846  fmt::format("Dimension {} for split is not supported by Armnn. "
2847  "Node {} {}",
2848  splitDim,
2849  nodeDef.name(),
2850  CHECK_LOCATION().AsString()));
2851  }
2852 
2853  // Armnn only supports splitter outputs of the same shape, so num_split must evenly divide the split dimension.
2854  uint32_t num_split = ReadMandatoryNodeUint32Attribute(nodeDef, "num_split");
2855 
2856  IOutputSlot& inputSlot = inputs[1 - index].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1 - index].m_Index);
2857  TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
2858 
2859  const unsigned int supportedNumDims = 4;
2860  auto inputDimSize = inputTensorInfo.GetNumDimensions();
2861 
2862  if (inputDimSize != supportedNumDims)
2863  {
2864  throw armnn::ParseException(
2865  fmt::format("The number of dimensions: {} for input tensors of the "
2866  "split op should be {} {}",
2867  inputTensorInfo.GetNumDimensions(),
2868  supportedNumDims,
2869  CHECK_LOCATION().AsString()));
2870  }
2871 
2872  std::vector<unsigned int> splitterDimSizes(inputDimSize);
2873 
2874  // Add current input shape to splitterDimSizes
2875  for (unsigned int i = 0; i < inputDimSize; ++i)
2876  {
2877  splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2878  }
2879 
2880  if (splitterDimSizes[splitDim] % num_split != 0)
2881  {
2882  throw ParseException("Number of splits must evenly divide the dimension");
2883  }
2884  splitterDimSizes[splitDim] /= num_split;
2885 
2886  SplitterDescriptor splitDesc(num_split);
2887  for (unsigned int g = 0; g < num_split; ++g)
2888  {
2889  // Set the size of the views.
2890  for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2891  {
2892  splitDesc.SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
2893  }
2894  splitDesc.SetViewOriginCoord(g, splitDim, splitterDimSizes[splitDim] * g);
2895  }
2896 
2897  IConnectableLayer *layer = m_Network->AddSplitterLayer(splitDesc, nodeDef.name().c_str());
2898 
2899  inputSlot.Connect(layer->GetInputSlot(0));
2900 
2901  TensorShape outShape = TensorShape(static_cast<unsigned int>(splitterDimSizes.size()),
2902  splitterDimSizes.data());
2903 
2904  for (unsigned int i = 0; i < layer->GetNumOutputSlots(); ++i)
2905  {
2906  layer->GetOutputSlot(i).SetTensorInfo(armnn::TensorInfo(outShape, inputTensorInfo.GetDataType()));
2907  }
2908 
2909  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2910 }
2911 
2912 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseSoftplus(const tensorflow::NodeDef& nodeDef,
2913  const tensorflow::GraphDef& graphDef)
2914 {
2915  IgnoreUnused(graphDef);
2916 
2917  ActivationDescriptor activationDesc;
2918  activationDesc.m_Function = ActivationFunction::SoftReLu;
2919 
2920  return AddActivationLayer(nodeDef, activationDesc);
2921 }
2922 
2923 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseStridedSlice(const tensorflow::NodeDef& nodeDef,
2924  const tensorflow::GraphDef& graphDef)
2925 {
2926  IgnoreUnused(graphDef);
2927 
2928  std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
2929  unsigned int numInputs = static_cast<unsigned int>(nodes.size());
2930  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
2931 
2932  ParsedConstTfOperation<int32_t>* beginNode =
2933  PolymorphicDowncast<ParsedConstTfOperation<int32_t> *>(inputs[1].m_IndexedValue);
2934  std::vector<int32_t> beginTensorData;
2935  beginNode->GetConstTensor(beginTensorData);
2936 
2937  ParsedConstTfOperation<int32_t>* endNode =
2938  PolymorphicDowncast<ParsedConstTfOperation<int32_t> *>(inputs[2].m_IndexedValue);
2939  std::vector<int32_t> endTensorData;
2940  endNode->GetConstTensor(endTensorData);
2941 
2942  ParsedConstTfOperation<int32_t>* stridesNode =
2943  PolymorphicDowncast<ParsedConstTfOperation<int32_t> *>(inputs[3].m_IndexedValue);
2944  std::vector<int32_t> stridesTensorData;
2945  stridesNode->GetConstTensor(stridesTensorData);
2946 
2947  StridedSliceDescriptor desc;
2948  desc.m_Begin = beginTensorData;
2949  desc.m_End = endTensorData;
2950  desc.m_Stride = stridesTensorData;
2951  desc.m_BeginMask = ReadMandatoryNodeInt32Attribute(nodeDef, "begin_mask");
2952  desc.m_EndMask = ReadMandatoryNodeInt32Attribute(nodeDef, "end_mask");
2953  desc.m_EllipsisMask = ReadMandatoryNodeInt32Attribute(nodeDef, "ellipsis_mask");
2954  desc.m_NewAxisMask = ReadMandatoryNodeInt32Attribute(nodeDef, "new_axis_mask");
2955  desc.m_ShrinkAxisMask = ReadMandatoryNodeInt32Attribute(nodeDef, "shrink_axis_mask");
2956  desc.m_DataLayout = armnn::DataLayout::NHWC;
2957  IConnectableLayer* const layer = m_Network->AddStridedSliceLayer(desc, nodeDef.name().c_str());
2958 
2959  IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2960  TensorInfo inputTensorInfo = prevLayerSlot.GetTensorInfo();
2961 
2962  TensorInfo outputTensorInfo;
2963  CalculateStridedSliceOutputTensorInfo(inputTensorInfo, desc, outputTensorInfo);
2964 
2965  prevLayerSlot.Connect(layer->GetInputSlot(0));
2966  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
2967 
2968  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2969 }
2970 
2971 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseTanh(const tensorflow::NodeDef& nodeDef,
2972  const tensorflow::GraphDef& graphDef)
2973 {
2974  IgnoreUnused(graphDef);
2975 
2976  ActivationDescriptor activationDesc;
2977  activationDesc.m_Function = ActivationFunction::TanH;
2978  activationDesc.m_A = 1.0f;
2979  activationDesc.m_B = 1.0f;
2980 
2981  return AddActivationLayer(nodeDef, activationDesc);
2982 }
2983 
2984 ParsedTfOperationPtr ITfParser::TfParserImpl::AddActivationLayer(const tensorflow::NodeDef& nodeDef,
2985  ActivationDescriptor& activationDesc)
2986 {
2987  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
2988 
2989  IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, nodeDef.name().c_str());
2990 
2991  IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2992  prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
2993  layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
2994  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2995 }
2996 
2997 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseMaxPool(const tensorflow::NodeDef& nodeDef,
2998  const tensorflow::GraphDef& graphDef)
2999 {
3000  return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Max);
3001 }
3002 
3003 ParsedTfOperationPtr ITfParser::TfParserImpl::ParseAvgPool(const tensorflow::NodeDef& nodeDef,
3004  const tensorflow::GraphDef& graphDef)
3005 {
3006  return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Average);
3007 }
3008 
3009 ParsedTfOperationPtr ITfParser::TfParserImpl::ParsePooling2d(const tensorflow::NodeDef& nodeDef,
3010  const tensorflow::GraphDef& graphDef, PoolingAlgorithm pooltype)
3011 {
3012  IgnoreUnused(graphDef);
3013 
3014  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
3015  IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3016  TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
3017 
3018  if (inputs.size() != 1)
3019  {
3020  throw ParseException(
3021  fmt::format("2D Pooling expects one input. Got {} for Node {} {}",
3022  inputs.size(),
3023  nodeDef.name(),
3024  CHECK_LOCATION().AsString()));
3025  }
3026 
3027  std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
3028  std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
3029  std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
3030  std::vector<uint32_t> ksize = ReadMandatoryNodeUint32ListAttribute(nodeDef, "ksize"); // size of pool windows
3031 
3032  Pooling2dDescriptor pooling2dDescriptor;
3033  pooling2dDescriptor.m_PoolType = pooltype;
3034  pooling2dDescriptor.m_PaddingMethod = PaddingMethod::Exclude;
3035  pooling2dDescriptor.m_OutputShapeRounding = OutputShapeRounding::Floor;
3036 
3037  CHECK_DATA_FORMAT(nodeDef, dataFormat, "Pooling2D");
3038  DataLayout dataLayout = dataFormat == "NHWC" ? DataLayout::NHWC : DataLayout::NCHW;
3039  pooling2dDescriptor.m_DataLayout = dataLayout;
3040  DataLayoutIndexed dataLayoutIndexed(dataLayout);
3041 
3042  pooling2dDescriptor.m_StrideX = strides[dataLayoutIndexed.GetWidthIndex()];
3043  pooling2dDescriptor.m_StrideY = strides[dataLayoutIndexed.GetHeightIndex()];
3044  pooling2dDescriptor.m_PoolWidth = ksize[dataLayoutIndexed.GetWidthIndex()];
3045  pooling2dDescriptor.m_PoolHeight = ksize[dataLayoutIndexed.GetHeightIndex()];
3046 
3047  uint32_t inputHeight = inputTensorInfo.GetShape()[dataLayoutIndexed.GetHeightIndex()];
3048  uint32_t inputWidth = inputTensorInfo.GetShape()[dataLayoutIndexed.GetWidthIndex()];
3049 
3050  bool padding = false;
3051  TensorInfo outputInfo;
3052  unsigned int outputHeight = 0;
3053  unsigned int outputWidth = 0;
3054 
3055  CHECK_PADDING_TYPE(nodeDef, paddingString);
3056 
3057  if (paddingString == "SAME")
3058  {
3059  padding = true;
3060 
3061  outputHeight = static_cast<uint32_t>(ceil(static_cast<float>(inputHeight) /
3062  static_cast<float>(pooling2dDescriptor.m_StrideY)));
3063  outputWidth = static_cast<uint32_t>(ceil(static_cast<float>(inputWidth) /
3064  static_cast<float>(pooling2dDescriptor.m_StrideX)));
3065  }
3066  else if (paddingString == "VALID")
3067  {
3068  padding = false;
3069 
3070  outputHeight = static_cast<uint32_t>(ceil(
3071  static_cast<float>(inputHeight - pooling2dDescriptor.m_PoolHeight + 1) /
3072  static_cast<float>(pooling2dDescriptor.m_StrideY)));
3073  outputWidth = static_cast<uint32_t>(ceil(
3074  static_cast<float>(inputWidth - pooling2dDescriptor.m_PoolWidth + 1) /
3075  static_cast<float>(pooling2dDescriptor.m_StrideX)));
3076  }
3077 
3078  switch (dataLayout)
3079  {
3080  case DataLayout::NHWC:
3081  outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
3082  outputHeight,
3083  outputWidth,
3084  inputTensorInfo.GetShape()[3] },
3085  DataType::Float32);
3086  break;
3087  case DataLayout::NCHW:
3088  outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
3089  inputTensorInfo.GetShape()[1],
3090  outputHeight,
3091  outputWidth },
3092  DataType::Float32);
3093  break;
3094  }
3095 
3096  CalcPadding(inputWidth, pooling2dDescriptor.m_PoolWidth, pooling2dDescriptor.m_StrideX, 1u,
3097  pooling2dDescriptor.m_PadLeft, pooling2dDescriptor.m_PadRight, padding);
3098  CalcPadding(inputHeight, pooling2dDescriptor.m_PoolHeight, pooling2dDescriptor.m_StrideY, 1u,
3099  pooling2dDescriptor.m_PadTop, pooling2dDescriptor.m_PadBottom, padding);
3100 
3101 
3102  IConnectableLayer* layer = m_Network->AddPooling2dLayer(pooling2dDescriptor, nodeDef.name().c_str());
3103  if (layer == nullptr)
3104  {
3105  throw ParseException(
3106  fmt::format("Failed to add pooling2d layer for {} {}",
3107  nodeDef.name(),
3108  CHECK_LOCATION().AsString()));
3109  }
3110 
3111  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3112 
3113  inputSlot.Connect(layer->GetInputSlot(0));
3114 
3115  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3116 }
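// Editor's note: a worked example of the two padding modes handled above. With
// inputHeight = 7, m_PoolHeight = 3 and m_StrideY = 2, "SAME" gives
// outputHeight = ceil(7 / 2) = 4 (CalcPadding then splits the required padding between
// m_PadTop and m_PadBottom), while "VALID" gives outputHeight = ceil((7 - 3 + 1) / 2) = 3
// with no padding added.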
3117 
3118 ParsedTfOperationPtr ITfParser::TfParserImpl::AddAdditionLayer(const tensorflow::NodeDef& nodeDef, bool isBiasAdd)
3119 {
3120  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3121 
3122  IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3123  IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3124 
3125  const TensorInfo& input0Info = input0Slot->GetTensorInfo();
3126  const TensorInfo& input1Info = input1Slot->GetTensorInfo();
3127 
3128  if (isBiasAdd)
3129  {
3130  // BiasAdd takes bias as a 1D tensor. We need to add a reshape layer to create a 4D tensor
3131  // with the same data in the correct dimension for broadcast in addition.
3132  if(input1Info.GetNumDimensions() != 1)
3133  {
3134  throw ParseException(
3135  fmt::format("Unsupported bias for BiasAdd. It should be a 1D vector. "
3136  "Got {} dimensions for input {}. Node {} {}",
3137  input1Info.GetNumDimensions(),
3138  inputs[1].m_IndexedValue->GetNode().name(),
3139  nodeDef.name(),
3140  CHECK_LOCATION().AsString()));
3141  }
3142 
3143  const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
3144 
3145  CHECK_DATA_FORMAT(nodeDef, dataFormat, "BiasAdd");
3146  input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, dataFormat == "NHWC", *m_Network, nodeDef);
3147  }
3148  else
3149  {
3150  if (input0Info.GetNumDimensions() == 1)
3151  {
3152  const bool isNHWC = true;
3153  input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
3154  }
3155 
3156  if (input1Info.GetNumDimensions() == 1)
3157  {
3158  const bool isNHWC = true;
3159  input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
3160  }
3161  }
3162 
3163  IConnectableLayer* const layer = m_Network->AddAdditionLayer(nodeDef.name().c_str());
3164 
3165  input0Slot->Connect(layer->GetInputSlot(0));
3166  input1Slot->Connect(layer->GetInputSlot(1));
3167 
3168  if (input0Info.GetNumDimensions() == input1Info.GetNumDimensions())
3169  {
3170  const TensorShape& input0Shape = input0Info.GetShape();
3171  const TensorShape& input1Shape = input1Info.GetShape();
3172 
3173  std::vector<unsigned int> outputShape;
3174  outputShape.reserve(input0Shape.GetNumDimensions());
3175  TensorInfo outputInfo(input0Info);
3176 
3177  for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
3178  {
3179  outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
3180  }
3181 
3182  outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
3183 
3184  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3185  }
3186  else if (input0Info.GetNumDimensions() == 1 && isBiasAdd == false)
3187  {
3188  layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
3189  }
3190  else
3191  {
3192  layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
3193  }
3194 
3195  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3196 }
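// Editor's note (assumption about AddBroadcastReshapeLayer's behaviour): for the BiasAdd
// path above, a 1D bias of shape {C} is reshaped so that it broadcasts against the 4D
// input, e.g. to {1, 1, 1, C} for the "NHWC" data format and {1, C, 1, 1} for "NCHW".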
3197 
3198 ParsedTfOperationPtr ITfParser::TfParserImpl::AddRealDivLayer(const tensorflow::NodeDef& nodeDef)
3199 {
3200  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3201 
3202  IConnectableLayer* const layer = m_Network->AddDivisionLayer(nodeDef.name().c_str());
3203  IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3204  IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3205 
3206  auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
3207  auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
3208 
3209 
3210  if (input0NumDims < input1NumDims)
3211  {
3212  const bool isNHWC = true;
3213  input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
3214  }
3215  if (input1NumDims < input0NumDims)
3216  {
3217  const bool isNHWC = true;
3218  input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
3219  }
3220 
3221  input0Slot->Connect(layer->GetInputSlot(0));
3222  input1Slot->Connect(layer->GetInputSlot(1));
3223 
3224  if (input0NumDims < input1NumDims)
3225  {
3226  layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
3227  }
3228  else
3229  {
3230  layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
3231 
3232  }
3233  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3234 }
3235 
3236 ParsedTfOperationPtr ITfParser::TfParserImpl::AddMaximumLayer(const tensorflow::NodeDef& nodeDef)
3237 {
3238  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3239 
3240  IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3241  IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3242 
3243  auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
3244  auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
3245 
3246  if (input0NumDims < input1NumDims)
3247  {
3248  const bool isNHWC = true;
3249  input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
3250  }
3251  if (input1NumDims < input0NumDims)
3252  {
3253  const bool isNHWC = true;
3254  input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
3255  }
3256 
3257  IConnectableLayer* const layer = m_Network->AddMaximumLayer(nodeDef.name().c_str());
3258 
3259  input0Slot->Connect(layer->GetInputSlot(0));
3260  input1Slot->Connect(layer->GetInputSlot(1));
3261 
3262  TensorInfo outputInfo = input0Slot->GetTensorInfo();
3263  std::vector<unsigned int> outputShape;
3264 
3265  const TensorShape& input0Shape = input0Slot->GetTensorInfo().GetShape();
3266  const TensorShape& input1Shape = input1Slot->GetTensorInfo().GetShape();
3267 
3268  for (unsigned int i = 0; i < input0Shape.GetNumDimensions(); i++)
3269  {
3270  outputShape.push_back(std::max(input0Shape[i], input1Shape[i]));
3271  }
3272 
3273  outputInfo.SetShape(TensorShape(input0Shape.GetNumDimensions(), outputShape.data()));
3274  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3275 
3276  return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
3277 }
3278 
3279 IConnectableLayer* ITfParser::TfParserImpl::AddMultiplicationLayer(const tensorflow::NodeDef& nodeDef)
3280 {
3281  std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
3282 
3283  IConnectableLayer* const layer = m_Network->AddMultiplicationLayer(nodeDef.name().c_str());
3284  IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
3285  IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
3286 
3287  auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
3288  auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
3289 
3290  if (input0NumDims < input1NumDims)
3291  {
3292  const bool isNHWC = true;
3293  input0Slot = AddBroadcastReshapeLayer(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
3294  }
3295  if (input1NumDims < input0NumDims)
3296  {
3297  const bool isNHWC = true;
3298  input1Slot = AddBroadcastReshapeLayer(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
3299  }
3300 
3301  input0Slot->Connect(layer->GetInputSlot(0));
3302  input1Slot->Connect(layer->GetInputSlot(1));
3303 
3304  if (input0NumDims < input1NumDims)
3305  {
3306  layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
3307  }
3308  else
3309  {
3310  layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
3311  }
3312  return layer;
3313 }
3314 
3315 IConnectableLayer* ITfParser::TfParserImpl::AddFullyConnectedLayer(const tensorflow::NodeDef& matMulNodeDef,
3316  const tensorflow::NodeDef* addNodeDef, const char* armnnLayerName)
3317 {
3318  // Finds bias const (if applicable).
3319  ParsedConstTfOperation<float>* biasNode = nullptr;
3320  if (addNodeDef != nullptr)
3321  {
3322  std::vector<OutputOfParsedTfOperation> addInputs = GetInputParsedTfOperationsChecked(*addNodeDef, 2);
3323  // Finds our inputs.
3324  if (HasParsedConstTensor<float>(addInputs[0].m_IndexedValue->GetNode().name()))
3325  {
3326  biasNode = PolymorphicDowncast<ParsedConstTfOperation<float>*>(addInputs[0].m_IndexedValue);
3327  }
3328  else if (HasParsedConstTensor<float>(addInputs[1].m_IndexedValue->GetNode().name()))
3329  {
3330  biasNode = PolymorphicDowncast<ParsedConstTfOperation<float>*>(addInputs[1].m_IndexedValue);
3331  }
3332  else
3333  {
3334  throw ParseException(
3335  fmt::format("ArmNN only supports fully connected layers with constant bias. "
3336  "Inputs {} and {}. AddNode {}. MatMulNode {} {}",
3337  addInputs[0].m_IndexedValue->GetNode().name(),
3338  addInputs[1].m_IndexedValue->GetNode().name(),
3339  addNodeDef->name(),
3340  matMulNodeDef.name(),
3341  CHECK_LOCATION().AsString()));
3342  }
3343  }
3344 
3345  // Finds matmul inputs.
3346  ParsedConstTfOperation<float>* weightNode = nullptr;
3347  ParsedTfOperation* inputNode = nullptr;
3348  unsigned int inputIdx = 0;
3349  std::vector<OutputOfParsedTfOperation> mulInputs = GetInputParsedTfOperationsChecked(matMulNodeDef, 2);
3350  if (HasParsedConstTensor<float>(mulInputs[0].m_IndexedValue->GetNode().name()))
3351  {
3352  weightNode = PolymorphicDowncast<ParsedConstTfOperation<float>*>(mulInputs[0].m_IndexedValue);
3353  inputNode = mulInputs[1].m_IndexedValue;
3354  inputIdx = mulInputs[1].m_Index;
3355  }
3356  else if (HasParsedConstTensor<float>(mulInputs[1].m_IndexedValue->GetNode().name()))
3357  {
3358  weightNode = PolymorphicDowncast<ParsedConstTfOperation<float>*>(mulInputs[1].m_IndexedValue);
3359  inputNode = mulInputs[0].m_IndexedValue;
3360  inputIdx = mulInputs[0].m_Index;
3361  }
3362  else
3363  {
3364  throw ParseException(
3365  fmt::format("ArmNN only supports fully connected layers with constant weights. "
3366  "Inputs {} and {}. MatMulNode {} {}",
3367  mulInputs[0].m_IndexedValue->GetNode().name(),
3368  mulInputs[1].m_IndexedValue->GetNode().name(),
3369  matMulNodeDef.name(),
3370  CHECK_LOCATION().AsString()));
3371  }
3372 
3373  std::vector<float> weightTensorData;
3374  // Handles weight.
3375  ConstTensor weights = weightNode->GetConstTensor(weightTensorData);
3376 
3377  FullyConnectedDescriptor desc;
3378  desc.m_BiasEnabled = addNodeDef != nullptr;
3379 
3380  IConnectableLayer* layer = nullptr;
3381  Optional<ConstTensor> optionalBiases;
3382  std::vector<float> biasTensorData;
3383  // Makes the layer.
3384  if (addNodeDef != nullptr)
3385  {
3386  ConstTensor biases = biasNode->GetConstTensor(biasTensorData);
3387 
3388  if (weights.GetShape()[1] != biases.GetShape()[0])
3389  {
3390  throw ParseException(
3391  fmt::format("Shapes of matmul weights and bias do not match. "
3392  "AddNode {}. MatMulNode {} {}",
3393  addNodeDef->name(),
3394  matMulNodeDef.name(),
3395  CHECK_LOCATION().AsString()));
3396  }
3397 
3398  optionalBiases = Optional<ConstTensor>(biases);
3399  }
3400  layer = m_Network->AddFullyConnectedLayer(desc, weights, optionalBiases, armnnLayerName);
3401 
3402  ARMNN_ASSERT(layer != nullptr);
3403 
3404  inputNode->ResolveArmnnOutputSlot(inputIdx).Connect(layer->GetInputSlot(0));
3405  unsigned int batches = inputNode->ResolveArmnnOutputSlot(inputIdx).GetTensorInfo().GetShape()[0];
3406 
3407  // Handles output.
3408  TensorInfo outputInfo({ batches, weights.GetShape()[1] }, DataType::Float32);
3409  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3410  return layer;
3411 }
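// Editor's note: shape expectations in the code above. The constant MatMul weights are
// laid out as {inputSize, outputSize}, an optional constant bias must be {outputSize}
// (hence the weights.GetShape()[1] == biases.GetShape()[0] check), and the resulting
// fully connected output is {batches, outputSize}.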
3412 
3413 void ITfParser::TfParserImpl::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
3414 {
3415  // Gets the type of the node (assume float).
3416  tensorflow::DataType type = tensorflow::DT_FLOAT;
3417  if (nodeDef.attr().count("T") != 0)
3418  {
3419  auto attr = nodeDef.attr().at("T");
3420  type = attr.type();
3421  }
3422  else if (nodeDef.attr().count("dtype") != 0)
3423  {
3424  auto attr = nodeDef.attr().at("dtype");
3425  type = attr.type();
3426  }
3427 
3428  if ((type != tensorflow::DT_FLOAT && type != tensorflow::DT_INT32) && nodeDef.op() != "Const")
3429  {
3430  throw ParseException(
3431  fmt::format("Currently only FLOAT and INT32 are supported for tensorflow nodes (apart from Const). "
3432  "Got {} for Node {} {}",
3433  tensorflow::DataType_Name(type),
3434  nodeDef.name(),
3435  CHECK_LOCATION().AsString()));
3436  }
3437 
3438  const std::string& operation = nodeDef.op();
3439  auto itControlInput = std::find(m_ControlInputs.begin(), m_ControlInputs.end(), operation);
3440  if (itControlInput != m_ControlInputs.end())
3441  {
3442  // We currently accept control inputs from the TensorFlow graph, but ignore them in the ArmNN graph.
3443  return;
3444  }
3445  auto it = ms_OperationNameToParsingFunctions.find(operation);
3446  if (it != ms_OperationNameToParsingFunctions.end())
3447  {
3448  auto func = it->second;
3449  ParsedTfOperationPtr parsedTfOperation = (this->*func)(nodeDef, graphDef);
3450  ParsedTfOperation* parsedTfOperationRaw = parsedTfOperation.get();
3451 
3452  // Stores the parsed operation so that dependent layers can connect to it.
3453  auto it = m_ParsedTfOperations.find(nodeDef.name());
3454  if (it != m_ParsedTfOperations.end())
3455  {
3456  throw ParseException(fmt::format("Name {} used by more than one node", nodeDef.name()));
3457  }
3458  m_ParsedTfOperations[nodeDef.name()] = std::move(parsedTfOperation);
3459 
3460  // If this node was requested as a network output, adds an ArmNN output layer for it.
3461  if (std::find(m_RequestedOutputs.begin(), m_RequestedOutputs.end(), nodeDef.name()) !=
3462  m_RequestedOutputs.end())
3463  {
3464  auto outId = ParseOutputId(nodeDef.name());
3465  const LayerBindingId layerId = armnn::numeric_cast<LayerBindingId>(m_NetworkOutputsBindingInfo.size());
3466  IOutputSlot& prevSlot = parsedTfOperationRaw->ResolveArmnnOutputSlot(outId.m_Index);
3467 
3468  TensorInfo tensorInfo = prevSlot.GetTensorInfo();
3469 
3470  IConnectableLayer* outputLayer = m_Network->AddOutputLayer(layerId, nodeDef.name().c_str());
3471 
3472  prevSlot.Connect(outputLayer->GetInputSlot(0));
3473 
3474  TrackOutputBinding(outputLayer, layerId, tensorInfo);
3475  }
3476  }
3477  else
3478  {
3479  throw ParseException(
3480  fmt::format("Unsupported operation {} in tensorflow::GraphDef {}",
3481  operation,
3482  CHECK_LOCATION().AsString()));
3483  }
3484 }
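// Editor's note: the dispatch above goes through ms_OperationNameToParsingFunctions, a
// static map from TensorFlow op names (e.g. "Conv2D", "MaxPool", "Add") to TfParserImpl
// parsing member functions, so supporting a new op means adding a Parse* member function
// and registering it in that map.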
3485 
3486 void ITfParser::TfParserImpl::LoadGraphDef(const tensorflow::GraphDef& graphDef)
3487 {
3488  // Adds all nodes to our map.
3489  m_NodesByName.clear();
3490  m_NetworkInputsBindingInfo.clear();
3491  m_NetworkOutputsBindingInfo.clear();
3492 
3493  for (int i = 0; i < graphDef.node_size(); ++i)
3494  {
3495  const tensorflow::NodeDef& node = graphDef.node(i);
3496  m_NodesByName[node.name()] = &node;
3497  }
3498 
3499  // Checks that the input nodes the user has requested exist.
3500  for (const auto& pair : m_InputShapes)
3501  {
3502  const std::string& requestedInputName = pair.first;
3503  auto nodeIt = m_NodesByName.find(requestedInputName);
3504  if (nodeIt == m_NodesByName.end())
3505  {
3506  throw ParseException(
3507  fmt::format("Couldn't find requested input node '{}' in graph {}",
3508  requestedInputName,
3509  CHECK_LOCATION().AsString()));
3510  }
3511  }
3512 
3513  // Finds the output nodes the user requested.
3514  std::vector<const tensorflow::NodeDef*> targetNodes;
3515  for (const std::string& requestedOutputName : m_RequestedOutputs)
3516  {
3517  auto nodeIt = m_NodesByName.find(requestedOutputName);
3518  if (nodeIt == m_NodesByName.end())
3519  {
3520  throw ParseException(
3521  fmt::format("Couldn't find requested output node '{}' in graph {}",
3522  requestedOutputName,
3523  CHECK_LOCATION().AsString()));
3524  }
3525  targetNodes.push_back(nodeIt->second);
3526  }
3527 
3528  // Sorts them into a linear ordering such that all inputs of a node are before the node itself.
3529  std::vector<const tensorflow::NodeDef*> sortedNodes;
3530  if (!armnnUtils::GraphTopologicalSort<const tensorflow::NodeDef*>(
3531  targetNodes,
3532  [this](const tensorflow::NodeDef* node)
3533  {
3534  auto outputs = GetTfInputNodes(*node);
3535  std::vector<const tensorflow::NodeDef*> nodesOnly;
3536  for (const auto & o : outputs) {
3537  nodesOnly.push_back(o.m_IndexedValue);
3538  }
3539  return nodesOnly;
3540  },
3541  sortedNodes))
3542  {
3543  throw ParseException(
3544  fmt::format("Cycle detected in graph {}",
3545  CHECK_LOCATION().AsString()));
3546  }
3547 
3548  // Parses each node in order, knowing that all inputs of a node will be processed before the node itself.
3549  for (const auto& it : sortedNodes)
3550  {
3551  const tensorflow::NodeDef& currentNode = *it;
3552  LoadNodeDef(currentNode, graphDef);
3553  }
3554 }
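// Editor's note: because the topological sort starts from the requested output nodes and
// follows their input edges, nodes that do not contribute to any requested output are
// never visited and therefore never parsed.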
3555 
3556 INetworkPtr ITfParser::TfParserImpl::CreateNetworkFromTextFile(const char* graphFile,
3557  const std::map<std::string, TensorShape>& inputShapes,
3558  const std::vector<std::string>& requestedOutputs)
3559 {
3560  FILE* fd = fopen(graphFile, "r");
3561 
3562  if (fd == nullptr)
3563  {
3564  throw FileNotFoundException(
3565  fmt::format("Graph file {} failed to open {}",
3566  graphFile,
3567  CHECK_LOCATION().AsString()));
3568  }
3569 
3570  // Parses the file into a message.
3571  tensorflow::GraphDef graphDef;
3572  auto input = new google::protobuf::io::FileInputStream(fileno(fd));
3573  bool success = google::protobuf::TextFormat::Parse(input, &graphDef);
3574  delete input;
3575  fclose(fd);
3576 
3577  if (!success)
3578  {
3579  throw ParseException(
3580  fmt::format("Failed to parse graph file {}",
3581  CHECK_LOCATION().AsString()));
3582  }
3583 
3584  return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3585 }
3586 
3587 INetworkPtr ITfParser::TfParserImpl::CreateNetworkFromString(const char* protoText,
3588  const std::map<std::string, TensorShape>& inputShapes,
3589  const std::vector<std::string>& requestedOutputs)
3590 {
3591  // Parses the string into a message.
3592  tensorflow::GraphDef graphDef;
3593  bool success = google::protobuf::TextFormat::ParseFromString(protoText, &graphDef);
3594 
3595  if (!success)
3596  {
3597  throw ParseException(
3598  fmt::format("Failed to parse graph string {}",
3599  CHECK_LOCATION().AsString()));
3600  }
3601 
3602  return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3603 }
3604 
3605 INetworkPtr ITfParser::TfParserImpl::CreateNetworkFromBinaryFile(const char* graphFile,
3606  const std::map<std::string, TensorShape>& inputShapes,
3607  const std::vector<std::string>& requestedOutputs)
3608 {
3609  FILE* fd = fopen(graphFile, "rb");
3610 
3611  if (fd == nullptr)
3612  {
3613  throw FileNotFoundException(
3614  fmt::format("Graph file {} failed to open {}",
3615  graphFile,
3616  CHECK_LOCATION().AsString()));
3617  }
3618 
3619  // Parses the file into a message.
3620  tensorflow::GraphDef graphDef;
3621 
3622  google::protobuf::io::FileInputStream inStream(fileno(fd));
3623  google::protobuf::io::CodedInputStream codedStream(&inStream);
3624  codedStream.SetTotalBytesLimit(INT_MAX);
3625  bool success = graphDef.ParseFromCodedStream(&codedStream);
3626  fclose(fd);
3627 
3628  if (!success)
3629  {
3630  throw ParseException(
3631  fmt::format("Failed to parse protobuf file {} {}",
3632  graphFile,
3633  CHECK_LOCATION().AsString()));
3634  }
3635 
3636  return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
3637 }
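// Editor's sketch (not part of the original source): typical client-side use of the
// binary entry point defined above, assuming a hypothetical frozen graph "model.pb" with
// a single input "input" of shape {1, 224, 224, 3} and a single output "output":
//
//   using namespace armnnTfParser;
//   ITfParserPtr parser = ITfParser::Create();
//   armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile(
//       "model.pb",
//       { { "input", armnn::TensorShape({ 1, 224, 224, 3 }) } },
//       { "output" });
//   BindingPointInfo inputInfo  = parser->GetNetworkInputBindingInfo("input");
//   BindingPointInfo outputInfo = parser->GetNetworkOutputBindingInfo("output");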
3638 
3639 INetworkPtr ITfParser::TfParserImpl::CreateNetworkFromGraphDef(const tensorflow::GraphDef& graphDef,
3640  const std::map<std::string, TensorShape>& inputShapes,
3641  const std::vector<std::string>& requestedOutputs)
3642 {
3643  m_Network = INetwork::Create();
3644 
3645  m_InputShapes = inputShapes;
3646  if (requestedOutputs.size() == 0)
3647  {
3648  throw ParseException(
3649  fmt::format("requestedOutputs must have at least one entry {}",
3650  CHECK_LOCATION().AsString()));
3651  }
3652  m_RequestedOutputs = requestedOutputs;
3653 
3654  try
3655  {
3656  LoadGraphDef(graphDef);
3657  }
3658  catch (const ParseException& e)
3659  {
3660  Cleanup();
3661  throw e;
3662  }
3663 
3664  Cleanup();
3665 
3666  return std::move(m_Network);
3667 }
3668 
3669 void ITfParser::TfParserImpl::Cleanup()
3670 {
3671  // Cleanup, in case we reuse this parser.
3672  m_InputShapes.clear();
3673  m_RequestedOutputs.clear();
3674  m_NodesByName.clear();
3675  m_ParsedTfOperations.clear();
3676 }
3677 
3678 BindingPointInfo ITfParser::TfParserImpl::GetNetworkInputBindingInfo(const std::string& name) const
3679 {
3680  return GetBindingInfo(name, "input", m_NetworkInputsBindingInfo);
3681 }
3682 
3683 BindingPointInfo ITfParser::TfParserImpl::GetNetworkOutputBindingInfo(const std::string& name) const
3684 {
3685  return GetBindingInfo(name, "output", m_NetworkOutputsBindingInfo);
3686 }
3687 
3688 std::pair<LayerBindingId, TensorInfo> ITfParser::TfParserImpl::GetBindingInfo(const std::string& layerName,
3689  const char* bindingPointDesc,
3690  const std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3691 {
3692  auto it = nameToBindingInfo.find(layerName);
3693  if (it == nameToBindingInfo.end())
3694  {
3695  throw ParseException(
3696  fmt::format("Unknown {} '{}' {}",
3697  bindingPointDesc,
3698  layerName,
3699  CHECK_LOCATION().AsString()));
3700  }
3701  return it->second;
3702 }
3703 
3704 void ITfParser::TfParserImpl::TrackInputBinding(IConnectableLayer* layer,
3705  LayerBindingId id,
3706  const TensorInfo& tensorInfo)
3707 {
3708  return TrackBindingPoint(layer, id, tensorInfo, "input", m_NetworkInputsBindingInfo);
3709 }
3710 
3711 void ITfParser::TfParserImpl::TrackOutputBinding(IConnectableLayer* layer,
3712  LayerBindingId id,
3713  const TensorInfo& tensorInfo)
3714 {
3715  return TrackBindingPoint(layer, id, tensorInfo, "output", m_NetworkOutputsBindingInfo);
3716 }
3717 
3718 void ITfParser::TfParserImpl::TrackBindingPoint(IConnectableLayer* layer,
3719  LayerBindingId id,
3720  const TensorInfo& tensorInfo,
3721  const char* bindingPointDesc,
3722  std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
3723 {
3724  const std::string layerName = layer->GetName();
3725  auto it = nameToBindingInfo.find(layerName);
3726  if (it == nameToBindingInfo.end())
3727  {
3728  nameToBindingInfo[layerName] = std::make_pair(id, tensorInfo);
3729  }
3730  else
3731  {
3732  throw ParseException(
3733  fmt::format("Id {} used by more than one {} layer {}",
3734  id,
3735  bindingPointDesc,
3736  CHECK_LOCATION().AsString()));
3737  }
3738 }
3739 
3740 const std::string ITfParser::TfParserImpl::GetVersion()
3741 {
3742  return TF_PARSER_VERSION;
3743 }
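// Editor's note: TF_PARSER_VERSION is defined in Version.hpp as an "X.Y.Z" string
// (major.minor.patch version numbers), so GetVersion() simply returns that
// compile-time constant.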
3744 
3745 } // namespace armnnTfParser