ArmNN
 20.02
RecordByRecordCaffeParser.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RecordByRecordCaffeParser.hpp"

#include "armnn/Exceptions.hpp"
#include "armnn/Utils.hpp"

#include "GraphTopologicalSort.hpp"

#include <boost/numeric/conversion/cast.hpp>

// Caffe
#include <google/protobuf/wire_format.h>

//#include <stdio.h>
#include <limits.h>
#include <sstream>
//#include <iostream>
#include <fstream>

namespace armnnCaffeParser
{

// Class which holds information on the absolute position in the stream
// of the data and the length of the data record.
class VarLenDataInfo
{
public:
    VarLenDataInfo(std::streamoff positionOfData, size_t sizeOfData) :
        m_PositionOfData(positionOfData), m_SizeOfData(sizeOfData) {}

    VarLenDataInfo(const VarLenDataInfo& x) :
        m_PositionOfData(x.PositionOfData()), m_SizeOfData(x.SizeOfData()) {}

    VarLenDataInfo& operator=(const VarLenDataInfo& x)
    {
        // handle self assignment
        if (this == &x)
        {
            return *this;
        }
        m_PositionOfData = x.PositionOfData();
        m_SizeOfData = x.SizeOfData();
        return *this;
    }

    std::streamoff PositionOfData() const { return m_PositionOfData; }
    size_t SizeOfData() const { return m_SizeOfData; }

private:
    std::streamoff m_PositionOfData;
    size_t m_SizeOfData;
};

// Class which holds enough information on a LayerParameter in the Caffe protobuf
// format to allow it to be resolved for in-place layering and sorted topologically
// prior to the entire record being parsed into memory.
//
// NOTE: function naming follows that of the protobuf classes these proxies are standing in for
class LayerParameterInfo : public VarLenDataInfo
{
public:
    static const std::string INPUT;

    LayerParameterInfo(const VarLenDataInfo& varLenDataInfo) :
        VarLenDataInfo(varLenDataInfo.PositionOfData(), varLenDataInfo.SizeOfData()),
        m_newTops(false), m_newBottoms(false) {}

    LayerParameterInfo(std::streamoff positionOfData, size_t sizeOfData) :
        VarLenDataInfo(positionOfData, sizeOfData), m_newTops(false), m_newBottoms(false) {}

    LayerParameterInfo(const LayerParameterInfo& x) :
        VarLenDataInfo(x.PositionOfData(), x.SizeOfData()),
        m_name(x.m_name),
        m_type(x.m_type),
        m_tops(x.m_tops),
        m_bottoms(x.m_bottoms),
        m_newTops(x.m_newTops),
        m_newBottoms(x.m_newBottoms) {}

    LayerParameterInfo& operator=(const LayerParameterInfo& x)
    {
        if (this == &x)
        {
            return *this;
        }
        VarLenDataInfo::operator=(x);
        m_name = x.m_name;
        m_type = x.m_type;
        m_tops = x.m_tops;
        m_bottoms = x.m_bottoms;
        m_newTops = x.m_newTops;
        m_newBottoms = x.m_newBottoms;
        return *this;
    }

    const std::string name() const { return m_name; }
    void set_name(const std::unique_ptr<char[]>& theName, size_t length)
    {
        m_name = std::string(theName.get(), length);
    }
    void set_name(const std::string& theName) { m_name = theName; }

    const std::string type() const { return m_type; }
    void set_type(const std::unique_ptr<char[]>& theType, size_t length)
    {
        m_type = std::string(theType.get(), length);
    }
    void set_type(const std::string& theType) { m_type = theType; }

    void add_top(const std::unique_ptr<char[]>& top, size_t length)
    {
        std::string topName(top.get(), length);
        m_tops.push_back(topName);
    }
    void add_top(const std::string& topName)
    {
        m_tops.push_back(topName);
    }
    const std::string top(unsigned long i) const { return m_tops[i]; }
    unsigned long top_size() const { return m_tops.size(); }
    void set_top(unsigned long i, const std::string& newName) { m_tops[i] = newName; m_newTops = true; }
    bool new_tops() const { return m_newTops; }

    void add_bottom(const std::unique_ptr<char[]>& bottom, size_t length)
    {
        std::string bottomName(bottom.get(), length);
        m_bottoms.push_back(bottomName);
    }
    unsigned long bottom_size() const { return m_bottoms.size(); }
    const std::string bottom(unsigned long i) const { return m_bottoms[i]; }
    void set_bottom(unsigned long i, const std::string& newName) { m_bottoms[i] = newName; m_newBottoms = true; }
    bool new_bottoms() const { return m_newBottoms; }

    // If the position and size of the data are zero and the type is "Input" then this is an
    // 'Implicit Input Layer' and needs to be handled differently from ordinary layers
    // (see the example prototxt fragment after this class).
    bool isImplicitInputLayer() const
    {
        return (PositionOfData() == 0) && (SizeOfData() == 0) && (INPUT.compare(type()) == 0);
    }

private:
    std::string m_name;
    std::string m_type;
    std::vector<std::string> m_tops;
    std::vector<std::string> m_bottoms;
    // mark the layers whose topology was changed
    // by the ResolveInPlaceLayers method.
    bool m_newTops;
    bool m_newBottoms;
};
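
// Example (illustrative only, names and dimensions are hypothetical): a deploy-style
// prototxt can declare its input at NetParameter level rather than with an explicit
// Input layer, e.g.
//
//     name: "example_net"
//     input: "data"
//     input_dim: 1
//     input_dim: 3
//     input_dim: 224
//     input_dim: 224
//
// For such networks, ProcessLayers() below synthesises a LayerParameterInfo with type
// "Input", position 0 and size 0; isImplicitInputLayer() identifies it so that
// LoadLayers() can build the corresponding caffe::LayerParameter programmatically.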

// Class which holds the field type (wire type) and field id (id from the .proto schema)
// read from the protobuf messages as per the binary encoding described in
// https://developers.google.com/protocol-buffers/docs/encoding
//
// NOTE: function naming follows that of the protobuf classes these proxies are standing in for
class ProtobufFieldInfo
{
public:
    ProtobufFieldInfo(int field_type, int field_id) :
        m_eof(false), m_field_type(field_type), m_field_id(field_id) {}
    ProtobufFieldInfo() : m_eof(true), m_field_type(0), m_field_id(0) {}

    bool eof() { return m_eof; }
    int field_type() { return m_field_type; }
    int field_id() { return m_field_id; }

private:
    bool m_eof;
    int m_field_type;
    int m_field_id;
};

// There is some NetParameter level data which is required to correctly process some
// Caffe models, specifically those which have 'implicit' input layers. It is also
// useful to have the name of the model.
//
// NOTE: function naming follows that of the protobuf classes these proxies are standing in for
class NetParameterInfo
{
public:
    const std::string name() const { return m_name; }
    void set_name(const std::unique_ptr<char[]>& theName, size_t length)
    {
        m_name = std::string(theName.get(), length);
    }

    void add_input(const std::unique_ptr<char[]>& input, size_t length)
    {
        std::string inputName(input.get(), length);
        m_inputs.push_back(inputName);
    }
    const std::string input(unsigned long i) const { return m_inputs[i]; }
    unsigned long input_size() const { return m_inputs.size(); }

    void add_input_dimension(int input_dimension)
    {
        m_input_dimensions.push_back(input_dimension);
    }
    int input_dimension(unsigned long i) const { return m_input_dimensions[i]; }
    unsigned long input_dimensions_size() const { return m_input_dimensions.size(); }

    void add_blob_shape(caffe::BlobShape shape)
    {
        m_blob_shapes.push_back(shape);
    }
    const caffe::BlobShape blob_shape(unsigned long i) const { return m_blob_shapes[i]; }
    unsigned long blob_shapes_size() const { return m_blob_shapes.size(); }

private:
    std::string m_name;
    std::vector<std::string> m_inputs;
    std::vector<int> m_input_dimensions;
    std::vector<caffe::BlobShape> m_blob_shapes;
};

} // namespace armnnCaffeParser

using namespace armnnCaffeParser;

// Initialise the class const
const std::string LayerParameterInfo::INPUT = "Input";

namespace
{

// Reads the key byte(s) of the next protobuf field and decodes the wire type
// (low three bits) and field id (remaining bits) as described in the encoding
// documentation referenced above.
ProtobufFieldInfo readFieldInfo(std::ifstream& ifs)
{
    unsigned char first_byte = static_cast<unsigned char>(ifs.get());
    if (!ifs.good())
    {
        ProtobufFieldInfo eof;
        return eof;
    }
    int field_type = first_byte & 7;
    int field_id   = first_byte >> 3;
    // If the continuation bit of the key byte was set, the field id spills into a second byte.
    if ((field_id & 16) == 16)
    {
        unsigned char second_byte = static_cast<unsigned char>(ifs.get());
        if (!ifs.good())
        {
            ProtobufFieldInfo eof;
            return eof;
        }
        field_id = (field_id - 16) + ((second_byte & 127) << 4);
    }
    ProtobufFieldInfo fieldInfo(field_type, field_id);
    return fieldInfo;
}
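
// Worked example (illustrative, following the protobuf encoding rules linked above):
// a LayerParameter record in a NetParameter is field 100 with wire type 2
// (length-delimited), so its key is (100 << 3) | 2 = 802, encoded as the varint
// bytes 0xA2 0x06. readFieldInfo() reads 0xA2 and extracts wire type 2 (0xA2 & 7)
// and a provisional id of 20 (0xA2 >> 3); because bit 4 of that id (the varint
// continuation bit of the original byte) is set, it reads the second byte 0x06 and
// reconstructs the id as (20 - 16) + ((0x06 & 127) << 4) = 100.
// Single-byte keys decode the same way, e.g. 0x0A is field 1, wire type 2
// (the 'name' string of a message).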

const static int MAX_NUM_BYTES = 5;

// Reads a base-128 (varint) encoded integer: seven bits per byte, least significant
// group first; the top bit of each byte indicates whether another byte follows.
int ReadBase128(std::ifstream& ifs)
{
    int result = 0;
    unsigned int shift_by = 0;
    int bytesRead = 0;
    while (true)
    {
        unsigned char a_byte = static_cast<unsigned char>(ifs.get());
        ++bytesRead;
        if (bytesRead > MAX_NUM_BYTES)
        {
            throw armnn::ParseException(
                "ReadBase128 exceeded the maximum number of bytes expected for an integer representation");
        }
        result += (a_byte & 127) << shift_by;
        shift_by += 7;
        if ((a_byte & 128) != 128)
        {
            break;
        }
    }
    return result;
}
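
// Worked example (illustrative): the varint bytes 0xAC 0x02 decode to 300.
// The first byte 0xAC contributes its low seven bits (0x2C = 44) at shift 0 and has
// its top bit set, so another byte follows; the second byte 0x02 contributes
// 2 << 7 = 256 and has a clear top bit, terminating the loop: 44 + 256 = 300.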

std::unique_ptr<char[]> AllocateBuffer(std::ifstream& ifs, VarLenDataInfo& dataInfo)
{
    std::unique_ptr<char[]> ptr(new char[dataInfo.SizeOfData()]);
    ifs.clear();
    ifs.seekg(dataInfo.PositionOfData(), std::ios_base::beg);
    ifs.read(ptr.get(), boost::numeric_cast<std::streamsize>(dataInfo.SizeOfData()));
    return ptr;
}

VarLenDataInfo CreateVarLenDataInfo(std::streamoff bufferStart, std::streamoff endOfLayer)
{
    std::streamoff sizeOfLayer = endOfLayer - bufferStart;
    if (sizeOfLayer < 0)
    {
        std::stringstream ss;
        ss << "error when determining buffer size, negative value [" << sizeOfLayer << "]";
        throw armnn::ParseException(ss.str());
    }
    // NOTE: as some of the data being read in will be translated into strings (names of layers etc.)
    // the maximum size we can deal with is the upper size limit of a string, i.e. size_t.
    // On platforms where std::streamoff is signed long int and size_t is unsigned long int this
    // condition can never fire, but the code is supposed to be portable so the check remains in place.
    if (boost::numeric_cast<size_t>(sizeOfLayer) > SIZE_MAX)
    {
        std::stringstream ss;
        ss << "layer is greater than " << SIZE_MAX << " in size, cannot process. layer size = [" << sizeOfLayer << "]";
        throw armnn::ParseException(ss.str());
    }
    VarLenDataInfo info(bufferStart, boost::numeric_cast<size_t>(sizeOfLayer));
    return info;
}

void ReadTopologicalInfoForLayerParameter(LayerParameterInfo& layerInfo, std::ifstream& ifs)
{
    // position the file pointer to the start of the layer data
    ifs.clear();
    ifs.seekg(layerInfo.PositionOfData(), std::ios_base::beg);
    std::streamoff endOfLayer = layerInfo.PositionOfData() +
        boost::numeric_cast<std::streamoff>(layerInfo.SizeOfData());
    while (true)
    {
        // check to see if we have reached the end of the record
        std::streamoff currentPosition = ifs.tellg();
        if (currentPosition >= endOfLayer)
        {
            return;
        }
        // read the information for the next field.
        ProtobufFieldInfo fieldInfo = readFieldInfo(ifs);
        if (fieldInfo.eof())
        {
            return;
            // TODO: figure out whether this is an error condition or not...
            //throw armnn::ParseException("failed to read field from LayerParameter data");
        }
        // process the field
        switch (fieldInfo.field_type())
        {
            case 0:
            {
                ReadBase128(ifs);
                break;
            }
            case 2:
            {
                int size = ReadBase128(ifs);
                std::streamoff posStartOfData = ifs.tellg();
                VarLenDataInfo dataInfo(posStartOfData, boost::numeric_cast<size_t>(size));
                // The fields of interest from the LayerParameter schema are:
                //   optional string name = 1;   // the layer name
                //   optional string type = 2;   // the layer type
                //   repeated string bottom = 3; // the name of each bottom blob
                //   repeated string top = 4;    // the name of each top blob
                if (fieldInfo.field_id() == 1)
                {
                    // read and set the name of the layer
                    auto layerName = AllocateBuffer(ifs, dataInfo);
                    layerInfo.set_name(layerName, dataInfo.SizeOfData());
                }
                else if (fieldInfo.field_id() == 2)
                {
                    // read and set the type of the layer
                    auto layerType = AllocateBuffer(ifs, dataInfo);
                    layerInfo.set_type(layerType, dataInfo.SizeOfData());
                }
                else if (fieldInfo.field_id() == 3)
                {
                    // read and add a bottom to the layer
                    auto bottom = AllocateBuffer(ifs, dataInfo);
                    layerInfo.add_bottom(bottom, dataInfo.SizeOfData());
                }
                else if (fieldInfo.field_id() == 4)
                {
                    // read and add a top to the layer
                    auto top = AllocateBuffer(ifs, dataInfo);
                    layerInfo.add_top(top, dataInfo.SizeOfData());
                }
                else
                {
                    // not a field we care about: skip over its payload
                    ifs.seekg(size, std::ios_base::cur);
                    if (!ifs.good())
                    {
                        // TODO: error out?
                        return;
                    }
                }
                break;
            }
            case 1:
            {
                // 64 bit: advance by eight bytes
                ifs.seekg(8, std::ios_base::cur);
                if (!ifs.good())
                {
                    // TODO: error out?
                    return;
                }
                break;
            }
            case 5:
            {
                // 32 bit: advance by four bytes
                ifs.seekg(4, std::ios_base::cur);
                if (!ifs.good())
                {
                    // TODO: error out?
                    return;
                }
                break;
            }
            default:
            {
                throw armnn::ParseException("Encountered an unknown field type");
            }
        }
    }
}
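
// Summary of the wire types handled above (per the protobuf encoding documentation):
//   0 - varint:           decoded (and discarded) with ReadBase128()
//   1 - 64-bit:           skipped by seeking 8 bytes forward
//   2 - length-delimited: a varint length followed by that many payload bytes; the
//                         payload is read for the name/type/bottom/top fields and
//                         skipped for everything else
//   5 - 32-bit:           skipped by seeking 4 bytes forward
// The deprecated group wire types (3 and 4) fall through to the default case and
// raise a ParseException.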

void ResolveInPlaceLayers(std::vector<LayerParameterInfo>& layerInfo)
{
    std::map<std::string, std::vector<LayerParameterInfo*>> layersByTop;
    for (auto& info : layerInfo)
    {
        for (unsigned long i = 0; i < info.top_size(); ++i)
        {
            layersByTop[info.top(i)].push_back(&info);
        }
    }
    // For each set of layers with the same top, resolve them to a linear chain rather than in-place layers.
    // Note that for 'regular' layers, there will be a single layer in each group and so this will be a no-op.
    for (auto& layersWithSameTopIterator : layersByTop)
    {
        const std::string& top = layersWithSameTopIterator.first;
        const std::vector<LayerParameterInfo*> layersWithSameTop = layersWithSameTopIterator.second;

        // Chain the layers together in the order that they are listed in the prototxt (hopefully this is correct).
        // Note that the last layer will not have its top modified so that other layers will continue to reference it.
        for (unsigned int layerIdx = 0; layerIdx < layersWithSameTop.size() - 1; ++layerIdx)
        {
            LayerParameterInfo* layer1 = layersWithSameTop[layerIdx];
            LayerParameterInfo* layer2 = layersWithSameTop[layerIdx + 1];
            if (layer1->top_size() != 1)
            {
                throw armnn::ParseException("Node '" + layer1->name() + "' is an in-place layer but "
                                            "doesn't have exactly one top.");
            }
            std::string newTop = layer1->name() + "_top";
            layer1->set_top(0, newTop);
            if (layer2->bottom_size() != 1 || layer2->bottom(0) != top)
            {
                throw armnn::ParseException("Node '" + layer2->name() + "' is an in-place layer but "
                                            "doesn't have exactly one bottom, or its bottom doesn't match its top.");
            }
            layer2->set_bottom(0, newTop);
        }
    }
}
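
// Worked example (illustrative, layer names are hypothetical): given a convolution
// "conv1" with top "conv1" followed by an in-place ReLU "relu1" with bottom "conv1"
// and top "conv1", both layers end up in the same layersByTop group. The loop above
// renames the convolution's top to "conv1_top" and rewires the ReLU's bottom to
// "conv1_top", while the ReLU keeps the top "conv1" so that any later layer which
// consumes "conv1" now reads the ReLU's output:
//
//     before:  conv1(top=conv1)      relu1(bottom=conv1,     top=conv1)
//     after:   conv1(top=conv1_top)  relu1(bottom=conv1_top, top=conv1)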

} // anonymous namespace, can't be seen outside this source file

RecordByRecordCaffeParser::RecordByRecordCaffeParser()
{}

armnn::INetworkPtr RecordByRecordCaffeParser::CreateNetworkFromBinaryFile(
    const char* graphFile,
    const std::map<std::string, armnn::TensorShape>& inputShapes,
    const std::vector<std::string>& requestedOutputs)
{
    m_InputShapes = inputShapes;
    if (requestedOutputs.size() == 0)
    {
        throw armnn::ParseException("requestedOutputs must have at least one entry");
    }
    m_RequestedOutputs = requestedOutputs;

    std::ifstream ifs(graphFile, std::ifstream::in | std::ifstream::binary);
    if (ifs.fail())
    {
        throw armnn::FileNotFoundException("Failed to open graph file '" + std::string(graphFile) + "'");
    }

    std::vector<LayerParameterInfo> layerInfo;
    NetParameterInfo netParameterInfo;
    while (true)
    {
        ProtobufFieldInfo fieldInfo = readFieldInfo(ifs);
        if (fieldInfo.eof())
        {
            break;
        }
        switch (fieldInfo.field_type())
        {
            case 0:
            {
                ReadBase128(ifs);
                break;
            }
            case 2:
            {
                // The values of interest from the caffe.proto schema are:
                //   optional string name = 1; // consider giving the network a name
                //
                //   DEPRECATED. See InputParameter. The input blobs to the network.
                //   repeated string input = 3;
                //
                //   DEPRECATED. See InputParameter. The shape of the input blobs.
                //   repeated BlobShape input_shape = 8;
                //
                //   4D input dimensions -- deprecated. Use "input_shape" instead.
                //   If specified, for each input blob there should be four
                //   values specifying the num, channels, height and width of the input blob.
                //   Thus, there should be a total of (4 * #input) numbers.
                //   repeated int32 input_dim = 4;
                //
                //   The layers that make up the net. Each of their configurations, including
                //   connectivity and behavior, is specified as a LayerParameter.
                //   repeated LayerParameter layer = 100; // ID 100 so layers are printed last.
                //
                // The first four will (if present) be read into the NetParameterInfo;
                // the LayerParameters will be read into the LayerParameterInfo vector.

                int size = ReadBase128(ifs);
                std::streamoff posStartOfData = ifs.tellg();
                ifs.seekg(size, std::ios_base::cur);
                if (!ifs.good())
                {
                    throw armnn::ParseException("failed to seek ahead in binary caffe file");
                }
                std::streamoff endOfLayer = ifs.tellg();
                if (fieldInfo.field_id() == 1)
                {
                    VarLenDataInfo dataInfo = CreateVarLenDataInfo(posStartOfData, endOfLayer);
                    auto graphName = AllocateBuffer(ifs, dataInfo);
                    netParameterInfo.set_name(graphName, dataInfo.SizeOfData());
                }
                if (fieldInfo.field_id() == 3)
                {
                    VarLenDataInfo dataInfo = CreateVarLenDataInfo(posStartOfData, endOfLayer);
                    auto inputName = AllocateBuffer(ifs, dataInfo);
                    netParameterInfo.add_input(inputName, dataInfo.SizeOfData());
                }
                if (fieldInfo.field_id() == 8)
                {
                    VarLenDataInfo dataInfo = CreateVarLenDataInfo(posStartOfData, endOfLayer);
                    auto inputShape = AllocateBuffer(ifs, dataInfo);
                    caffe::BlobShape blobShape;
                    bool bRet = blobShape.ParseFromArray(inputShape.get(), static_cast<int>(dataInfo.SizeOfData()));
                    if (!bRet)
                    {
                        throw armnn::ParseException("Failed to parse input shape");
                    }
                    netParameterInfo.add_blob_shape(blobShape);
                }
                if (fieldInfo.field_id() == 4)
                {
                    int input_dim = ReadBase128(ifs);
                    netParameterInfo.add_input_dimension(input_dim);
                }
                if (fieldInfo.field_id() == 100)
                {
                    LayerParameterInfo info(CreateVarLenDataInfo(posStartOfData, endOfLayer));
                    ReadTopologicalInfoForLayerParameter(info, ifs);
                    layerInfo.push_back(info);
                }
                break;
            }
            default:
            {
                break;
            }
        }
    }
    std::vector<const LayerParameterInfo*> sortedNodes;
    ProcessLayers(netParameterInfo, layerInfo, m_RequestedOutputs, sortedNodes);
    armnn::INetworkPtr networkPtr = LoadLayers(ifs, sortedNodes, netParameterInfo);
    return networkPtr;
}

void RecordByRecordCaffeParser::ProcessLayers(
    const NetParameterInfo& netParameterInfo,
    std::vector<LayerParameterInfo>& layerInfo,
    const std::vector<std::string>& m_RequestedOutputs,
    std::vector<const LayerParameterInfo*>& sortedNodes)
{
    // if there is an implicit input layer add it to the layerInfo list
    if (netParameterInfo.input_size() > 0)
    {
        LayerParameterInfo implicitInputLayer(0, 0);
        implicitInputLayer.set_type(LayerParameterInfo::INPUT);
        implicitInputLayer.set_name(netParameterInfo.input(0));
        implicitInputLayer.add_top(netParameterInfo.input(0));
        layerInfo.push_back(implicitInputLayer);
    }
    ::ResolveInPlaceLayers(layerInfo);

    for (LayerParameterInfo& info : layerInfo)
    {
        for (unsigned long i = 0; i < info.top_size(); ++i)
        {
            m_CaffeLayersByTopName[info.top(i)] = &info;
        }
    }

    // Find the output layers the user requested
    std::vector<const LayerParameterInfo*> targetLayers;
    for (const std::string& requestedOutputName : m_RequestedOutputs)
    {
        auto nodeIt = m_CaffeLayersByTopName.find(requestedOutputName);
        if (nodeIt == m_CaffeLayersByTopName.end())
        {
            throw armnn::ParseException(
                "Couldn't find requested output layer '" + requestedOutputName + "' in graph");
        }
        targetLayers.push_back(nodeIt->second);
    }

    // Sort them into a linear ordering such that all inputs of a node are before the node itself
    if (!armnnUtils::GraphTopologicalSort<const LayerParameterInfo*>(
        targetLayers,
        [this](const LayerParameterInfo* node)
        {
            return GetInputs(*node);
        },
        sortedNodes))
    {
        throw armnn::ParseException("Cycle detected in graph");
    }
}


std::vector<const LayerParameterInfo*> RecordByRecordCaffeParser::GetInputs(
    const LayerParameterInfo& layerParam)
{
    std::vector<const LayerParameterInfo*> ret;
    ret.reserve(layerParam.bottom_size());
    for (unsigned long j = 0; j < layerParam.bottom_size(); ++j)
    {
        std::string inputName = layerParam.bottom(j);
        auto inputIt = m_CaffeLayersByTopName.find(inputName);
        if (inputIt == m_CaffeLayersByTopName.end())
        {
            throw armnn::ParseException(
                "Can't find Caffe layer with top called '" + inputName + "', which is listed as an input of '" +
                layerParam.name() + "'");
        }
        ret.push_back(inputIt->second);
    }

    return ret;
}

armnn::INetworkPtr RecordByRecordCaffeParser::LoadLayers(std::ifstream& ifs,
                                                         std::vector<const LayerParameterInfo*>& sortedNodes,
                                                         const NetParameterInfo& netParameterInfo)
{
    m_NetworkInputsBindingInfo.clear();
    m_NetworkOutputsBindingInfo.clear();

    m_Network = armnn::INetwork::Create();

    for (auto info : sortedNodes)
    {
        caffe::LayerParameter layer;
        if (info->isImplicitInputLayer())
        {
            // create the matching LayerParameter programmatically from the data in the
            // NetParameterInfo which has been passed in...
            layer.set_type(LayerParameterInfo::INPUT);
            layer.set_name(netParameterInfo.input(0));
            layer.add_top(netParameterInfo.input(0));

            caffe::InputParameter* inputParam = layer.mutable_input_param();
            caffe::BlobShape* shape = inputParam->add_shape();

            long unsigned int dim_size = netParameterInfo.input_dimensions_size();
            for (long unsigned int i = 0; i < dim_size; ++i)
            {
                shape->add_dim(netParameterInfo.input_dimension(i));
            }
        }
        else
        {
            char* buffer = new char[info->SizeOfData()];
            ifs.clear();
            ifs.seekg(info->PositionOfData(), std::ios_base::beg);
            ifs.read(buffer, boost::numeric_cast<std::streamsize>(info->SizeOfData()));
            bool bRet = layer.ParseFromArray(buffer, static_cast<int>(info->SizeOfData()));
            delete[] buffer;
            if (!bRet)
            {
                throw armnn::ParseException("Failed to parse layer [" + info->name() + "]");
            }
        }

        if (info->new_tops())
        {
            // update the tops
            layer.set_top(0, info->top(0));
        }
        if (info->new_bottoms())
        {
            // update the bottoms
            layer.set_bottom(0, info->bottom(0));
        }

        auto it = ms_CaffeLayerNameToParsingFunctions.find(layer.type());
        if (it == ms_CaffeLayerNameToParsingFunctions.end())
        {
            throw armnn::ParseException("Unsupported layer type '" + layer.type() + "'");
        }
        auto func = it->second;
        (this->*func)(layer);
    }
    ifs.close();

    // Add ArmNN output layers connected to each requested output
    for (const std::string& requestedOutput : m_RequestedOutputs)
    {
        armnn::IOutputSlot& outputSlot = GetArmnnOutputSlotForCaffeTop(requestedOutput);

        const armnn::LayerBindingId outputId = boost::numeric_cast<armnn::LayerBindingId>(
            m_NetworkOutputsBindingInfo.size());
        armnn::IConnectableLayer* const outputLayer = m_Network->AddOutputLayer(outputId, requestedOutput.c_str());
        outputSlot.Connect(outputLayer->GetInputSlot(0));

        TrackOutputBinding(outputLayer, outputId, outputLayer->GetInputSlot(0).GetConnection()->GetTensorInfo());
    }

    Cleanup();

    return std::move(m_Network);
}
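
// Usage sketch (illustrative only, not part of this source file). It assumes the public
// armnnCaffeParser::ICaffeParser factory dispatches binary files to this record-by-record
// parser; the model path, input name and shape below are hypothetical.
//
//     #include "armnnCaffeParser/ICaffeParser.hpp"
//
//     using namespace armnnCaffeParser;
//
//     ICaffeParserPtr parser = ICaffeParser::Create();
//     std::map<std::string, armnn::TensorShape> inputShapes;
//     inputShapes["data"] = armnn::TensorShape({ 1, 3, 224, 224 });
//     std::vector<std::string> requestedOutputs = { "prob" };
//     armnn::INetworkPtr network =
//         parser->CreateNetworkFromBinaryFile("model.caffemodel", inputShapes, requestedOutputs);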