ArmNN
 20.08
Graph.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include "Graph.hpp"
#include "SubgraphView.hpp"
#include "LayersFwd.hpp"

#include <armnn/backends/IBackendInternal.hpp>

#include <armnn/BackendId.hpp>
#include <armnn/Logging.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/Utils.hpp>
#include <armnn/utility/Assert.hpp>

#include <boost/format.hpp>

#include <DotSerializer.hpp>

#include <algorithm>
#include <sstream>
#include <unordered_map>
#include <unordered_set>
23 
24 namespace armnn
25 {
26 
27 Graph::Graph(const Graph& other)
28 : m_LayersInOrder(other.m_LayersInOrder)
29 {
30  std::unordered_map<const Layer*, Layer*> otherToClonedMap;
31 
32  for (auto&& otherLayer : other.m_Layers)
33  {
34  Layer* const layer = otherLayer->Clone(*this);
35  otherToClonedMap.emplace(otherLayer, layer);
36  }
37 
38  // Copies slot connections.
39  for (auto&& otherLayer : other.m_Layers)
40  {
41  Layer* const thisLayer = otherToClonedMap[otherLayer];
42 
43  auto outputSlot = thisLayer->BeginOutputSlots();
44  for (auto&& otherOutputSlot : otherLayer->GetOutputSlots())
45  {
46  for (auto&& otherInputSlot : otherOutputSlot.GetConnections())
47  {
48  const Layer& otherTgtLayer = otherInputSlot->GetOwningLayer();
49  Layer* const thisTgtLayer = otherToClonedMap[&otherTgtLayer];
50 
51  InputSlot& inputSlot = thisTgtLayer->GetInputSlot(otherInputSlot->GetSlotIndex());
52  outputSlot->Connect(inputSlot);
53  }
54  outputSlot->SetTensorInfo(otherOutputSlot.GetTensorInfo());
55  ++outputSlot;
56  }
57  }
58 }
59 
61 {
62  if (m_Layers.empty())
63  {
64  ARMNN_LOG(info) << "\n Graph is empty.\n";
65  return Status::Success;
66  }
67  ARMNN_LOG(info) << "\n";
68  ARMNN_LOG(info) << "Walking Pattern: \n";
69 
70  for (auto&& it : TopologicalSort())
71  {
72  ARMNN_LOG(info) << it->GetName() << ":" << GetLayerTypeAsCString(it->GetType())
73  << ":" << it->GetBackendId().Get();
74  }
75  ARMNN_LOG(info) << "\n\n";
76 
77  return Status::Success;
78 }
79 
80 Status Graph::SerializeToDot(std::ostream& stream)
81 {
82  {
83  DotGraph graph(stream, "Optimized");
84 
85  {
86  // Default node attributes:
87  DotDefaults nodes(stream, "node");
88  nodes.GetAttributeSet()
89  .AddAttribute("shape", "record");
90  }
91 
92  {
93  // Default edge attributes:
94  DotDefaults edges(stream, "edge");
95  edges.GetAttributeSet()
96  .AddAttribute("fontsize", 8)
97  .AddAttribute("fontcolor", "blue")
98  .AddAttribute("fontname", "arial-bold");
99  }
100 
101  // First declares the nodes.
102  for (auto&& layer : m_Layers)
103  {
104  DotNode node(stream, layer->GetGuid(), GetLayerTypeAsCString(layer->GetType()));
105  // Extracts the layer parameters.
106  ParameterStringifyFunction extractParams = [&node](const std::string & name, const std::string & value){
107  node.GetContents().AddContent(name + " : " + value);
108  };
109  layer->SerializeLayerParameters(extractParams);
110  }
111 
112  // Second declares the edges.
113  for (auto&& layer : m_Layers)
114  {
115  LayerGuid toId = layer->GetGuid();
116 
117  for (unsigned int i=0;i<layer->GetNumInputSlots(); i++)
118  {
119  OutputSlot* outputSlot = static_cast<OutputSlot*>(layer->GetInputSlot(i).GetConnection());
120  LayerGuid fromId = outputSlot->GetOwningLayer().GetGuid();
121  DotEdge edge(stream, fromId, toId);
122 
123  // Now print the tensor shape on the edge.
124  {
125  // Constructs the label attribute with HTML markup.
126  std::stringstream ss;
127  ss << "< " << outputSlot->GetTensorInfo().GetShape() << " >";
128  edge.GetAttributeSet().AddAttribute("label", ss);
129  }
130  }
131  }
132  }
133 
134  if (stream.bad())
135  {
136  return Status::Failure;
137  }
138  return Status::Success;
139 }
140 
142 {
143  // Layers must be sorted in topological order
144  ARMNN_ASSERT(m_LayersInOrder);
145 
146  std::unordered_set<const ITensorHandle*> preallocatedTensors;
147  std::unordered_map<const ITensorHandle*, unsigned int> handleReferenceCounts;
148 
149  // Finds the first TensorHandle ancestor of a SubTensorHandle. If the ITensorHandle provided
150  // is a TensorHandle, the function just returns it
151  auto TraceSubTensorHandleAncestry = [](ITensorHandle* const subTensorHandle)
152  {
153  ITensorHandle* ancestor = subTensorHandle;
154  while (ancestor && ancestor->GetParent())
155  {
156  ancestor = ancestor->GetParent();
157  }
158  return ancestor;
159  };
160 
161  // Checks whether a TensorHandle has been pre-allocated
162  auto IsPreallocated = [&](ITensorHandle* const tensorHandle)
163  {
164  return tensorHandle && preallocatedTensors.find(tensorHandle) != preallocatedTensors.end();
165  };
166 
167  // Constant tensor handles need to last from the beginning of execution till the end,
168  // therefore we pre-allocate them upfront
169  for (auto&& layer : m_Layers)
170  {
171  if (layer->GetType() == LayerType::Constant)
172  {
173  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
174  {
175  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
176 
177  if (tensorHandle && !IsPreallocated(tensorHandle))
178  {
179  tensorHandle->Allocate();
180  preallocatedTensors.insert(tensorHandle);
181  }
182  }
183  }
184  }
185 
186  // Iterate over the network in topological order
187  for (auto&& layer : m_Layers)
188  {
189  // Count the amount of times each output slot references a certain buffer (ITensorHandle).
190  // The first time we encounter a new tensor handle, we start managing its lifetime.
191  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
192  {
193  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
194 
195  if (tensorHandle && !IsPreallocated(tensorHandle))
196  {
197  unsigned int numConnections = slot->GetNumConnections();
198  if (handleReferenceCounts.find(tensorHandle) == handleReferenceCounts.end())
199  {
200  handleReferenceCounts[tensorHandle] = numConnections;
201  tensorHandle->Manage();
202  if (handleReferenceCounts[tensorHandle] == 0u)
203  {
204  // if nobody consumes this tensor we call Allocate()
205  tensorHandle->Allocate();
206  }
207  }
208  else
209  {
210  handleReferenceCounts[tensorHandle] += numConnections;
211  }
212  }
213  }
214 
215  // Loop through the input slots in the same layer and decrement the reference counter associated
216  // to each tensor handle we encounter. Once it reaches zero, we end the lifetime of the tensor handle
217  for (auto&& slot = layer->BeginInputSlots(); slot != layer->EndInputSlots(); ++slot)
218  {
219  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(
220  slot->GetConnectedOutputSlot()->GetOutputHandler().GetData());
221 
222  if (tensorHandle && !IsPreallocated(tensorHandle))
223  {
224  --handleReferenceCounts[tensorHandle];
225 
226  if (handleReferenceCounts[tensorHandle] == 0u)
227  {
228  // Stop managing lifetime of tensor handle
229  tensorHandle->Allocate();
230  handleReferenceCounts.erase(tensorHandle);
231  }
232  }
233  }
234  }
235 
236  return Status::Success;
237 }
238 
240 {
241  if (!m_LayersInOrder)
242  {
243  // Resets layer order.
244  for (auto&& it : m_Layers)
245  {
246  it->ResetPriority();
247  }
248 
249  auto compareLayerPriority = [](const LayerList::value_type& layerA, const LayerList::value_type& layerB)
250  {
251  return layerA->GetPriority() < layerB->GetPriority();
252  };
253 
254  m_Layers.sort(compareLayerPriority);
255 
256  m_LayersInOrder = true;
257  }
258 
259  return *this;
260 }
261 
262 void Graph::AddCompatibilityLayers(std::map<BackendId, std::unique_ptr<IBackendInternal>>& backends,
263  TensorHandleFactoryRegistry& registry)
264 {
265  // Returns true if the given layer could potentially need an intermediate copy/import layer (depending on its
266  // connections to other layers).
267  auto MayNeedCompatibilityLayer = [](const Layer& layer)
268  {
269  // All layers should have been associated with a valid compute device at this point.
270  ARMNN_ASSERT(layer.GetBackendId() != Compute::Undefined);
271  // Does not need another compatibility layer if a copy or import layer is already present.
272  return layer.GetType() != LayerType::MemCopy &&
273  layer.GetType() != LayerType::MemImport;
274  };
275 
276  auto IsCompatibilityStrategy = [](EdgeStrategy strategy)
277  {
278  return strategy == EdgeStrategy::CopyToTarget ||
279  strategy == EdgeStrategy::ExportToTarget;
280  };
281 
282  ForEachLayer([this, &backends, &registry, MayNeedCompatibilityLayer, IsCompatibilityStrategy](Layer* srcLayer)
283  {
284  ARMNN_ASSERT(srcLayer);
285 
286  if (!MayNeedCompatibilityLayer(*srcLayer))
287  {
288  // The current layer does not need copy layers, move to the next one
289  return;
290  }
291 
292  const std::vector<OutputSlot>& srcOutputSlots = srcLayer->GetOutputSlots();
293  for (unsigned int srcOutputIndex = 0; srcOutputIndex < srcOutputSlots.size(); srcOutputIndex++)
294  {
295  OutputSlot& srcOutputSlot = srcLayer->GetOutputSlot(srcOutputIndex);
296  const std::vector<InputSlot*> srcConnections = srcOutputSlot.GetConnections();
297  const std::vector<EdgeStrategy> srcEdgeStrategies = srcOutputSlot.GetEdgeStrategies();
298  for (unsigned int srcConnectionIndex = 0; srcConnectionIndex < srcConnections.size(); srcConnectionIndex++)
299  {
300  InputSlot* dstInputSlot = srcConnections[srcConnectionIndex];
301  ARMNN_ASSERT(dstInputSlot);
302 
303  EdgeStrategy strategy = srcEdgeStrategies[srcConnectionIndex];
305  "Undefined memory strategy found while adding copy layers for compatibility");
306 
307  const Layer& dstLayer = dstInputSlot->GetOwningLayer();
308  if (MayNeedCompatibilityLayer(dstLayer) &&
309  IsCompatibilityStrategy(strategy))
310  {
311  // A copy layer is needed in between the source and destination layers.
312  // Record the operation rather than attempting to modify the graph as we go.
313  // (invalidating iterators)
314  const std::string compLayerName = boost::str(boost::format("[ %1% (%2%) -> %3% (%4%) ]")
315  % srcLayer->GetName()
316  % srcOutputIndex
317  % dstLayer.GetName()
318  % dstInputSlot->GetSlotIndex());
319 
320  Layer* compLayer = nullptr;
321  if (strategy == EdgeStrategy::CopyToTarget)
322  {
323  compLayer = InsertNewLayer<MemCopyLayer>(*dstInputSlot, compLayerName.c_str());
324  }
325  else
326  {
327  ARMNN_ASSERT_MSG(strategy == EdgeStrategy::ExportToTarget, "Invalid edge strategy found.");
328  compLayer = InsertNewLayer<MemImportLayer>(*dstInputSlot, compLayerName.c_str());
329  }
330 
331  compLayer->SetBackendId(dstLayer.GetBackendId());
332 
333  OutputSlot& compOutputSlot = compLayer->GetOutputSlot(0);
334  auto backendIt = backends.find(dstLayer.GetBackendId());
335  if (backendIt != backends.end() &&
336  backendIt->second &&
337  backendIt->second->SupportsTensorAllocatorAPI())
338  {
339  auto backend = backendIt->second.get();
340  auto tensorHandleFactoryIds = backend->GetHandleFactoryPreferences();
341  bool found = false;
342 
343  for (auto preference : tensorHandleFactoryIds)
344  {
345  auto factory = registry.GetFactory(preference);
346  if (factory)
347  {
348  auto srcPref = srcOutputSlot.GetTensorHandleFactoryId();
349  auto srcFactory = registry.GetFactory(srcPref);
350 
351  if (srcFactory)
352  {
353  bool canExportImport =
354  (factory->GetImportFlags() & srcFactory->GetExportFlags()) != 0;
355 
356  if (factory->SupportsMapUnmap() || canExportImport)
357  {
358  compOutputSlot.SetTensorHandleFactory(preference);
359  found = true;
360  break;
361  }
362  }
363  }
364  }
365 
366  if (!found)
367  {
369  }
370  }
371  else
372  {
374  }
375 
376  // The output strategy of a compatibility layer is always DirectCompatibility.
378 
379  // Recalculate the connection index on the previous layer as we have just inserted into it.
380  const std::vector<InputSlot*>& newSourceConnections = srcOutputSlot.GetConnections();
381  auto newSrcConnectionIndex = std::distance(newSourceConnections.begin(),
382  std::find(newSourceConnections.begin(),
383  newSourceConnections.end(),
384  &compLayer->GetInputSlot(0)));
385 
386  // The input strategy of a compatibility layer is always DirectCompatibilty.
387  srcOutputSlot.SetEdgeStrategy(boost::numeric_cast<unsigned int>(newSrcConnectionIndex),
389  }
390  }
391  }
392  });
393 }
394 
396 {
397  ARMNN_ASSERT(substituteLayer != nullptr);
398 
399  ReplaceSubgraphConnections(subgraph, substituteLayer);
400  EraseSubgraphLayers(subgraph);
401 }
402 
403 void Graph::SubstituteSubgraph(SubgraphView& subgraph, const SubgraphView& substituteSubgraph)
404 {
405  // Look through each layer in the new subgraph and add any that are not already a member of this graph
406  substituteSubgraph.ForEachLayer([this](Layer* layer)
407  {
408  if (std::find(std::begin(m_Layers), std::end(m_Layers), layer) == std::end(m_Layers))
409  {
410  layer->Reparent(*this, m_Layers.end());
411  m_LayersInOrder = false;
412  }
413  });
414 
415  ReplaceSubgraphConnections(subgraph, substituteSubgraph);
416  EraseSubgraphLayers(subgraph);
417  TopologicalSort();
418 }
419 
420 void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, IConnectableLayer* substituteLayer)
421 {
422  ARMNN_ASSERT(substituteLayer != nullptr);
423 
424  // Create a new sub-graph with only the given layer, using
425  // the given sub-graph as a reference of which parent graph to use
426  SubgraphView substituteSubgraph(substituteLayer);
427  ReplaceSubgraphConnections(subgraph, substituteSubgraph);
428 }
429 
/// Disconnects the boundary slots of `subgraph` and reconnects them, index for
/// index, to the corresponding boundary slots of `substituteSubgraph`. The two
/// sub-graphs must have matching numbers of input and output slots, and every
/// layer of the substitute must already be a member of this graph.
void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const SubgraphView& substituteSubgraph)
{
    ARMNN_ASSERT_MSG(!substituteSubgraph.GetLayers().empty(), "New sub-graph used for substitution must not be empty");

    // Sanity check (debug builds only): every substitute layer must already belong to this graph.
    const SubgraphView::Layers& substituteSubgraphLayers = substituteSubgraph.GetLayers();
    std::for_each(substituteSubgraphLayers.begin(), substituteSubgraphLayers.end(), [&](Layer* layer)
    {
        IgnoreUnused(layer);    // Only used inside the assert below.
        ARMNN_ASSERT_MSG(std::find(m_Layers.begin(), m_Layers.end(), layer) != m_Layers.end(),
                         "Substitute layer is not a member of graph");
    });

    const SubgraphView::InputSlots& subgraphInputSlots = subgraph.GetInputSlots();
    const SubgraphView::OutputSlots& subgraphOutputSlots = subgraph.GetOutputSlots();

    unsigned int subgraphNumInputSlots = boost::numeric_cast<unsigned int>(subgraphInputSlots.size());
    unsigned int subgraphNumOutputSlots = boost::numeric_cast<unsigned int>(subgraphOutputSlots.size());

    const SubgraphView::InputSlots& substituteSubgraphInputSlots = substituteSubgraph.GetInputSlots();
    const SubgraphView::OutputSlots& substituteSubgraphOutputSlots = substituteSubgraph.GetOutputSlots();

    // Boundary slots are matched one-to-one by index.
    ARMNN_ASSERT(subgraphNumInputSlots == substituteSubgraphInputSlots.size());
    ARMNN_ASSERT(subgraphNumOutputSlots == substituteSubgraphOutputSlots.size());

    // Disconnect the sub-graph and replace it with the substitute sub-graph

    // Step 1: process input slots — detach each producer feeding the old
    // sub-graph and attach it to the substitute's matching input slot.
    for (unsigned int inputSlotIdx = 0; inputSlotIdx < subgraphNumInputSlots; ++inputSlotIdx)
    {
        InputSlot* subgraphInputSlot = subgraphInputSlots.at(inputSlotIdx);
        ARMNN_ASSERT(subgraphInputSlot);

        IOutputSlot* connectedOutputSlot = subgraphInputSlot->GetConnection();
        ARMNN_ASSERT(connectedOutputSlot);
        connectedOutputSlot->Disconnect(*subgraphInputSlot);

        IInputSlot* substituteInputSlot = substituteSubgraphInputSlots.at(inputSlotIdx);
        ARMNN_ASSERT(substituteInputSlot);
        connectedOutputSlot->Connect(*substituteInputSlot);
    }

    // Step 2: process output slots — move all consumers of the old sub-graph's
    // outputs over to the substitute's matching output slot.
    for(unsigned int outputSlotIdx = 0; outputSlotIdx < subgraphNumOutputSlots; ++outputSlotIdx)
    {
        OutputSlot* subgraphOutputSlot = subgraphOutputSlots.at(outputSlotIdx);
        ARMNN_ASSERT(subgraphOutputSlot);

        OutputSlot* substituteOutputSlot = substituteSubgraphOutputSlots.at(outputSlotIdx);
        ARMNN_ASSERT(substituteOutputSlot);
        subgraphOutputSlot->MoveAllConnections(*substituteOutputSlot);
    }
}
482 
483 void Graph::EraseSubgraphLayers(SubgraphView &subgraph)
484 {
485  for (auto layer : subgraph.GetLayers())
486  {
487  EraseLayer(layer);
488  }
489  subgraph.Clear();
490 }
491 
493 {
494  for (auto&& layer : TopologicalSort())
495  {
496  for (auto&& input : layer->GetInputSlots())
497  {
498  const IOutputSlot* source = input.GetConnectedOutputSlot();
499  if (source == NULL)
500  {
501  std::ostringstream message;
502  message << "Input not connected on "
503  << GetLayerTypeAsCString(layer->GetType())
504  << " layer \""
505  << layer->GetName()
506  << "\"";
507  throw LayerValidationException(message.str());
508  }
509 
510  if (!source->IsTensorInfoSet())
511  {
512  throw LayerValidationException("All inputs must have the TensorInfo set at this point.");
513  }
514 
515  if (layer->m_ShapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
516  {
517  layer->ValidateTensorShapesFromInputs();
518  }
519  }
520  }
521 }
522 
523 } // namespace armnn
Graph(bool shapeInferenceMethod=false)
Definition: Graph.hpp:96
const std::vector< InputSlot * > & GetConnections() const
Definition: Layer.hpp:125
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:61
NodeContent & AddContent(const std::string &content)
void SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
Definition: Layer.cpp:182
No strategy has been defined. Used internally to verify integrity of optimizations.
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
Status SerializeToDot(std::ostream &stream)
Definition: Graph.cpp:80
const std::vector< EdgeStrategy > & GetEdgeStrategies() const
Definition: Layer.hpp:126
virtual void Reparent(Graph &dest, std::list< Layer *>::const_iterator iterator)=0
Layer & GetOwningLayer() const
Definition: Layer.hpp:115
Source backends tensor data can be exported to destination backend tensor without copy...
void EraseLayer(Iterator pos)
Deletes the layer at the specified position.
Definition: Graph.hpp:450
std::vector< OutputSlot * > OutputSlots
DotAttributeSet & GetAttributeSet()
virtual void Allocate()=0
Indicate to the memory manager that this resource is no longer active.
virtual Layer * Clone(Graph &graph) const =0
Creates a dynamically-allocated copy of this layer.
#define ARMNN_LOG(severity)
Definition: Logging.hpp:163
virtual void Manage()=0
Indicate to the memory manager that this resource is active.
Copyright (c) 2020 ARM Limited.
void IgnoreUnused(Ts &&...)
const IOutputSlot * GetConnection() const override
Definition: Layer.hpp:199
Destination backend can work directly with tensors on source backend.
The SubgraphView class represents a subgraph of a Graph.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:312
void ForEachLayer(Func func) const
Definition: Graph.hpp:40
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
char const * GetLayerTypeAsCString(LayerType type)
DotAttributeSet & AddAttribute(const std::string &name, const std::stringstream &value)
NodeContent & GetContents()
An output connection slot for a layer.
Definition: INetwork.hpp:37
unsigned int GetSlotIndex() const
Definition: Layer.hpp:53
virtual void Disconnect(IInputSlot &slot)=0
virtual ITensorHandle * GetParent() const =0
Get the parent tensor if this is a subtensor.
Validate all output shapes.
Status
enumeration
Definition: Types.hpp:26
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:33
Layer & GetOwningLayer() const
Definition: Layer.hpp:52
const BackendId & GetBackendId() const
Definition: Layer.hpp:265
const std::vector< OutputSlot > & GetOutputSlots() const
Definition: Layer.hpp:234
std::vector< InputSlot * > InputSlots
void SubstituteSubgraph(SubgraphView &subgraph, IConnectableLayer *substituteLayer)
Substitutes the given sub-graph with either a new layer or a new sub-graph.
Definition: Graph.cpp:395
void SetTensorHandleFactory(const ITensorHandleFactory::FactoryId &id)
Definition: Layer.cpp:172
const InputSlots & GetInputSlots() const
std::vector< OutputSlot >::iterator BeginOutputSlots()
Definition: Layer.hpp:241
const OutputSlots & GetOutputSlots() const
void InferTensorInfos()
Definition: Graph.cpp:492
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const
Find a TensorHandleFactory by Id Returns nullptr if not found.
virtual bool IsTensorInfoSet() const =0
const Layers & GetLayers() const
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:314
DotAttributeSet & GetAttributeSet()
const char * GetName() const override
Returns the name of the layer.
Definition: Layer.hpp:307
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
Definition: Layer.cpp:177
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:178
virtual int Connect(IInputSlot &destination)=0
void ForEachLayer(Func func) const
std::list< Layer * > Layers
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
Status AllocateDynamicBuffers()
Allocates memory for all tensors under output tensor handers of each layer.
Definition: Graph.cpp:141
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:63
void AddCompatibilityLayers(std::map< BackendId, std::unique_ptr< class IBackendInternal >> &backends, TensorHandleFactoryRegistry &registry)
Modifies the graph in-place, removing edges connecting layers using different compute devices...
Definition: Graph.cpp:262
Status Print() const
Definition: Graph.cpp:60
static const FactoryId LegacyFactoryId
void MoveAllConnections(OutputSlot &destination)
Moves all connections to another OutputSlot.
Definition: Layer.cpp:116
An input connection slot for a layer.
Definition: INetwork.hpp:24
LayerGuid GetGuid() const final
Returns the unique id of the layer.
Definition: Layer.hpp:318