ArmNN 20.11
LoadedNetwork.cpp
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "LoadedNetwork.hpp"
7 #include "Layer.hpp"
8 #include "Graph.hpp"
9 #include "Network.hpp"
10 #include <Processes.hpp>
11 #include "Profiling.hpp"
12 #include "HeapProfiling.hpp"
13 
15 #include <armnn/Logging.hpp>
16 #include <armnn/utility/Assert.hpp>
17 
22 
24 
25 #include <fmt/format.h>
26 
27 namespace armnn
28 {
29 
30 using namespace std;
31 using namespace armnn::profiling;
32 
33 namespace
34 {
35 
36 template <typename ExceptionType>
37 std::string ToErrorMessage(const char * prefix, const ExceptionType & error)
38 {
39  std::stringstream ss;
40  ss << prefix << " " << error.what();
41  return ss.str();
42 }
43 
44 void AddLayerStructure(std::unique_ptr<TimelineUtilityMethods>& timelineUtils,
45  const Layer& layer,
46  ProfilingGuid networkGuid)
47 {
48  // Add layer to the post-optimisation network structure
49  std::string layerName = layer.GetNameStr().empty() ? "<Unnamed>" : layer.GetNameStr();
50  timelineUtils->CreateNamedTypedChildEntity(layer.GetGuid(),
51  networkGuid,
52  layerName,
53  LabelsAndEventClasses::LAYER_GUID);
54  for (auto&& input : layer.GetInputSlots())
55  {
56  const IOutputSlot* source = input.GetConnectedOutputSlot();
57  ARMNN_ASSERT(source != NULL);
58  timelineUtils->CreateConnectionRelationship(ProfilingRelationshipType::RetentionLink,
59  source->GetOwningLayerGuid(),
60  layer.GetGuid());
61  }
62 }
63 
64 void AddWorkloadStructure(std::unique_ptr<TimelineUtilityMethods>& timelineUtils,
65  std::unique_ptr<IWorkload>& workload,
66  const Layer& layer)
67 {
68  // Add workload to the post-optimisation network structure
69  timelineUtils->CreateTypedEntity(workload->GetGuid(), LabelsAndEventClasses::WORKLOAD_GUID);
70  timelineUtils->MarkEntityWithLabel(workload->GetGuid(),
71  layer.GetBackendId().Get(),
72  LabelsAndEventClasses::BACKENDID_GUID);
73 
74  // Link the workload to the layer
75  timelineUtils->CreateRelationship(ProfilingRelationshipType::RetentionLink,
76  layer.GetGuid(),
77  workload->GetGuid(),
78  LabelsAndEventClasses::CHILD_GUID);
79 }
80 
81 } // anonymous
82 
83 std::unique_ptr<LoadedNetwork> LoadedNetwork::MakeLoadedNetwork(std::unique_ptr<OptimizedNetwork> net,
84  std::string& errorMessage,
85  const INetworkProperties& networkProperties,
86  profiling::ProfilingService& profilingService)
87 {
88  std::unique_ptr<LoadedNetwork> loadedNetwork;
89 
90  auto Fail = [&](const std::exception& error) -> std::unique_ptr<LoadedNetwork>
91  {
92  errorMessage = ToErrorMessage("An error occurred when preparing the network workloads: ", error);
93  ARMNN_LOG(error) << errorMessage;
94 
95  return std::unique_ptr<LoadedNetwork>();
96  };
97 
98  try
99  {
100  loadedNetwork.reset(new LoadedNetwork(std::move(net), networkProperties, profilingService));
101  }
102  catch (const armnn::RuntimeException& error)
103  {
104  return Fail(error);
105  }
106  catch (const armnn::Exception& error)
107  {
108  return Fail(error);
109  }
110  catch (const std::runtime_error& error)
111  {
112  return Fail(error);
113  }
114 
115  return loadedNetwork;
116 }
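MakeLoadedNetwork() reports failure by logging, writing the message into errorMessage and returning an empty unique_ptr rather than rethrowing. A minimal caller sketch, assuming an already optimised network (optNet) and a ProfilingService instance are in scope; this is illustrative, not code from this file:

    std::string errorMessage;
    armnn::INetworkProperties networkProperties; // default-constructed: import and export disabled
    std::unique_ptr<armnn::LoadedNetwork> loadedNetwork =
        armnn::LoadedNetwork::MakeLoadedNetwork(std::move(optNet), errorMessage,
                                                networkProperties, profilingService);
    if (!loadedNetwork)
    {
        // errorMessage now starts with "An error occurred when preparing the network workloads:"
        ARMNN_LOG(error) << errorMessage;
    }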
117 
118 LoadedNetwork::LoadedNetwork(std::unique_ptr<OptimizedNetwork> net,
119  const INetworkProperties& networkProperties,
120  profiling::ProfilingService& profilingService) :
121  m_OptimizedNetwork(std::move(net)),
122  m_IsImportEnabled(networkProperties.m_ImportEnabled),
123  m_IsExportEnabled(networkProperties.m_ExportEnabled),
124  m_TensorHandleFactoryRegistry(),
125  m_ProfilingService(profilingService)
126 {
127  // Create a profiler and register it for the current thread.
128  m_Profiler = std::make_shared<Profiler>();
129  ProfilerManager::GetInstance().RegisterProfiler(m_Profiler.get());
130 
131  Graph& order = m_OptimizedNetwork->GetGraph().TopologicalSort();
132  // First create tensor handlers, backends and workload factories.
133  // Handlers are created before workloads because workload creation
134  // can modify some of the handlers (for example the splitter and
135  // concat layers).
136  for (auto&& layer : order)
137  {
138  auto const& backendId = layer->GetBackendId();
139  if (m_Backends.count(backendId) == 0)
140  {
141  auto createBackend = BackendRegistryInstance().GetFactory(backendId);
142  auto it = m_Backends.emplace(std::make_pair(backendId, createBackend()));
143 
144  IBackendInternal* backend = it.first->second.get();
145 
146  if (backend->SupportsTensorAllocatorAPI())
147  {
148  auto workloadFactory = backend->CreateWorkloadFactory(
149  m_TensorHandleFactoryRegistry, m_OptimizedNetwork->GetModelOptions());
150  m_WorkloadFactories.emplace(
151  std::make_pair(backendId, std::make_pair(std::move(workloadFactory), nullptr)));
152  }
153  else
154  {
155  IBackendInternal::IMemoryManagerSharedPtr memoryManager = backend->CreateMemoryManager();
154  {
156  auto workloadFactory = backend->CreateWorkloadFactory(
157  memoryManager, m_OptimizedNetwork->GetModelOptions());
158 
159  m_WorkloadFactories.emplace(
160  std::make_pair(backendId, std::make_pair(std::move(workloadFactory), memoryManager)));
161  }
162  }
163  }
164 
165  for (auto&& layer : order)
166  {
167  auto& workloadFactory = GetWorkloadFactory(*layer);
168 
169  switch (layer->GetType())
170  {
171  case LayerType::Input:
172  case LayerType::MemImport:
173  {
174  // If IsImportEnabled is true then we need to set IsMemoryManaged to false when creating TensorHandles
175  layer->CreateTensorHandles(m_TensorHandleFactoryRegistry, workloadFactory, !m_IsImportEnabled);
176  break;
177  }
178  default:
179  {
180  // Look for layers with a single OutputSlot that has one connection, and that connection is to an Output layer.
181  // If export is enabled, disable memory management so we can export; otherwise we do a copy.
182  if((layer->GetNumOutputSlots() == 1) &&
183  (layer->GetOutputSlots()[0].GetNumConnections() == 1) &&
184  (layer->GetOutputSlots()[0].GetConnection(0)->GetOwningLayer().GetType() == LayerType::Output))
185  {
186  layer->CreateTensorHandles(m_TensorHandleFactoryRegistry, workloadFactory, !m_IsExportEnabled);
187  }
188  else
189  {
190  layer->CreateTensorHandles(m_TensorHandleFactoryRegistry, workloadFactory);
191  }
192  }
193  }
194  }
195 
196  ProfilingGuid networkGuid = m_OptimizedNetwork->GetGuid();
197  std::unique_ptr<TimelineUtilityMethods> timelineUtils =
198  TimelineUtilityMethods::GetTimelineUtils(m_ProfilingService);
199  if (timelineUtils)
200  {
201  timelineUtils->CreateTypedEntity(networkGuid, LabelsAndEventClasses::NETWORK_GUID);
202  // Mark the network with a start of life event
203  timelineUtils->RecordEvent(networkGuid, LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS);
204  // and with the process ID
205  int processID = armnnUtils::Processes::GetCurrentId();
206  std::stringstream ss;
207  ss << processID;
208  timelineUtils->MarkEntityWithLabel(networkGuid, ss.str(), LabelsAndEventClasses::PROCESS_ID_GUID);
209  }
210 
211  //Then create workloads.
212  for (auto&& layer : order)
213  {
214  if (timelineUtils)
215  {
216  // Add layer to the post-optimisation network structure
217  AddLayerStructure(timelineUtils, *layer, networkGuid);
218  }
219 
220  const IWorkloadFactory& workloadFactory = GetWorkloadFactory(*layer);
221 
222  switch (layer->GetType())
223  {
224  case LayerType::Input:
225  case LayerType::Output:
226  {
227  // Inputs and outputs are treated in a special way - see EnqueueInput() and EnqueueOutput().
228  break;
229  }
230  default:
231  {
232  auto workload = layer->CreateWorkload(workloadFactory);
233 
234  if (!workload)
235  {
236  const char* const layerName =
237  layer->GetNameStr().length() != 0 ? layer->GetName() : "<Unnamed>";
238  throw InvalidArgumentException(
239  fmt::format("No workload created for layer (name: '{0}' type: '{1}') (compute '{2}')",
240  layerName, static_cast<int>(layer->GetType()), layer->GetBackendId().Get()
241  ));
242  }
243 
244  if (timelineUtils)
245  {
246  // Add workload to the post-optimisation network structure
247  AddWorkloadStructure(timelineUtils, workload, *layer);
248  }
249 
250  m_WorkloadQueue.push_back(move(workload));
251  // Release the constant data in the layer.
252  layer->ReleaseConstantData();
253  break;
254  }
255  }
256  }
257 
258  if (timelineUtils)
259  {
260  // Commit to send the post-optimisation network structure
261  timelineUtils->Commit();
262  }
263 
264  // Set up memory.
265  m_OptimizedNetwork->GetGraph().AllocateDynamicBuffers();
266 
267  // Now that the intermediate tensor memory has been set-up, do any post allocation configuration for each workload.
268  for (auto& workload : m_WorkloadQueue)
269  {
270  workload->PostAllocationConfigure();
271  }
272 }
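The m_IsImportEnabled / m_IsExportEnabled flags used above come from the INetworkProperties supplied at load time; they switch the relevant tensor handles to unmanaged memory so user buffers can be imported or exported instead of copied. A sketch of requesting this through the public IRuntime API (the surrounding runtime objects are assumed, not part of this file):

    // Sketch: ask for zero-copy input import and output export at load time.
    // Assumes 'runtime' (IRuntimePtr) and 'optNet' (IOptimizedNetworkPtr) already exist.
    armnn::NetworkId networkId;
    std::string errorMessage;
    armnn::INetworkProperties networkProperties(/*importEnabled=*/true, /*exportEnabled=*/true);
    if (runtime->LoadNetwork(networkId, std::move(optNet), errorMessage, networkProperties)
        != armnn::Status::Success)
    {
        ARMNN_LOG(error) << errorMessage;
    }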
273 
274 void LoadedNetwork::SendNetworkStructure()
275 {
276  Graph& order = m_OptimizedNetwork->GetGraph().TopologicalSort();
277  ProfilingGuid networkGuid = m_OptimizedNetwork->GetGuid();
278 
279  std::unique_ptr<TimelineUtilityMethods> timelineUtils =
280  TimelineUtilityMethods::GetTimelineUtils(m_ProfilingService);
281 
282  timelineUtils->CreateTypedEntity(networkGuid, LabelsAndEventClasses::NETWORK_GUID);
283 
284  for (auto&& layer : order)
285  {
286  // Add layer to the post-optimisation network structure
287  AddLayerStructure(timelineUtils, *layer, networkGuid);
288  switch (layer->GetType())
289  {
290  case LayerType::Input:
291  case LayerType::Output:
292  {
293  // Inputs and outputs are treated in a special way - see EnqueueInput() and EnqueueOutput().
294  break;
295  }
296  default:
297  {
298  for (auto& workload : m_WorkloadQueue)
299  {
300  // Add workload to the post-optimisation network structure
301  AddWorkloadStructure(timelineUtils, workload, *layer);
302  }
303  break;
304  }
305  }
306  }
307  // Commit to send the post-optimisation network structure
308  timelineUtils->Commit();
309 }
310 
311 profiling::ProfilingGuid LoadedNetwork::GetNetworkGuid()
312 {
313  return m_OptimizedNetwork->GetGuid();
314 }
315 
316 TensorInfo LoadedNetwork::GetInputTensorInfo(LayerBindingId layerId) const
317 {
318  for (auto&& inputLayer : m_OptimizedNetwork->GetGraph().GetInputLayers())
319  {
320  ARMNN_ASSERT_MSG(inputLayer->GetNumOutputSlots() == 1, "Input layer should have exactly 1 output slot");
321  if (inputLayer->GetBindingId() == layerId)
322  {
323  return inputLayer->GetOutputSlot(0).GetTensorInfo();
324  }
325  }
326 
327  throw InvalidArgumentException(fmt::format("No input layer is associated with id {}", layerId));
328 }
329 
330 TensorInfo LoadedNetwork::GetOutputTensorInfo(LayerBindingId layerId) const
331 {
332  for (auto&& outputLayer : m_OptimizedNetwork->GetGraph().GetOutputLayers())
333  {
334  ARMNN_ASSERT_MSG(outputLayer->GetNumInputSlots() == 1, "Output layer should have exactly 1 input slot");
335  ARMNN_ASSERT_MSG(outputLayer->GetInputSlot(0).GetConnection(), "Input slot on Output layer must be connected");
336  if (outputLayer->GetBindingId() == layerId)
337  {
338  return outputLayer->GetInputSlot(0).GetConnection()->GetTensorInfo();
339  }
340  }
341 
342  throw InvalidArgumentException(fmt::format("No output layer is associated with id {}", layerId));
343 }
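GetInputTensorInfo() and GetOutputTensorInfo() resolve a LayerBindingId to the TensorInfo of the bound Input or Output layer, which is the usual way to size user buffers before calling EnqueueWorkload(). A small sketch, assuming binding id 0 and float data:

    armnn::TensorInfo inputInfo  = loadedNetwork->GetInputTensorInfo(0);
    armnn::TensorInfo outputInfo = loadedNetwork->GetOutputTensorInfo(0);
    std::vector<float> inputData(inputInfo.GetNumElements());
    std::vector<float> outputData(outputInfo.GetNumElements());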
344 
345 const IWorkloadFactory& LoadedNetwork::GetWorkloadFactory(const Layer& layer) const
346 {
347  const IWorkloadFactory* workloadFactory = nullptr;
348 
349  auto it = m_WorkloadFactories.find(layer.GetBackendId());
350  if (it == m_WorkloadFactories.end())
351  {
352  throw RuntimeException(fmt::format("No workload factory for {0} to be used for layer: {1}",
353  layer.GetBackendId().Get(),
354  layer.GetNameStr()),
355  CHECK_LOCATION());
356  }
357 
358  workloadFactory = it->second.first.get();
359 
360  ARMNN_ASSERT_MSG(workloadFactory, "No workload factory");
361 
362  std::string reasonIfUnsupported;
363  ARMNN_ASSERT_MSG(IWorkloadFactory::IsLayerSupported(layer,
364  {},
365  reasonIfUnsupported,
366  m_OptimizedNetwork->GetModelOptions()),
367  "Factory does not support layer");
368  IgnoreUnused(reasonIfUnsupported);
369  return *workloadFactory;
370 }
371 
372 namespace {
373 
374 // Non-copyable class owning accelerator-specific tensor data.
375 class TensorPin
376 {
377 public:
378  TensorPin(std::unique_ptr<ITensorHandle> handle, const TensorInfo& info, LayerBindingId id)
379  : m_TensorHandle(std::move(handle))
380  , m_TensorInfo(info)
381  , m_Id(id)
382  {
383  }
384 
385  ITensorHandle* GetTensorHandle() const { return m_TensorHandle.get(); }
386  const TensorInfo& GetTensorInfo() const { return m_TensorInfo; }
387  LayerBindingId GetBindingId() const { return m_Id; }
388 
389 private:
390  std::unique_ptr<ITensorHandle> m_TensorHandle;
391  TensorInfo m_TensorInfo;
392  LayerBindingId m_Id;
393 };
394 
395 static const TensorPin& GetTensorPin(LayerBindingId id,
396  const std::vector<TensorPin>& pins,
397  char const* bindingPointDesc)
398 {
399  auto it = std::find_if(pins.begin(), pins.end(),
400  [id](const TensorPin& pin)
401  {
402  return pin.GetBindingId() == id;
403  });
404 
405  if (it != pins.end())
406  {
407  return *it;
408  }
409  else
410  {
411  throw InvalidArgumentException(fmt::format("No tensor supplied for {0} {1}", bindingPointDesc, id));
412  }
413 }
414 
415 // Stores data that needs to be kept accessible for the entire execution of a workload.
416 class WorkloadData
417 {
418 public:
419  WorkloadData(const InputTensors& inputTensors, const OutputTensors& outputTensors)
420  {
421  m_InputTensorPins.reserve(inputTensors.size());
422  m_OutputTensorPins.reserve(outputTensors.size());
423 
424  for (auto inputTensorPair : inputTensors)
425  {
426  auto inputTensor = inputTensorPair.second;
427 
428  std::unique_ptr<ITensorHandle> tensorHandle =
429  std::make_unique<ConstPassthroughCpuTensorHandle>(inputTensor.GetInfo(),inputTensor.GetMemoryArea());
430  LayerBindingId layerId = inputTensorPair.first;
431 
432  m_InputTensorPins.emplace_back(std::move(tensorHandle), inputTensor.GetInfo(), layerId);
433  }
434 
435  for (auto outputTensorPair : outputTensors)
436  {
437  auto outputTensor = outputTensorPair.second;
438 
439  std::unique_ptr<ITensorHandle> tensorHandle =
440  std::make_unique<PassthroughCpuTensorHandle>(outputTensor.GetInfo(), outputTensor.GetMemoryArea());
441  LayerBindingId layerId = outputTensorPair.first;
442 
443  m_OutputTensorPins.emplace_back(std::move(tensorHandle), outputTensor.GetInfo(), layerId);
444  }
445  }
446 
447  const TensorPin& GetInputTensorPin(LayerBindingId id) const
448  {
449  return GetTensorPin(id, m_InputTensorPins, "input");
450  }
451 
452  const TensorPin& GetOutputTensorPin(LayerBindingId id) const
453  {
454  return GetTensorPin(id, m_OutputTensorPins, "output");
455  }
456 
457 private:
458 
459  std::vector<TensorPin> m_InputTensorPins;
460  std::vector<TensorPin> m_OutputTensorPins;
461 };
462 
463 }
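TensorPin and WorkloadData exist so that each user buffer is wrapped in a (Const)PassthroughCpuTensorHandle, letting the input/output memcopy workloads treat caller-owned memory like any other ITensorHandle without copying it; the pins therefore have to stay alive for the whole EnqueueWorkload() call. A minimal sketch of the wrapping idea (illustrative only):

    // A pass-through handle only references memory owned by the caller.
    std::vector<float> userBuffer(inputInfo.GetNumElements());
    auto handle = std::make_unique<armnn::ConstPassthroughCpuTensorHandle>(inputInfo,
                                                                           userBuffer.data());
    // handle->Map() returns userBuffer.data(); no allocation or copy takes place.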
464 
465 Status LoadedNetwork::EnqueueWorkload(const InputTensors& inputTensors,
466  const OutputTensors& outputTensors)
467 {
468  const Graph& graph = m_OptimizedNetwork->GetGraph();
469 
470  // Walk graph to determine the order of execution.
471  if (graph.GetNumLayers() < 2)
472  {
473  ARMNN_LOG(warning) << "IRuntime::EnqueueWorkload()::Less than two nodes in graph";
474  return Status::Failure;
475  }
476 
477  // Data that must be kept alive for the entire execution of the workload.
478  WorkloadData workloadData(inputTensors, outputTensors);
479 
480  if (graph.GetNumInputs() != inputTensors.size())
481  {
482  throw InvalidArgumentException("Number of inputs provided does not match network.");
483  }
484 
485  // For each input to the network, call EnqueueInput with the data passed by the user.
486  {
487  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "PrepareInputs");
488  m_InputQueue.clear();
489  m_InputQueue.reserve(graph.GetNumInputs());
490  for (const BindableLayer* inputLayer : graph.GetInputLayers())
491  {
492  const TensorPin& pin = workloadData.GetInputTensorPin(inputLayer->GetBindingId());
493  EnqueueInput(*inputLayer, pin.GetTensorHandle(), pin.GetTensorInfo());
494  }
495  }
496 
497  // For each output to the network, call EnqueueOutput with the data passed by the user.
498  {
499  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "PrepareOutputs");
500  m_OutputQueue.clear();
501  m_OutputQueue.reserve(graph.GetNumOutputs());
502  for (const BindableLayer* outputLayer : graph.GetOutputLayers())
503  {
504  const TensorPin& pin = workloadData.GetOutputTensorPin(outputLayer->GetBindingId());
505  EnqueueOutput(*outputLayer, pin.GetTensorHandle(), pin.GetTensorInfo());
506  }
507  }
508 
509  std::unique_ptr<TimelineUtilityMethods> timelineUtils =
510  TimelineUtilityMethods::GetTimelineUtils(m_ProfilingService);
511  ProfilingGuid inferenceGuid = m_ProfilingService.GetNextGuid();
512  if (timelineUtils)
513  {
514  // Add inference timeline trace if profiling is enabled.
515  ProfilingGuid networkGuid = m_OptimizedNetwork->GetGuid();
516  timelineUtils->CreateTypedEntity(inferenceGuid, LabelsAndEventClasses::INFERENCE_GUID);
517  timelineUtils->CreateRelationship(ProfilingRelationshipType::RetentionLink,
518  networkGuid,
519  inferenceGuid,
520  LabelsAndEventClasses::EXECUTION_OF_GUID);
521  timelineUtils->RecordEvent(inferenceGuid, LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS);
522  }
523 
524  bool executionSucceeded = true;
525 
526  {
527  if (m_ProfilingService.IsProfilingEnabled())
528  {
529  m_ProfilingService.IncrementCounterValue(armnn::profiling::INFERENCES_RUN);
530  }
531  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Execute");
532  ARMNN_SCOPED_HEAP_PROFILING("Executing");
533  executionSucceeded = Execute(timelineUtils, inferenceGuid);
534  }
535 
536  if (timelineUtils)
537  {
538  // Add end of life of the inference timeline if profiling is enabled.
539  timelineUtils->RecordEvent(inferenceGuid, LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS);
540  timelineUtils->Commit();
541  }
542  return executionSucceeded ? Status::Success : Status::Failure;
543 }
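EnqueueWorkload() expects one (LayerBindingId, ConstTensor) pair per network input and one (LayerBindingId, Tensor) pair per network output; it prepares the input and output queues and then runs the input, workload and output queues in that order. A usage sketch, reusing the buffers and binding id assumed in the earlier sketches:

    armnn::InputTensors inputTensors
    {
        { 0, armnn::ConstTensor(inputInfo, inputData.data()) }
    };
    armnn::OutputTensors outputTensors
    {
        { 0, armnn::Tensor(outputInfo, outputData.data()) }
    };
    armnn::Status status = loadedNetwork->EnqueueWorkload(inputTensors, outputTensors);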
544 
545 void LoadedNetwork::EnqueueInput(const BindableLayer& layer, ITensorHandle* tensorHandle, const TensorInfo& tensorInfo)
546 {
547  if (layer.GetType() != LayerType::Input)
548  {
549  throw InvalidArgumentException("EnqueueInput: given layer not an InputLayer");
550  }
551 
552  if (tensorHandle == nullptr)
553  {
554  throw InvalidArgumentException("EnqueueInput: tensorHandle must not be NULL");
555  }
556 
557  InputQueueDescriptor inputQueueDescriptor;
558  WorkloadInfo info;
559 
560  inputQueueDescriptor.m_Inputs.push_back(tensorHandle);
561  info.m_InputTensorInfos.push_back(tensorInfo);
562 
563  ARMNN_ASSERT_MSG(layer.GetNumOutputSlots() == 1, "Can only handle Input Layer with one output");
564  const OutputHandler& handler = layer.GetOutputHandler();
565  const TensorInfo& outputTensorInfo = handler.GetTensorInfo();
566  ITensorHandle* outputTensorHandle = handler.GetData();
567  ARMNN_ASSERT_MSG(outputTensorHandle != nullptr,
568  "Data should have been allocated.");
569  inputQueueDescriptor.m_Outputs.push_back(outputTensorHandle);
570  info.m_OutputTensorInfos.push_back(outputTensorInfo);
571 
572  MemorySourceFlags importFlags = outputTensorHandle->GetImportFlags();
573  bool needMemCopy = true;
574  if (m_IsImportEnabled) // Try import the input tensor
575  {
576  if(CheckFlag(importFlags, MemorySource::Malloc) )
577  {
578  needMemCopy = false;
579  // This assumes a CPU Tensor handle
580  void* mem = tensorHandle->Map(false);
581  if (outputTensorHandle->Import(mem, MemorySource::Malloc))
582  {
583  tensorHandle->Unmap();
584  return; // No need for a workload since the import has been done.
585  }
586  tensorHandle->Unmap();
587  throw MemoryImportException("EnqueueInput: Memory Import failed");
588  }
589  }
590  if (needMemCopy)
591  {
592  // Create a mem copy workload for input since we did not import
593  std::unique_ptr<IWorkload> inputWorkload = std::make_unique<CopyMemGenericWorkload>(inputQueueDescriptor, info);
594 
595  ARMNN_ASSERT_MSG(inputWorkload, "No input workload created");
596 
597  std::unique_ptr<TimelineUtilityMethods> timelineUtils =
598  TimelineUtilityMethods::GetTimelineUtils(m_ProfilingService);
599  if (timelineUtils)
600  {
601  // Add Input Workload to the post-optimisation network structure
602  AddWorkloadStructure(timelineUtils, inputWorkload, layer);
603  timelineUtils->Commit();
604  }
605 
606  m_InputQueue.push_back(move(inputWorkload));
607  }
608 }
609 
610 void LoadedNetwork::EnqueueOutput(const BindableLayer& layer, ITensorHandle* tensorHandle, const TensorInfo& tensorInfo)
611 {
612  if (layer.GetType() != LayerType::Output)
613  {
614  throw InvalidArgumentException("EnqueueOutput: given layer not an OutputLayer");
615  }
616 
617  if (tensorHandle == nullptr)
618  {
619  throw InvalidArgumentException("EnqueueOutput: tensorHandle must not be NULL");
620  }
621 
622  OutputQueueDescriptor outputQueueDescriptor;
623  WorkloadInfo info;
624 
625  outputQueueDescriptor.m_Outputs.push_back(tensorHandle);
626  info.m_OutputTensorInfos.push_back(tensorInfo);
627 
628  ARMNN_ASSERT_MSG(layer.GetNumInputSlots() == 1, "Output Layer should have exactly one input.");
629 
630  // Gets the output handler from the previous node.
631  const OutputHandler& outputHandler = layer.GetInputSlots()[0].GetConnectedOutputSlot()->GetOutputHandler();
632 
633  const TensorInfo& inputTensorInfo = outputHandler.GetTensorInfo();
634  ITensorHandle* inputTensorHandle = outputHandler.GetData();
635  ARMNN_ASSERT_MSG(inputTensorHandle != nullptr, "Data should have been allocated.");
636 
637  // Try import the output tensor.
638  // Note: We can only import the output pointer if all of the following hold true:
639  // a) The imported pointer is aligned sufficiently
640  // b) The tensor has zero padding
641  // c) There is only one connection to the OutputSlot and it is to an OutputLayer.
642  // d) The output pointer is allocated via malloc. (Other types will be supported in a later release)
643  // e) m_IsExportEnabled must be set to true
644  bool needMemCopy = true;
645  if (m_IsExportEnabled && (layer.GetInputSlots()[0].GetConnectedOutputSlot()->GetNumConnections() == 1))
646  {
647  if(layer.GetInputSlots()[0].GetConnectedOutputSlot()->GetOwningLayer().GetType() != LayerType::Input)
648  {
649  MemorySourceFlags importFlags = inputTensorHandle->GetImportFlags();
650  if (CheckFlag(importFlags, MemorySource::Malloc))
651  {
652  needMemCopy = false;
653  void *mem = tensorHandle->Map(false);
654  bool importOk = inputTensorHandle->Import(mem, MemorySource::Malloc);
655  tensorHandle->Unmap();
656 
657  if (importOk)
658  {
659  // Insert synchronization workload
660  MemSyncQueueDescriptor syncDesc;
661  syncDesc.m_Inputs.push_back(inputTensorHandle);
662  info.m_InputTensorInfos.push_back(inputTensorInfo);
663  auto syncWorkload = std::make_unique<SyncMemGenericWorkload>(syncDesc, info);
664  ARMNN_ASSERT_MSG(syncWorkload, "No sync workload created");
665  m_OutputQueue.push_back(move(syncWorkload));
666  }
667  else
668  {
669  throw MemoryExportException("EnqueueOutput: Memory Export failed");
670  }
671  }
672  }
673  }
674  if (needMemCopy)
675  {
676  // If we got here then we didn't export the memory, so add an output workload which performs a memcopy.
677  outputQueueDescriptor.m_Inputs.push_back(inputTensorHandle);
678  info.m_InputTensorInfos.push_back(inputTensorInfo);
679 
680  std::unique_ptr<IWorkload> outputWorkload =
681  std::make_unique<CopyMemGenericWorkload>(outputQueueDescriptor, info);
682  ARMNN_ASSERT_MSG(outputWorkload, "No output workload created");
683 
684  std::unique_ptr<TimelineUtilityMethods> timelineUtils =
685  TimelineUtilityMethods::GetTimelineUtils(m_ProfilingService);
686  if (timelineUtils)
687  {
688  // Add Output Workload to the post-optimisation network structure
689  AddWorkloadStructure(timelineUtils, outputWorkload, layer);
690  timelineUtils->Commit();
691  }
692 
693  m_OutputQueue.push_back(move(outputWorkload));
694  }
695 }
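The comment above lists the conditions under which EnqueueOutput() exports the user's output pointer instead of queuing a copy: export enabled, a single connection from the producing OutputSlot, a malloc-backed pointer and sufficient alignment. A hedged sketch of providing such a buffer; the claim that a std::vector allocation is importable is an assumption about the backend, not something this file guarantees:

    // Heap-backed output buffer for an export-enabled network (assumption:
    // the backend accepts MemorySource::Malloc and this alignment).
    std::vector<float> outputData(outputInfo.GetNumElements());
    armnn::OutputTensors outputTensors
    {
        { 0, armnn::Tensor(outputInfo, outputData.data()) }
    };
    // If the handle's import flags advertise MemorySource::Malloc, EnqueueOutput()
    // imports the pointer and queues only a SyncMemGeneric workload; if the flags
    // do not allow it, a CopyMemGeneric workload copies into outputData instead.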
696 
697 void LoadedNetwork::AllocateWorkingMemory(std::lock_guard<std::mutex>& lock)
698 {
699  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Working Memory Allocation");
700 
701  // this unused parameter makes sure we can only call this function with a valid lock
702  IgnoreUnused(lock);
703 
704  if (m_IsWorkingMemAllocated)
705  {
706  return;
707  }
708  for (auto&& workloadFactory : m_WorkloadFactories)
709  {
710  IBackendInternal::IMemoryManagerSharedPtr memoryManager = workloadFactory.second.second;
711  if (memoryManager)
712  {
713  memoryManager->Acquire();
714  }
715  }
716  m_TensorHandleFactoryRegistry.AquireMemory();
717  m_IsWorkingMemAllocated = true;
718 }
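AllocateWorkingMemory() deliberately takes an unused std::lock_guard reference: the only way to obtain one is to already hold m_WorkingMemMutex, so the signature itself forces the caller (Execute()) to lock first. A minimal, stand-alone sketch of the same lock-token idiom with illustrative names:

    #include <mutex>

    class Cache
    {
    public:
        void Refresh()
        {
            std::lock_guard<std::mutex> lock(m_Mutex);
            RefreshAlreadyLocked(lock); // can only be called while the mutex is held
        }
    private:
        void RefreshAlreadyLocked(std::lock_guard<std::mutex>&) { /* rebuild cached state */ }
        std::mutex m_Mutex;
    };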
719 
720 void LoadedNetwork::FreeWorkingMemory()
721 {
722  std::lock_guard<std::mutex> lockGuard(m_WorkingMemMutex);
723  if (!m_IsWorkingMemAllocated)
724  {
725  return;
726  }
727  // Informs the memory managers to release memory in their respective memory groups
728  for (auto&& workloadFactory : m_WorkloadFactories)
729  {
730  IBackendInternal::IMemoryManagerSharedPtr memoryManager = workloadFactory.second.second;
731  if (memoryManager)
732  {
733  memoryManager->Release();
734  }
735  }
736  m_TensorHandleFactoryRegistry.ReleaseMemory();
737  m_IsWorkingMemAllocated = false;
738 }
739 
740 bool LoadedNetwork::Execute(std::unique_ptr<TimelineUtilityMethods>& timelineUtils,
741  profiling::ProfilingGuid inferenceGuid)
742 {
743  bool success = true;
744 
745  auto Fail = [&](const std::exception& error)
746  {
747  ARMNN_LOG(error) << "An error occurred attempting to execute a workload: " << error.what();
748  success = false;
749  };
750 
751  try
752  {
753  std::lock_guard<std::mutex> lockGuard(m_WorkingMemMutex);
754  AllocateWorkingMemory(lockGuard);
755 
756  ProfilingDynamicGuid workloadInferenceID(0);
757  auto ExecuteQueue = [&timelineUtils, &workloadInferenceID, &inferenceGuid](WorkloadQueue& queue)
758  {
759  for (auto& workload : queue)
760  {
761  if(timelineUtils)
762  {
763  workloadInferenceID = timelineUtils->RecordWorkloadInferenceAndStartOfLifeEvent(workload->GetGuid(),
764  inferenceGuid);
765  }
766  workload->Execute();
767  if(timelineUtils)
768  {
769  timelineUtils->RecordEndOfLifeEvent(workloadInferenceID);
770  }
771  }
772  };
773 
774  ExecuteQueue(m_InputQueue);
775  ExecuteQueue(m_WorkloadQueue);
776  ExecuteQueue(m_OutputQueue);
777  }
778  catch (const RuntimeException& error)
779  {
780  Fail(error);
781  }
782  catch (const std::runtime_error& error)
783  {
784  Fail(error);
785  }
786 
787  return success;
788 }
789 
790 void LoadedNetwork::RegisterDebugCallback(const DebugCallbackFunction& func)
791 {
792  for (auto&& workloadPtr: m_WorkloadQueue)
793  {
794  workloadPtr.get()->RegisterDebugCallback(func);
795  }
796 }
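RegisterDebugCallback() forwards the callback to every workload in m_WorkloadQueue; Debug layers (present when the network was optimised with debug enabled) invoke it with the owning layer's guid, the output slot index and the intermediate tensor handle. A sketch of a callback that logs tensor sizes (illustrative; assumes Debug layers exist in the loaded network):

    loadedNetwork->RegisterDebugCallback(
        [](armnn::LayerGuid guid, unsigned int slotIndex, armnn::ITensorHandle* tensorHandle)
        {
            ARMNN_LOG(info) << "Layer " << guid << ", slot " << slotIndex << ": "
                            << tensorHandle->GetShape().GetNumElements() << " elements";
        });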
797 
798 }