Runtime.cpp (ArmNN 22.11)

//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "Runtime.hpp"
#include "Profiling.hpp"

#include <armnn/Version.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/Logging.hpp>
#include <armnn/TypesUtils.hpp>

#include <armnn/profiling/ArmNNProfiling.hpp>
#include <armnn/profiling/ArmNNProfilingServiceInitialiser.hpp>

#include <armnn/utility/Assert.hpp>
#include <armnn/utility/Timer.hpp>

#include <backendsCommon/DynamicBackendUtils.hpp>
#include <backendsCommon/memoryOptimizerStrategyLibrary/MemoryOptimizerStrategyLibrary.hpp>

#include <client/include/backends/IBackendProfiling.hpp>
#include <client/include/TimelineUtilityMethods.hpp>

#include <common/include/LabelsAndEventClasses.hpp>

#include <iostream>

using namespace armnn;
using namespace std;

namespace armnn
{

IRuntime::IRuntime() : pRuntimeImpl(nullptr) {}

IRuntime::IRuntime(const IRuntime::CreationOptions& options) : pRuntimeImpl(new RuntimeImpl(options)) {}

IRuntime::~IRuntime() = default;

IRuntime* IRuntime::CreateRaw(const CreationOptions& options)
{
    return new IRuntime(options);
}

IRuntimePtr IRuntime::Create(const CreationOptions& options)
{
    return IRuntimePtr(CreateRaw(options), &IRuntime::Destroy);
}

void IRuntime::Destroy(IRuntime* runtime)
{
    delete runtime;
}
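
// Illustrative usage sketch (not part of the original file): creating a runtime and
// loading an optimized network through the wrappers defined below. The function name,
// and the assumption that optNet came from armnn::Optimize(), are hypothetical.
#if 0
void ExampleCreateAndLoad(armnn::IOptimizedNetworkPtr optNet)
{
    // Default options: no profiling, no protected mode, no dynamic-backend override.
    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);

    // Ownership of the optimized network transfers to the runtime on load.
    armnn::NetworkId networkId = 0;
    std::string errorMessage;
    if (runtime->LoadNetwork(networkId, std::move(optNet), errorMessage) != armnn::Status::Success)
    {
        ARMNN_LOG(error) << "LoadNetwork failed: " << errorMessage;
    }
}
#endif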

Status IRuntime::LoadNetwork(NetworkId& networkIdOut, IOptimizedNetworkPtr network)
{
    return pRuntimeImpl->LoadNetwork(networkIdOut, std::move(network));
}

Status IRuntime::LoadNetwork(NetworkId& networkIdOut,
                             IOptimizedNetworkPtr network,
                             std::string& errorMessage)
{
    return pRuntimeImpl->LoadNetwork(networkIdOut, std::move(network), errorMessage);
}

Status IRuntime::LoadNetwork(NetworkId& networkIdOut,
                             IOptimizedNetworkPtr network,
                             std::string& errorMessage,
                             const INetworkProperties& networkProperties)
{
    return pRuntimeImpl->LoadNetwork(networkIdOut, std::move(network), errorMessage, networkProperties);
}

TensorInfo IRuntime::GetInputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
{
    return pRuntimeImpl->GetInputTensorInfo(networkId, layerId);
}

TensorInfo IRuntime::GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
{
    return pRuntimeImpl->GetOutputTensorInfo(networkId, layerId);
}

std::vector<ImportedInputId> IRuntime::ImportInputs(NetworkId networkId, const InputTensors& inputTensors,
                                                    MemorySource forceImportMemorySource)
{
    return pRuntimeImpl->ImportInputs(networkId, inputTensors, forceImportMemorySource);
}

std::vector<ImportedOutputId> IRuntime::ImportOutputs(NetworkId networkId, const OutputTensors& outputTensors,
                                                      MemorySource forceImportMemorySource)
{
    return pRuntimeImpl->ImportOutputs(networkId, outputTensors, forceImportMemorySource);
}

void IRuntime::ClearImportedInputs(NetworkId networkId, const std::vector<ImportedInputId> inputIds)
{
    return pRuntimeImpl->ClearImportedInputs(networkId, inputIds);
}

void IRuntime::ClearImportedOutputs(NetworkId networkId, const std::vector<ImportedOutputId> outputIds)
{
    return pRuntimeImpl->ClearImportedOutputs(networkId, outputIds);
}
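
// Illustrative sketch (not part of the original file): the pre-import workflow the four
// functions above enable. Importing once lets EnqueueWorkload skip per-inference copies;
// it assumes the tensor memory outlives the imported ids. Names are hypothetical.
#if 0
void ExamplePreImportedInference(armnn::IRuntime* runtime, armnn::NetworkId networkId,
                                 const armnn::InputTensors& inputTensors,
                                 const armnn::OutputTensors& outputTensors)
{
    // Import the buffers once; the returned ids stand in for the tensors afterwards.
    std::vector<armnn::ImportedInputId> inputIds =
        runtime->ImportInputs(networkId, inputTensors, armnn::MemorySource::Malloc);
    std::vector<armnn::ImportedOutputId> outputIds =
        runtime->ImportOutputs(networkId, outputTensors, armnn::MemorySource::Malloc);

    // Run with the ids instead of re-supplying the tensors on every call.
    runtime->EnqueueWorkload(networkId, {}, {}, inputIds, outputIds);

    // Release the imported buffers when no more inferences are planned.
    runtime->ClearImportedInputs(networkId, inputIds);
    runtime->ClearImportedOutputs(networkId, outputIds);
}
#endif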

Status IRuntime::EnqueueWorkload(NetworkId networkId,
                                 const InputTensors& inputTensors,
                                 const OutputTensors& outputTensors,
                                 std::vector<ImportedInputId> preImportedInputIds,
                                 std::vector<ImportedOutputId> preImportedOutputIds)
{
    return pRuntimeImpl->EnqueueWorkload(networkId, inputTensors, outputTensors,
                                         preImportedInputIds, preImportedOutputIds);
}

Status IRuntime::Execute(IWorkingMemHandle& workingMemHandle,
                         const InputTensors& inputTensors,
                         const OutputTensors& outputTensors,
                         std::vector<ImportedInputId> preImportedInputs,
                         std::vector<ImportedOutputId> preImportedOutputs)
{
    return pRuntimeImpl->Execute(workingMemHandle,
                                 inputTensors,
                                 outputTensors,
                                 preImportedInputs,
                                 preImportedOutputs);
}

Status IRuntime::UnloadNetwork(NetworkId networkId)
{
    return pRuntimeImpl->UnloadNetwork(networkId);
}

const IDeviceSpec& IRuntime::GetDeviceSpec() const
{
    return pRuntimeImpl->GetDeviceSpec();
}

std::unique_ptr<IWorkingMemHandle> IRuntime::CreateWorkingMemHandle(NetworkId networkId)
{
    return pRuntimeImpl->CreateWorkingMemHandle(networkId);
}

const std::shared_ptr<IProfiler> IRuntime::GetProfiler(NetworkId networkId) const
{
    return pRuntimeImpl->GetProfiler(networkId);
}

void IRuntime::RegisterDebugCallback(NetworkId networkId, const DebugCallbackFunction& func)
{
    return pRuntimeImpl->RegisterDebugCallback(networkId, func);
}
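
// Illustrative sketch (not part of the original file): overlapped execution with the
// experimental async API wrapped above. It assumes the network was loaded with async
// enabled in its INetworkProperties; one IWorkingMemHandle is needed per thread.
#if 0
void ExampleAsyncExecute(armnn::IRuntime* runtime, armnn::NetworkId networkId,
                         const armnn::InputTensors& inputs,
                         const armnn::OutputTensors& outputs)
{
    // Each handle owns the intermediate working memory for one in-flight execution,
    // so two threads holding two handles can call Execute concurrently.
    std::unique_ptr<armnn::IWorkingMemHandle> handle = runtime->CreateWorkingMemHandle(networkId);
    runtime->Execute(*handle, inputs, outputs);
}
#endif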

int RuntimeImpl::GenerateNetworkId()
{
    return m_NetworkIdCounter++;
}

Status RuntimeImpl::LoadNetwork(NetworkId& networkIdOut, IOptimizedNetworkPtr inNetwork)
{
    std::string ignoredErrorMessage;
    return LoadNetwork(networkIdOut, std::move(inNetwork), ignoredErrorMessage);
}

Status RuntimeImpl::LoadNetwork(NetworkId& networkIdOut,
                                IOptimizedNetworkPtr inNetwork,
                                std::string& errorMessage)
{
    INetworkProperties networkProperties(
        false, MemorySource::Undefined, MemorySource::Undefined);
    return LoadNetwork(networkIdOut, std::move(inNetwork), errorMessage, networkProperties);
}

Status RuntimeImpl::LoadNetwork(NetworkId& networkIdOut,
                                IOptimizedNetworkPtr inNetwork,
                                std::string& errorMessage,
                                const INetworkProperties& networkProperties)
{
    // Register the profiler
    auto profiler = inNetwork->GetProfiler();
    ProfilerManager::GetInstance().RegisterProfiler(profiler.get());

    IOptimizedNetwork* rawNetwork = inNetwork.release();

    networkIdOut = GenerateNetworkId();

    for (auto&& context : m_BackendContexts)
    {
        context.second->BeforeLoadNetwork(networkIdOut);
    }

    unique_ptr<LoadedNetwork> loadedNetwork = LoadedNetwork::MakeLoadedNetwork(
        std::unique_ptr<IOptimizedNetwork>(rawNetwork),
        errorMessage,
        networkProperties,
        m_ProfilingService.get());

    if (!loadedNetwork)
    {
        return Status::Failure;
    }

    {
#if !defined(ARMNN_DISABLE_THREADS)
        std::lock_guard<std::mutex> lockGuard(m_Mutex);
#endif

        // Stores the network
        m_LoadedNetworks[networkIdOut] = std::move(loadedNetwork);
    }

    for (auto&& context : m_BackendContexts)
    {
        context.second->AfterLoadNetwork(networkIdOut);
    }

    if (m_ProfilingService->IsProfilingEnabled())
    {
        m_ProfilingService->IncrementCounterValue(arm::pipe::NETWORK_LOADS);
    }

    return Status::Success;
}

Status RuntimeImpl::UnloadNetwork(NetworkId networkId)
{
    bool unloadOk = true;
    for (auto&& context : m_BackendContexts)
    {
        unloadOk &= context.second->BeforeUnloadNetwork(networkId);
    }

    if (!unloadOk)
    {
        ARMNN_LOG(warning) << "RuntimeImpl::UnloadNetwork(): failed to unload "
                              "network with ID: " << networkId << " because BeforeUnloadNetwork failed";
        return Status::Failure;
    }

    std::unique_ptr<arm::pipe::TimelineUtilityMethods> timelineUtils =
        arm::pipe::TimelineUtilityMethods::GetTimelineUtils(*m_ProfilingService.get());
    {
#if !defined(ARMNN_DISABLE_THREADS)
        std::lock_guard<std::mutex> lockGuard(m_Mutex);
#endif

        // If timeline recording is on, mark the network's end of life
        if (timelineUtils)
        {
            auto search = m_LoadedNetworks.find(networkId);
            if (search != m_LoadedNetworks.end())
            {
                arm::pipe::ProfilingGuid networkGuid = search->second->GetNetworkGuid();
                timelineUtils->RecordEvent(networkGuid,
                                           arm::pipe::LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS);
            }
        }

        if (m_LoadedNetworks.erase(networkId) == 0)
        {
            ARMNN_LOG(warning) << "WARNING: RuntimeImpl::UnloadNetwork(): " << networkId << " not found!";
            return Status::Failure;
        }

        if (m_ProfilingService->IsProfilingEnabled())
        {
            m_ProfilingService->IncrementCounterValue(arm::pipe::NETWORK_UNLOADS);
        }
    }

    for (auto&& context : m_BackendContexts)
    {
        context.second->AfterUnloadNetwork(networkId);
    }

    // Unregister the profiler
    ProfilerManager::GetInstance().RegisterProfiler(nullptr);

    ARMNN_LOG(debug) << "RuntimeImpl::UnloadNetwork(): Unloaded network with ID: " << networkId;
    return Status::Success;
}

const std::shared_ptr<IProfiler> RuntimeImpl::GetProfiler(NetworkId networkId) const
{
    auto it = m_LoadedNetworks.find(networkId);
    if (it != m_LoadedNetworks.end())
    {
        auto& loadedNetwork = it->second;
        return loadedNetwork->GetProfiler();
    }

    return nullptr;
}
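
// Illustrative sketch (not part of the original file): reading back per-network profiling
// results through the accessor above. Enabling, running and printing in one place is a
// simplification; names are hypothetical.
#if 0
void ExamplePrintProfile(armnn::IRuntime* runtime, armnn::NetworkId networkId)
{
    std::shared_ptr<armnn::IProfiler> profiler = runtime->GetProfiler(networkId);
    if (profiler)
    {
        profiler->EnableProfiling(true);
        // ... run some inferences, then dump the collected events as JSON.
        profiler->Print(std::cout);
    }
}
#endif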

void RuntimeImpl::ReportStructure(arm::pipe::IProfilingService& profilingService)
{
    if (profilingService.IsProfilingEnabled())
    {
        LoadedNetworks::iterator it = m_LoadedNetworks.begin();
        while (it != m_LoadedNetworks.end())
        {
            auto& loadedNetwork = it->second;
            loadedNetwork->SendNetworkStructure(profilingService);
            // Increment the iterator to point to the next entry
            it++;
        }
    }
}

void RuntimeImpl::InitialiseProfilingService(arm::pipe::IProfilingService& profilingService)
{
    ArmNNProfilingServiceInitialiser initialiser;
    initialiser.InitialiseProfilingService(profilingService);
}

RuntimeImpl::RuntimeImpl(const IRuntime::CreationOptions& options)
    : m_NetworkIdCounter(0)
{
    m_ProfilingService = arm::pipe::IProfilingService::CreateProfilingService(
        arm::pipe::MAX_ARMNN_COUNTER,
        *this,
        arm::pipe::ARMNN_SOFTWARE_INFO,
        arm::pipe::ARMNN_SOFTWARE_VERSION,
        arm::pipe::ARMNN_HARDWARE_VERSION,
        *this);
    const auto start_time = armnn::GetTimeNow();
    ARMNN_LOG(info) << "ArmNN v" << ARMNN_VERSION;
    if (options.m_ProfilingOptions.m_TimelineEnabled && !options.m_ProfilingOptions.m_EnableProfiling)
    {
        throw RuntimeException(
            "It is not possible to enable timeline reporting without profiling being enabled");
    }

    // Load any available/compatible dynamic backends before the runtime
    // goes through the backend registry
    LoadDynamicBackends(options.m_DynamicBackendsPath);

    armnn::BackendIdSet supportedBackends;
    for (const auto& id : BackendRegistryInstance().GetBackendIds())
    {
        // Store backend contexts for the supported ones
        try {
            auto factoryFun = BackendRegistryInstance().GetFactory(id);
            ARMNN_ASSERT(factoryFun != nullptr);
            auto backend = factoryFun();
            ARMNN_ASSERT(backend != nullptr);
            ARMNN_ASSERT(backend.get() != nullptr);

            auto customAllocatorMapIterator = options.m_CustomAllocatorMap.find(id);
            if (customAllocatorMapIterator != options.m_CustomAllocatorMap.end() &&
                customAllocatorMapIterator->second == nullptr)
            {
                // We need to manually clean up the dynamic backends before throwing an exception.
                DynamicBackendUtils::DeregisterDynamicBackends(m_DeviceSpec.GetDynamicBackends());
                m_DeviceSpec.ClearDynamicBackends();
                throw armnn::Exception("Allocator associated with id " + id.Get() + " is null");
            }

            // If the runtime is created in protected mode only add backends that support this mode
            if (options.m_ProtectedMode)
            {
                // Check whether the backend supports ProtectedMode
                BackendCapability protectedContentCapability {"ProtectedContentAllocation", true};
                if (!HasCapability(protectedContentCapability, id))
                {
                    // Protected Content Allocation is not supported by the backend,
                    // so the backend should not be registered
                    ARMNN_LOG(warning) << "Backend "
                                       << id
                                       << " is not registered as it does not support protected content allocation.";
                    continue;
                }
                // The user is responsible for providing a custom memory allocator which
                // allows the allocation of protected memory
                if (customAllocatorMapIterator != options.m_CustomAllocatorMap.end())
                {
                    std::string err;
                    if (customAllocatorMapIterator->second->GetMemorySourceType()
                            == MemorySource::DmaBufProtected)
                    {
                        if (!backend->UseCustomMemoryAllocator(customAllocatorMapIterator->second, err))
                        {
                            ARMNN_LOG(error) << "The backend "
                                             << id
                                             << " reported an error when entering protected mode. Backend won't be"
                                             << " used. ErrorMsg: " << err;
                            continue;
                        }
                        // No errors so register the Custom Allocator with the BackendRegistry
                        BackendRegistryInstance().RegisterAllocator(id, customAllocatorMapIterator->second);
                    }
                    else
                    {
                        ARMNN_LOG(error) << "The CustomAllocator provided with the runtime options doesn't support "
                                            "protected memory. Protected mode can't be activated. The backend "
                                         << id
                                         << " is not going to be used. MemorySource must be MemorySource::DmaBufProtected";
                        continue;
                    }
                }
                else
                {
                    ARMNN_LOG(error) << "Protected mode can't be activated for backend: "
                                     << id
                                     << " no custom allocator was provided to the runtime options.";
                    continue;
                }
            }
            else
            {
                // If a custom memory allocator is provided make the backend use that instead of the default
                if (customAllocatorMapIterator != options.m_CustomAllocatorMap.end())
                {
                    std::string err;
                    if (!backend->UseCustomMemoryAllocator(customAllocatorMapIterator->second, err))
                    {
                        ARMNN_LOG(error) << "The backend "
                                         << id
                                         << " reported an error when trying to use the provided custom allocator."
                                            " Backend won't be used."
                                         << " ErrorMsg: " << err;
                        continue;
                    }
                    // No errors so register the Custom Allocator with the BackendRegistry
                    BackendRegistryInstance().RegisterAllocator(id, customAllocatorMapIterator->second);
                }
            }

            // Check whether a custom memory optimizer strategy map is set
            if (!options.m_MemoryOptimizerStrategyMap.empty())
            {
                auto customMemoryOptimizerStrategyMapIterator = options.m_MemoryOptimizerStrategyMap.find(id);
                // If a memory optimizer strategy is provided make the backend use that instead of the default
                if (customMemoryOptimizerStrategyMapIterator != options.m_MemoryOptimizerStrategyMap.end())
                {
                    // No errors, so register the memory optimizer strategy with the BackendRegistry
                    BackendRegistryInstance().RegisterMemoryOptimizerStrategy(
                        id, customMemoryOptimizerStrategyMapIterator->second);

                    ARMNN_LOG(info) << "MemoryOptimizerStrategy "
                                    << customMemoryOptimizerStrategyMapIterator->second->GetName()
                                    << " set for the backend " << id << ".";
                }
            }
            else
            {
                // Check whether one of the existing memory optimizer strategies was requested
                std::string memoryOptimizerStrategyName = "";
                ParseOptions(options.m_BackendOptions, id, [&](std::string name, const BackendOptions::Var& value)
                {
                    if (name == "MemoryOptimizerStrategy")
                    {
                        memoryOptimizerStrategyName = ParseStringBackendOption(value, "");
                    }
                });
                if (memoryOptimizerStrategyName != "")
                {
                    std::shared_ptr<IMemoryOptimizerStrategy> strategy =
                        GetMemoryOptimizerStrategy(memoryOptimizerStrategyName);

                    if (!strategy)
                    {
                        ARMNN_LOG(warning) << "MemoryOptimizerStrategy: " << memoryOptimizerStrategyName
                                           << " was not found.";
                    }
                    else
                    {
                        auto strategyType = GetMemBlockStrategyTypeName(strategy->GetMemBlockStrategyType());
                        BackendCapability memOptimizeStrategyCapability {strategyType, true};
                        if (HasCapability(memOptimizeStrategyCapability, id))
                        {
                            BackendRegistryInstance().RegisterMemoryOptimizerStrategy(id, strategy);

                            ARMNN_LOG(info) << "MemoryOptimizerStrategy: "
                                            << memoryOptimizerStrategyName << " set for the backend " << id << ".";
                        }
                        else
                        {
                            ARMNN_LOG(warning) << "Backend "
                                               << id
                                               << " does not have multi-axis packing capability and cannot support"
                                               << " MemoryOptimizerStrategy: " << memoryOptimizerStrategyName << ".";
                        }
                    }
                }
            }

            auto context = backend->CreateBackendContext(options);

            // Backends are allowed to return nullptrs if they
            // don't wish to create a backend specific context
            if (context)
            {
                m_BackendContexts.emplace(std::make_pair(id, std::move(context)));
            }
            supportedBackends.emplace(id);

            unique_ptr<arm::pipe::IBackendProfiling> profilingIface =
                arm::pipe::IBackendProfiling::CreateBackendProfiling(
                    arm::pipe::ConvertExternalProfilingOptions(options.m_ProfilingOptions),
                    *m_ProfilingService.get(),
                    id.Get());

            // Backends may also provide a profiling context. Ask for it now.
            auto profilingContext = backend->CreateBackendProfilingContext(options, profilingIface);
            // Backends that don't support profiling will return a null profiling context.
            if (profilingContext)
            {
                // Pass the context onto the profiling service.
                m_ProfilingService->AddBackendProfilingContext(id, profilingContext);
            }
        }
        catch (const BackendUnavailableException&)
        {
            // Ignore backends which are unavailable
        }
    }

    BackendRegistryInstance().SetProfilingService(*m_ProfilingService.get());
    // Pass configuration info to the profiling service
    m_ProfilingService->ConfigureProfilingService(
        arm::pipe::ConvertExternalProfilingOptions(options.m_ProfilingOptions));
    if (options.m_ProfilingOptions.m_EnableProfiling)
    {
        // Try to wait for the profiling service to initialise
        m_ProfilingService->WaitForProfilingServiceActivation(3000);
    }

    m_DeviceSpec.AddSupportedBackends(supportedBackends);

    ARMNN_LOG(info) << "Initialization time: " << std::setprecision(2)
                    << std::fixed << armnn::GetTimeDuration(start_time).count() << " ms.";
}
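
// Illustrative sketch (not part of the original file): the kind of CreationOptions the
// constructor above consumes. "GpuAcc" and "ConstantMemoryStrategy" are example values;
// any registered backend id and known strategy name would do.
#if 0
armnn::IRuntime::CreationOptions MakeExampleOptions()
{
    armnn::IRuntime::CreationOptions options;
    options.m_DynamicBackendsPath = "";                      // keep compile-time search paths
    options.m_ProfilingOptions.m_EnableProfiling = false;    // timeline reporting requires true

    // Picked up by the "MemoryOptimizerStrategy" ParseOptions lambda in the constructor.
    options.m_BackendOptions.emplace_back(armnn::BackendOptions
    {
        "GpuAcc",
        {
            { "MemoryOptimizerStrategy", "ConstantMemoryStrategy" }
        }
    });
    return options;
}
#endif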

RuntimeImpl::~RuntimeImpl()
{
    const auto startTime = armnn::GetTimeNow();
    std::vector<int> networkIDs;
    try
    {
        // Coverity fix: The following code may throw an exception of type std::length_error.
        std::transform(m_LoadedNetworks.begin(), m_LoadedNetworks.end(),
                       std::back_inserter(networkIDs),
                       [](const auto& pair) { return pair.first; });
    }
    catch (const std::exception& e)
    {
        // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
        // exception of type std::length_error.
        // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
        std::cerr << "WARNING: An error has occurred when getting the IDs of the networks to unload: " << e.what()
                  << "\nSome of the loaded networks may not be unloaded" << std::endl;
    }
    // We then proceed to unload all the networks whose IDs have been appended to the list
    // up to the point the exception was thrown (if any).

    for (auto networkID : networkIDs)
    {
        try
        {
            // Coverity fix: UnloadNetwork() may throw an exception of type std::length_error,
            // boost::log::v2s_mt_posix::odr_violation or boost::log::v2s_mt_posix::system_error
            UnloadNetwork(networkID);
        }
        catch (const std::exception& e)
        {
            // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
            // exception of type std::length_error.
            // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
            std::cerr << "WARNING: An error has occurred when unloading network " << networkID << ": " << e.what()
                      << std::endl;
        }
    }

    // Clear all dynamic backends.
    DynamicBackendUtils::DeregisterDynamicBackends(m_DeviceSpec.GetDynamicBackends());
    m_DeviceSpec.ClearDynamicBackends();
    m_BackendContexts.clear();

    BackendRegistryInstance().SetProfilingService(armnn::EmptyOptional());
    ARMNN_LOG(info) << "Shutdown time: " << std::setprecision(2)
                    << std::fixed << armnn::GetTimeDuration(startTime).count() << " ms.";
}

LoadedNetwork* RuntimeImpl::GetLoadedNetworkPtr(NetworkId networkId) const
{
#if !defined(ARMNN_DISABLE_THREADS)
    std::lock_guard<std::mutex> lockGuard(m_Mutex);
#endif
    return m_LoadedNetworks.at(networkId).get();
}

armnn::TensorInfo RuntimeImpl::GetInputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
{
    return GetLoadedNetworkPtr(networkId)->GetInputTensorInfo(layerId);
}

armnn::TensorInfo RuntimeImpl::GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
{
    return GetLoadedNetworkPtr(networkId)->GetOutputTensorInfo(layerId);
}

std::vector<ImportedInputId> RuntimeImpl::ImportInputs(NetworkId networkId, const InputTensors& inputTensors,
                                                       MemorySource forceImportMemorySource)
{
    return GetLoadedNetworkPtr(networkId)->ImportInputs(inputTensors, forceImportMemorySource);
}

std::vector<ImportedOutputId> RuntimeImpl::ImportOutputs(NetworkId networkId, const OutputTensors& outputTensors,
                                                         MemorySource forceImportMemorySource)
{
    return GetLoadedNetworkPtr(networkId)->ImportOutputs(outputTensors, forceImportMemorySource);
}

void RuntimeImpl::ClearImportedInputs(NetworkId networkId, const std::vector<ImportedInputId> inputIds)
{
    return GetLoadedNetworkPtr(networkId)->ClearImportedInputs(inputIds);
}

void RuntimeImpl::ClearImportedOutputs(NetworkId networkId, const std::vector<ImportedOutputId> outputIds)
{
    return GetLoadedNetworkPtr(networkId)->ClearImportedOutputs(outputIds);
}

Status RuntimeImpl::EnqueueWorkload(NetworkId networkId,
                                    const InputTensors& inputTensors,
                                    const OutputTensors& outputTensors,
                                    std::vector<ImportedInputId> preImportedInputIds,
                                    std::vector<ImportedOutputId> preImportedOutputIds)
{
    const auto startTime = armnn::GetTimeNow();

    LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);

    if (!loadedNetwork)
    {
        ARMNN_LOG(error) << "A Network with an id of " << networkId << " does not exist.";
        return Status::Failure;
    }
    if (loadedNetwork->IsAsyncEnabled())
    {
        ARMNN_LOG(error) << "Network " << networkId << " is async enabled.";
        return Status::Failure;
    }
    ProfilerManager::GetInstance().RegisterProfiler(loadedNetwork->GetProfiler().get());

    ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "EnqueueWorkload");

    static thread_local NetworkId lastId = networkId;
    if (lastId != networkId)
    {
        LoadedNetworkFuncSafe(lastId, [](LoadedNetwork* network)
        {
            network->FreeWorkingMemory();
        });
    }
    lastId = networkId;

    auto status = loadedNetwork->EnqueueWorkload(inputTensors, outputTensors,
                                                 preImportedInputIds, preImportedOutputIds);

    // Check if we imported; if not there's no need to call the After EnqueueWorkload events
    if (!preImportedInputIds.empty() || !preImportedOutputIds.empty())
    {
        // Call After EnqueueWorkload events
        for (auto&& context : m_BackendContexts)
        {
            context.second->AfterEnqueueWorkload(networkId);
        }
    }
    ARMNN_LOG(info) << "Execution time: " << std::setprecision(2)
                    << std::fixed << armnn::GetTimeDuration(startTime).count() << " ms.";
    return status;
}

Status RuntimeImpl::Execute(IWorkingMemHandle& iWorkingMemHandle,
                            const InputTensors& inputTensors,
                            const OutputTensors& outputTensors,
                            std::vector<ImportedInputId> preImportedInputs,
                            std::vector<ImportedOutputId> preImportedOutputs)
{
    const auto startTime = armnn::GetTimeNow();

    NetworkId networkId = iWorkingMemHandle.GetNetworkId();
    LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);

    if (!loadedNetwork)
    {
        ARMNN_LOG(error) << "A Network with an id of " << networkId << " does not exist.";
        return Status::Failure;
    }
    if (!loadedNetwork->IsAsyncEnabled())
    {
        ARMNN_LOG(error) << "Attempting to execute " << networkId << " when it is not async enabled.";
        return Status::Failure;
    }
    ProfilerManager::GetInstance().RegisterProfiler(loadedNetwork->GetProfiler().get());

    ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Execute");

    auto status = loadedNetwork->Execute(inputTensors,
                                         outputTensors,
                                         iWorkingMemHandle,
                                         preImportedInputs,
                                         preImportedOutputs);

    ARMNN_LOG(info) << "Execution time: " << std::setprecision(2)
                    << std::fixed << armnn::GetTimeDuration(startTime).count() << " ms.";

    return status;
}

/// Create a new unique WorkingMemHandle object. Create multiple handles if you wish to have
/// overlapped Execution by calling this function from different threads.
std::unique_ptr<IWorkingMemHandle> RuntimeImpl::CreateWorkingMemHandle(NetworkId networkId)
{
    LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);

    if (!loadedNetwork)
    {
        ARMNN_LOG(error) << "A Network with an id of " << networkId << " does not exist.";
        return nullptr;
    }
    if (!loadedNetwork->IsAsyncEnabled())
    {
        ARMNN_LOG(error) << "Network " << networkId << " is not async enabled.";
        return nullptr;
    }
    ProfilerManager::GetInstance().RegisterProfiler(loadedNetwork->GetProfiler().get());

    ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "CreateWorkingMemHandle");

    static thread_local NetworkId lastId = networkId;
    if (lastId != networkId)
    {
        LoadedNetworkFuncSafe(lastId, [](LoadedNetwork* network)
        {
            network->FreeWorkingMemory();
        });
    }
    lastId = networkId;

    return loadedNetwork->CreateWorkingMemHandle(networkId);
}

void RuntimeImpl::RegisterDebugCallback(NetworkId networkId, const DebugCallbackFunction& func)
{
    LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);
    loadedNetwork->RegisterDebugCallback(func);
}
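
// Illustrative sketch (not part of the original file): registering a debug callback via
// the wrapper above. The callback runs when Debug layers execute, receiving the parent
// layer guid, the output slot index and the intermediate tensor. Names are hypothetical.
#if 0
void ExampleRegisterDebugCallback(armnn::IRuntime* runtime, armnn::NetworkId networkId)
{
    runtime->RegisterDebugCallback(networkId,
        [](armnn::LayerGuid /*guid*/, unsigned int slotIndex, armnn::ITensorHandle* tensorHandle)
        {
            // Map exposes a read-only view of the intermediate tensor's memory.
            const void* data = tensorHandle->Map();
            ARMNN_LOG(info) << "Debug output for slot " << slotIndex << " at " << data;
            tensorHandle->Unmap();
        });
}
#endif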

void RuntimeImpl::LoadDynamicBackends(const std::string& overrideBackendPath)
{
    // Get the paths to load the dynamic backends from
    std::vector<std::string> backendPaths = DynamicBackendUtils::GetBackendPaths(overrideBackendPath);

    // Get the shared objects to try to load as dynamic backends
    std::vector<std::string> sharedObjects = DynamicBackendUtils::GetSharedObjects(backendPaths);

    // Create a list of dynamic backends
    m_DynamicBackends = DynamicBackendUtils::CreateDynamicBackends(sharedObjects);

    // Register the dynamic backends in the backend registry
    armnn::BackendIdSet registeredBackendIds = DynamicBackendUtils::RegisterDynamicBackends(m_DynamicBackends);

    // Add the registered dynamic backend ids to the list of supported backends
    m_DeviceSpec.AddSupportedBackends(registeredBackendIds, true);
}

} // namespace armnn