ArmNN 22.02
Runtime.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "Runtime.hpp"

#include <armnn/Version.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/Logging.hpp>
#include <armnn/utility/Timer.hpp>

#include <armnn/backends/IBackendContext.hpp>
#include <backends/BackendProfiling.hpp>
#include <backendsCommon/DynamicBackendUtils.hpp>
#include <backendsCommon/memoryOptimizerStrategyLibrary/MemoryOptimizerStrategyLibrary.hpp>

#include <common/include/LabelsAndEventClasses.hpp>

#include <iostream>

using namespace armnn;
using namespace std;

namespace armnn
{
IRuntime::IRuntime() : pRuntimeImpl(nullptr) {}

IRuntime::IRuntime(const IRuntime::CreationOptions& options)
    : pRuntimeImpl(new RuntimeImpl(options)) {}

IRuntime::~IRuntime() = default;

IRuntime* IRuntime::CreateRaw(const CreationOptions& options)
{
    return new IRuntime(options);
}

IRuntimePtr IRuntime::Create(const CreationOptions& options)
{
    return IRuntimePtr(CreateRaw(options), &IRuntime::Destroy);
}

void IRuntime::Destroy(IRuntime* runtime)
{
    delete runtime;
}
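
// A minimal usage sketch (not from the original source): runtimes are normally created
// through the factory and owned via IRuntimePtr, whose deleter calls IRuntime::Destroy():
//
//     armnn::IRuntime::CreationOptions options;
//     armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);
//     // ... use the runtime ...
//     // Destroy() runs automatically when 'runtime' goes out of scope.
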
Status IRuntime::LoadNetwork(NetworkId& networkIdOut, IOptimizedNetworkPtr network)
{
    return pRuntimeImpl->LoadNetwork(networkIdOut, std::move(network));
}

Status IRuntime::LoadNetwork(NetworkId& networkIdOut,
                             IOptimizedNetworkPtr network,
                             std::string& errorMessage)
{
    return pRuntimeImpl->LoadNetwork(networkIdOut, std::move(network), errorMessage);
}

Status IRuntime::LoadNetwork(NetworkId& networkIdOut,
                             IOptimizedNetworkPtr network,
                             std::string& errorMessage,
                             const INetworkProperties& networkProperties)
{
    return pRuntimeImpl->LoadNetwork(networkIdOut, std::move(network), errorMessage, networkProperties);
}
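
// A minimal usage sketch (not from the original source): 'optNet' is assumed to be an
// IOptimizedNetworkPtr produced by armnn::Optimize():
//
//     armnn::NetworkId networkId;
//     std::string errorMessage;
//     if (runtime->LoadNetwork(networkId, std::move(optNet), errorMessage) != armnn::Status::Success)
//     {
//         ARMNN_LOG(error) << "LoadNetwork failed: " << errorMessage;
//     }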

TensorInfo IRuntime::GetInputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
{
    return pRuntimeImpl->GetInputTensorInfo(networkId, layerId);
}

TensorInfo IRuntime::GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
{
    return pRuntimeImpl->GetOutputTensorInfo(networkId, layerId);
}

std::vector<ImportedInputId> IRuntime::ImportInputs(NetworkId networkId, const InputTensors& inputTensors,
                                                    MemorySource forceImportMemorySource)
{
    return pRuntimeImpl->ImportInputs(networkId, inputTensors, forceImportMemorySource);
}

std::vector<ImportedOutputId> IRuntime::ImportOutputs(NetworkId networkId, const OutputTensors& outputTensors,
                                                      MemorySource forceImportMemorySource)
{
    return pRuntimeImpl->ImportOutputs(networkId, outputTensors, forceImportMemorySource);
}
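
// A minimal usage sketch (not from the original source): pre-importing separates tensor
// import/mapping from execution, so a later EnqueueWorkload()/Execute() can skip per-call
// copies. 'inputTensors' and 'outputTensors' are assumed to wrap caller-owned buffers:
//
//     std::vector<armnn::ImportedInputId> importedInputs =
//         runtime->ImportInputs(networkId, inputTensors, armnn::MemorySource::Malloc);
//     std::vector<armnn::ImportedOutputId> importedOutputs =
//         runtime->ImportOutputs(networkId, outputTensors, armnn::MemorySource::Malloc);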

void IRuntime::ClearImportedInputs(NetworkId networkId, const std::vector<ImportedInputId> inputIds)
{
    return pRuntimeImpl->ClearImportedInputs(networkId, inputIds);
}

void IRuntime::ClearImportedOutputs(NetworkId networkId, const std::vector<ImportedOutputId> outputIds)
{
    return pRuntimeImpl->ClearImportedOutputs(networkId, outputIds);
}

Status IRuntime::EnqueueWorkload(NetworkId networkId,
                                 const InputTensors& inputTensors,
                                 const OutputTensors& outputTensors,
                                 std::vector<ImportedInputId> preImportedInputIds,
                                 std::vector<ImportedOutputId> preImportedOutputIds)
{
    return pRuntimeImpl->EnqueueWorkload(networkId, inputTensors, outputTensors,
                                         preImportedInputIds, preImportedOutputIds);
}
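
// A minimal usage sketch (not from the original source): a blocking inference call.
// The binding id 0 and the 'inputData'/'outputData' buffers are assumptions; real
// binding ids come from the network that was optimized and loaded:
//
//     armnn::TensorInfo inputInfo = runtime->GetInputTensorInfo(networkId, 0);
//     inputInfo.SetConstant(true); // input buffers are treated as read-only during inference
//     armnn::InputTensors inputTensors
//     {
//         { 0, armnn::ConstTensor(inputInfo, inputData.data()) }
//     };
//     armnn::OutputTensors outputTensors
//     {
//         { 0, armnn::Tensor(runtime->GetOutputTensorInfo(networkId, 0), outputData.data()) }
//     };
//     runtime->EnqueueWorkload(networkId, inputTensors, outputTensors);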

Status IRuntime::Execute(IWorkingMemHandle& workingMemHandle,
                         const InputTensors& inputTensors,
                         const OutputTensors& outputTensors,
                         std::vector<ImportedInputId> preImportedInputs,
                         std::vector<ImportedOutputId> preImportedOutputs)
{
    return pRuntimeImpl->Execute(workingMemHandle, inputTensors, outputTensors, preImportedInputs, preImportedOutputs);
}

Status IRuntime::UnloadNetwork(NetworkId networkId)
{
    return pRuntimeImpl->UnloadNetwork(networkId);
}

const IDeviceSpec& IRuntime::GetDeviceSpec() const
{
    return pRuntimeImpl->GetDeviceSpec();
}

std::unique_ptr<IWorkingMemHandle> IRuntime::CreateWorkingMemHandle(NetworkId networkId)
{
    return pRuntimeImpl->CreateWorkingMemHandle(networkId);
}

const std::shared_ptr<IProfiler> IRuntime::GetProfiler(NetworkId networkId) const
{
    return pRuntimeImpl->GetProfiler(networkId);
}
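
// A minimal usage sketch (not from the original source): retrieving a network's profiler
// and printing its results; 'networkId' is assumed to be a loaded network:
//
//     std::shared_ptr<armnn::IProfiler> profiler = runtime->GetProfiler(networkId);
//     if (profiler)
//     {
//         profiler->EnableProfiling(true);  // enable before EnqueueWorkload()
//         // ... run inference ...
//         profiler->Print(std::cout);       // dump the collected events
//     }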

void IRuntime::RegisterDebugCallback(NetworkId networkId, const DebugCallbackFunction& func)
{
    return pRuntimeImpl->RegisterDebugCallback(networkId, func);
}

int RuntimeImpl::GenerateNetworkId()
{
    return m_NetworkIdCounter++;
}

Status RuntimeImpl::LoadNetwork(NetworkId& networkIdOut, IOptimizedNetworkPtr inNetwork)
{
    std::string ignoredErrorMessage;
    return LoadNetwork(networkIdOut, std::move(inNetwork), ignoredErrorMessage);
}

Status RuntimeImpl::LoadNetwork(NetworkId& networkIdOut,
                                IOptimizedNetworkPtr inNetwork,
                                std::string& errorMessage)
{
    INetworkProperties networkProperties(
        false, MemorySource::Undefined, MemorySource::Undefined);
    return LoadNetwork(networkIdOut, std::move(inNetwork), errorMessage, networkProperties);
}

Status RuntimeImpl::LoadNetwork(NetworkId& networkIdOut,
                                IOptimizedNetworkPtr inNetwork,
                                std::string& errorMessage,
                                const INetworkProperties& networkProperties)
{
    // Register the profiler
    auto profiler = inNetwork->GetProfiler();
    ProfilerManager::GetInstance().RegisterProfiler(profiler.get());

    IOptimizedNetwork* rawNetwork = inNetwork.release();

    networkIdOut = GenerateNetworkId();

    for (auto&& context : m_BackendContexts)
    {
        context.second->BeforeLoadNetwork(networkIdOut);
    }

    unique_ptr<LoadedNetwork> loadedNetwork = LoadedNetwork::MakeLoadedNetwork(
        std::unique_ptr<IOptimizedNetwork>(rawNetwork),
        errorMessage,
        networkProperties,
        m_ProfilingService);

    if (!loadedNetwork)
    {
        return Status::Failure;
    }

    {
        std::lock_guard<std::mutex> lockGuard(m_Mutex);

        // Store the network
        m_LoadedNetworks[networkIdOut] = std::move(loadedNetwork);
    }

    for (auto&& context : m_BackendContexts)
    {
        context.second->AfterLoadNetwork(networkIdOut);
    }

    if (m_ProfilingService.IsProfilingEnabled())
    {
        m_ProfilingService.IncrementCounterValue(armnn::profiling::NETWORK_LOADS);
    }

    return Status::Success;
}

Status RuntimeImpl::UnloadNetwork(NetworkId networkId)
{
    bool unloadOk = true;
    for (auto&& context : m_BackendContexts)
    {
        unloadOk &= context.second->BeforeUnloadNetwork(networkId);
    }

    if (!unloadOk)
    {
        ARMNN_LOG(warning) << "RuntimeImpl::UnloadNetwork(): failed to unload "
                              "network with ID:" << networkId << " because BeforeUnloadNetwork failed";
        return Status::Failure;
    }

    std::unique_ptr<profiling::TimelineUtilityMethods> timelineUtils =
        profiling::TimelineUtilityMethods::GetTimelineUtils(m_ProfilingService);
    {
        std::lock_guard<std::mutex> lockGuard(m_Mutex);

        // If timeline recording is on, mark the network's end of life
        if (timelineUtils)
        {
            auto search = m_LoadedNetworks.find(networkId);
            if (search != m_LoadedNetworks.end())
            {
                profiling::ProfilingGuid networkGuid = search->second->GetNetworkGuid();
                timelineUtils->RecordEvent(networkGuid,
                                           profiling::LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS);
            }
        }

        if (m_LoadedNetworks.erase(networkId) == 0)
        {
            ARMNN_LOG(warning) << "WARNING: RuntimeImpl::UnloadNetwork(): " << networkId << " not found!";
            return Status::Failure;
        }

        if (m_ProfilingService.IsProfilingEnabled())
        {
            m_ProfilingService.IncrementCounterValue(armnn::profiling::NETWORK_UNLOADS);
        }
    }

    for (auto&& context : m_BackendContexts)
    {
        context.second->AfterUnloadNetwork(networkId);
    }

    // Unregister the profiler
    ProfilerManager::GetInstance().RegisterProfiler(nullptr);

    ARMNN_LOG(debug) << "RuntimeImpl::UnloadNetwork(): Unloaded network with ID: " << networkId;
    return Status::Success;
}

const std::shared_ptr<IProfiler> RuntimeImpl::GetProfiler(NetworkId networkId) const
{
    auto it = m_LoadedNetworks.find(networkId);
    if (it != m_LoadedNetworks.end())
    {
        auto& loadedNetwork = it->second;
        return loadedNetwork->GetProfiler();
    }

    return nullptr;
}

void RuntimeImpl::ReportStructure() // armnn::profiling::IProfilingService& profilingService as param
{
    // No-op for the time being, but this may be useful in future to have the profilingService available
    // if (profilingService.IsProfilingEnabled()){}

    LoadedNetworks::iterator it = m_LoadedNetworks.begin();
    while (it != m_LoadedNetworks.end())
    {
        auto& loadedNetwork = it->second;
        loadedNetwork->SendNetworkStructure();
        // Increment the iterator to point to the next entry
        it++;
    }
}

RuntimeImpl::RuntimeImpl(const IRuntime::CreationOptions& options)
    : m_NetworkIdCounter(0),
      m_ProfilingService(*this)
{
    const auto start_time = armnn::GetTimeNow();
    ARMNN_LOG(info) << "ArmNN v" << ARMNN_VERSION;
    if (options.m_ProfilingOptions.m_TimelineEnabled && !options.m_ProfilingOptions.m_EnableProfiling)
    {
        throw RuntimeException(
            "It is not possible to enable timeline reporting without profiling being enabled");
    }

    // Load any available/compatible dynamic backends before the runtime
    // goes through the backend registry
    LoadDynamicBackends(options.m_DynamicBackendsPath);

    BackendIdSet supportedBackends;
    for (const auto& id : BackendRegistryInstance().GetBackendIds())
    {
        // Store backend contexts for the supported ones
        try {
            auto factoryFun = BackendRegistryInstance().GetFactory(id);
            ARMNN_ASSERT(factoryFun != nullptr);
            auto backend = factoryFun();
            ARMNN_ASSERT(backend != nullptr);
            ARMNN_ASSERT(backend.get() != nullptr);

            auto customAllocatorMapIterator = options.m_CustomAllocatorMap.find(id);
            if (customAllocatorMapIterator != options.m_CustomAllocatorMap.end() &&
                customAllocatorMapIterator->second == nullptr)
            {
                // We need to manually clean up the dynamic backends before throwing an exception.
                DynamicBackendUtils::DeregisterDynamicBackends(m_DeviceSpec.GetDynamicBackends());
                m_DeviceSpec.ClearDynamicBackends();
                throw armnn::Exception("Allocator associated with id " + id.Get() + " is null");
            }

            // If the runtime is created in protected mode, only add backends that support this mode
            if (options.m_ProtectedMode)
            {
                // Check if the backend supports ProtectedMode
                BackendCapability protectedContentCapability {"ProtectedContentAllocation", true};
                if (!HasCapability(protectedContentCapability, id))
                {
                    // Protected content allocation is not supported by the backend,
                    // so the backend should not be registered
                    ARMNN_LOG(warning) << "Backend "
                                       << id
                                       << " is not registered as it does not support protected content allocation.";
                    continue;
                }
                // The user is responsible for providing a custom memory allocator which allows
                // protected memory to be allocated
                if (customAllocatorMapIterator != options.m_CustomAllocatorMap.end())
                {
                    std::string err;
                    if (customAllocatorMapIterator->second->GetMemorySourceType()
                        == armnn::MemorySource::DmaBufProtected)
                    {
                        if (!backend->UseCustomMemoryAllocator(customAllocatorMapIterator->second, err))
                        {
                            ARMNN_LOG(error) << "The backend "
                                             << id
                                             << " reported an error when entering protected mode. Backend won't be"
                                             << " used. ErrorMsg: " << err;
                            continue;
                        }
                        // No errors, so register the custom allocator with the BackendRegistry
                        BackendRegistryInstance().RegisterAllocator(id, customAllocatorMapIterator->second);
                    }
                    else
                    {
                        ARMNN_LOG(error) << "The CustomAllocator provided with the runtime options doesn't support "
                                            "protected memory. Protected mode can't be activated. The backend "
                                         << id
                                         << " is not going to be used. MemorySource must be MemorySource::DmaBufProtected";
                        continue;
                    }
                }
                else
                {
                    ARMNN_LOG(error) << "Protected mode can't be activated for backend: "
                                     << id
                                     << " no custom allocator was provided to the runtime options.";
                    continue;
                }
            }
            else
            {
                // If a custom memory allocator is provided, make the backend use it instead of the default
                if (customAllocatorMapIterator != options.m_CustomAllocatorMap.end())
                {
                    std::string err;
                    if (!backend->UseCustomMemoryAllocator(customAllocatorMapIterator->second, err))
                    {
                        ARMNN_LOG(error) << "The backend "
                                         << id
                                         << " reported an error when trying to use the provided custom allocator."
                                            " Backend won't be used."
                                         << " ErrorMsg: " << err;
                        continue;
                    }
                    // No errors, so register the custom allocator with the BackendRegistry
                    BackendRegistryInstance().RegisterAllocator(id, customAllocatorMapIterator->second);
                }
            }

            // Check if a custom memory optimizer strategy map is set
            if (!options.m_MemoryOptimizerStrategyMap.empty())
            {
                auto customMemoryOptimizerStrategyMapIterator = options.m_MemoryOptimizerStrategyMap.find(id);
                // If a memory optimizer strategy is provided, make the backend use it instead of the default
                if (customMemoryOptimizerStrategyMapIterator != options.m_MemoryOptimizerStrategyMap.end())
                {
                    // No errors, so register the memory optimizer strategy with the BackendRegistry
                    BackendRegistryInstance().RegisterMemoryOptimizerStrategy(
                        id, customMemoryOptimizerStrategyMapIterator->second);

                    ARMNN_LOG(info) << "MemoryOptimizerStrategy "
                                    << customMemoryOptimizerStrategyMapIterator->second->GetName()
                                    << " set for the backend " << id << ".";
                }
            }
            else
            {
                // Check whether one of the built-in memory optimizer strategies was requested
                std::string memoryOptimizerStrategyName = "";
                ParseOptions(options.m_BackendOptions, id, [&](std::string name, const BackendOptions::Var& value)
                {
                    if (name == "MemoryOptimizerStrategy")
                    {
                        memoryOptimizerStrategyName = ParseStringBackendOption(value, "");
                    }
                });
                if (memoryOptimizerStrategyName != "")
                {
                    std::shared_ptr<IMemoryOptimizerStrategy> strategy =
                        GetMemoryOptimizerStrategy(memoryOptimizerStrategyName);

                    if (!strategy)
                    {
                        ARMNN_LOG(warning) << "MemoryOptimizerStrategy: " << memoryOptimizerStrategyName
                                           << " was not found.";
                    }
                    else
                    {
                        auto strategyType = GetMemBlockStrategyTypeName(strategy->GetMemBlockStrategyType());
                        BackendCapability memOptimizeStrategyCapability {strategyType, true};
                        if (HasCapability(memOptimizeStrategyCapability, id))
                        {
                            BackendRegistryInstance().RegisterMemoryOptimizerStrategy(id, strategy);

                            ARMNN_LOG(info) << "MemoryOptimizerStrategy: "
                                            << memoryOptimizerStrategyName << " set for the backend " << id << ".";
                        }
                        else
                        {
                            ARMNN_LOG(warning) << "Backend "
                                               << id
                                               << " does not have multi-axis packing capability and cannot support "
                                               << "MemoryOptimizerStrategy: " << memoryOptimizerStrategyName << ".";
                        }
                    }
                }
            }

            auto context = backend->CreateBackendContext(options);

            // Backends are allowed to return nullptrs if they
            // don't wish to create a backend specific context
            if (context)
            {
                m_BackendContexts.emplace(std::make_pair(id, std::move(context)));
            }
            supportedBackends.emplace(id);

            unique_ptr<armnn::profiling::IBackendProfiling> profilingIface =
                std::make_unique<armnn::profiling::BackendProfiling>(armnn::profiling::BackendProfiling(
                    options, m_ProfilingService, id));

            // Backends may also provide a profiling context. Ask for it now.
            auto profilingContext = backend->CreateBackendProfilingContext(options, profilingIface);
            // Backends that don't support profiling will return a null profiling context.
            if (profilingContext)
            {
                // Pass the context onto the profiling service.
                m_ProfilingService.AddBackendProfilingContext(id, profilingContext);
            }
        }
        catch (const BackendUnavailableException&)
        {
            // Ignore backends which are unavailable
        }
    }

    BackendRegistryInstance().SetProfilingService(m_ProfilingService);
    // Pass configuration info to the profiling service
    m_ProfilingService.ConfigureProfilingService(options.m_ProfilingOptions);
    if (options.m_ProfilingOptions.m_EnableProfiling)
    {
        // Try to wait for the profiling service to initialise
        m_ProfilingService.WaitForProfilingServiceActivation(3000);
    }

    m_DeviceSpec.AddSupportedBackends(supportedBackends);

    ARMNN_LOG(info) << "Initialization time: " << std::setprecision(2)
                    << std::fixed << armnn::GetTimeDuration(start_time).count() << " ms.";
}
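
// A minimal usage sketch (not from the original source) of the options consumed above.
// The "MemoryOptimizerStrategy" option name is taken from the parsing code in this
// constructor; the strategy name, backend id and path are illustrative assumptions:
//
//     armnn::IRuntime::CreationOptions options;
//     options.m_DynamicBackendsPath = "/path/to/dynamic/backends";  // hypothetical path
//     options.m_BackendOptions.emplace_back(
//         armnn::BackendOptions{"CpuAcc", {{"MemoryOptimizerStrategy", "SingleAxisPriorityList"}}});
//     armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);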

RuntimeImpl::~RuntimeImpl()
{
    const auto startTime = armnn::GetTimeNow();
    std::vector<int> networkIDs;
    try
    {
        // Coverity fix: The following code may throw an exception of type std::length_error.
        std::transform(m_LoadedNetworks.begin(), m_LoadedNetworks.end(),
                       std::back_inserter(networkIDs),
                       [](const auto& pair) { return pair.first; });
    }
    catch (const std::exception& e)
    {
        // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
        // exception of type std::length_error.
        // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
        std::cerr << "WARNING: An error has occurred when getting the IDs of the networks to unload: " << e.what()
                  << "\nSome of the loaded networks may not be unloaded" << std::endl;
    }
    // We then proceed to unload all the networks whose IDs have been appended to the list
    // up to the point the exception was thrown (if any).

    for (auto networkID : networkIDs)
    {
        try
        {
            // Coverity fix: UnloadNetwork() may throw an exception of type std::length_error,
            // boost::log::v2s_mt_posix::odr_violation or boost::log::v2s_mt_posix::system_error
            UnloadNetwork(networkID);
        }
        catch (const std::exception& e)
        {
            // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
            // exception of type std::length_error.
            // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
            std::cerr << "WARNING: An error has occurred when unloading network " << networkID << ": " << e.what()
                      << std::endl;
        }
    }

    // Clear all dynamic backends.
    DynamicBackendUtils::DeregisterDynamicBackends(m_DeviceSpec.GetDynamicBackends());
    m_DeviceSpec.ClearDynamicBackends();
    m_BackendContexts.clear();

    BackendRegistryInstance().SetProfilingService(armnn::EmptyOptional());
    ARMNN_LOG(info) << "Shutdown time: " << std::setprecision(2)
                    << std::fixed << armnn::GetTimeDuration(startTime).count() << " ms.";
}

LoadedNetwork* RuntimeImpl::GetLoadedNetworkPtr(NetworkId networkId) const
{
    std::lock_guard<std::mutex> lockGuard(m_Mutex);
    return m_LoadedNetworks.at(networkId).get();
}

armnn::TensorInfo RuntimeImpl::GetInputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
{
    return GetLoadedNetworkPtr(networkId)->GetInputTensorInfo(layerId);
}

armnn::TensorInfo RuntimeImpl::GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
{
    return GetLoadedNetworkPtr(networkId)->GetOutputTensorInfo(layerId);
}

std::vector<ImportedInputId> RuntimeImpl::ImportInputs(NetworkId networkId, const InputTensors& inputTensors,
                                                       MemorySource forceImportMemorySource)
{
    return GetLoadedNetworkPtr(networkId)->ImportInputs(inputTensors, forceImportMemorySource);
}

std::vector<ImportedOutputId> RuntimeImpl::ImportOutputs(NetworkId networkId, const OutputTensors& outputTensors,
                                                         MemorySource forceImportMemorySource)
{
    return GetLoadedNetworkPtr(networkId)->ImportOutputs(outputTensors, forceImportMemorySource);
}

void RuntimeImpl::ClearImportedInputs(NetworkId networkId, const std::vector<ImportedInputId> inputIds)
{
    return GetLoadedNetworkPtr(networkId)->ClearImportedInputs(inputIds);
}

void RuntimeImpl::ClearImportedOutputs(NetworkId networkId, const std::vector<ImportedOutputId> outputIds)
{
    return GetLoadedNetworkPtr(networkId)->ClearImportedOutputs(outputIds);
}

Status RuntimeImpl::EnqueueWorkload(NetworkId networkId,
                                    const InputTensors& inputTensors,
                                    const OutputTensors& outputTensors,
                                    std::vector<ImportedInputId> preImportedInputIds,
                                    std::vector<ImportedOutputId> preImportedOutputIds)
{
    const auto startTime = armnn::GetTimeNow();

    LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);

    if (!loadedNetwork)
    {
        ARMNN_LOG(error) << "A Network with an id of " << networkId << " does not exist.";
        return Status::Failure;
    }
    if (loadedNetwork->IsAsyncEnabled())
    {
        ARMNN_LOG(error) << "Network " << networkId << " is async enabled.";
        return Status::Failure;
    }
    ProfilerManager::GetInstance().RegisterProfiler(loadedNetwork->GetProfiler().get());

    ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "EnqueueWorkload");

    // EnqueueWorkload reuses this thread's working memory. If this thread last ran a
    // different network, free that network's working memory before running this one.
    static thread_local NetworkId lastId = networkId;
    if (lastId != networkId)
    {
        LoadedNetworkFuncSafe(lastId, [](LoadedNetwork* network)
        {
            network->FreeWorkingMemory();
        });
    }
    lastId = networkId;

    auto status = loadedNetwork->EnqueueWorkload(inputTensors, outputTensors,
                                                 preImportedInputIds, preImportedOutputIds);

    // Check if we imported; if not, there's no need to call the After EnqueueWorkload events
    if (!preImportedInputIds.empty() || !preImportedOutputIds.empty())
    {
        // Call After EnqueueWorkload events
        for (auto&& context : m_BackendContexts)
        {
            context.second->AfterEnqueueWorkload(networkId);
        }
    }
    ARMNN_LOG(info) << "Execution time: " << std::setprecision(2)
                    << std::fixed << armnn::GetTimeDuration(startTime).count() << " ms.";
    return status;
}

Status RuntimeImpl::Execute(IWorkingMemHandle& iWorkingMemHandle,
                            const InputTensors& inputTensors,
                            const OutputTensors& outputTensors,
                            std::vector<ImportedInputId> preImportedInputs,
                            std::vector<ImportedOutputId> preImportedOutputs)
{
    const auto startTime = armnn::GetTimeNow();

    NetworkId networkId = iWorkingMemHandle.GetNetworkId();
    LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);

    if (!loadedNetwork)
    {
        ARMNN_LOG(error) << "A Network with an id of " << networkId << " does not exist.";
        return Status::Failure;
    }
    if (!loadedNetwork->IsAsyncEnabled())
    {
        ARMNN_LOG(error) << "Attempting to execute " << networkId << " when it is not async enabled.";
        return Status::Failure;
    }
    ProfilerManager::GetInstance().RegisterProfiler(loadedNetwork->GetProfiler().get());

    ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Execute");

    auto status = loadedNetwork->Execute(inputTensors,
                                         outputTensors,
                                         iWorkingMemHandle,
                                         preImportedInputs,
                                         preImportedOutputs);

    ARMNN_LOG(info) << "Execution time: " << std::setprecision(2)
                    << std::fixed << armnn::GetTimeDuration(startTime).count() << " ms.";

    return status;
}

/// Create a new unique WorkingMemHandle object. Create multiple handles if you wish to have
/// overlapped Execution by calling this function from different threads.
std::unique_ptr<IWorkingMemHandle> RuntimeImpl::CreateWorkingMemHandle(NetworkId networkId)
{
    LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);

    if (!loadedNetwork)
    {
        ARMNN_LOG(error) << "A Network with an id of " << networkId << " does not exist.";
        return nullptr;
    }
    if (!loadedNetwork->IsAsyncEnabled())
    {
        ARMNN_LOG(error) << "Network " << networkId << " is not async enabled.";
        return nullptr;
    }
    ProfilerManager::GetInstance().RegisterProfiler(loadedNetwork->GetProfiler().get());

    ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "CreateWorkingMemHandle");

    static thread_local NetworkId lastId = networkId;
    if (lastId != networkId)
    {
        LoadedNetworkFuncSafe(lastId, [](LoadedNetwork* network)
        {
            network->FreeWorkingMemory();
        });
    }
    lastId = networkId;

    return loadedNetwork->CreateWorkingMemHandle(networkId);
}
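
// A minimal usage sketch (not from the original source): overlapped execution of an
// async-enabled network, one working-memory handle per thread as the comment above
// describes. 'runtime', 'networkId' and the tensor containers are assumed set up already:
//
//     auto handle0 = runtime->CreateWorkingMemHandle(networkId);
//     auto handle1 = runtime->CreateWorkingMemHandle(networkId);
//     std::thread t0([&] { runtime->Execute(*handle0, inputs0, outputs0); });
//     std::thread t1([&] { runtime->Execute(*handle1, inputs1, outputs1); });
//     t0.join();
//     t1.join();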

void RuntimeImpl::RegisterDebugCallback(NetworkId networkId, const DebugCallbackFunction& func)
{
    LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);
    loadedNetwork->RegisterDebugCallback(func);
}

void RuntimeImpl::LoadDynamicBackends(const std::string& overrideBackendPath)
{
    // Get the paths from which to load the dynamic backends
    std::vector<std::string> backendPaths = DynamicBackendUtils::GetBackendPaths(overrideBackendPath);

    // Get the shared objects to try to load as dynamic backends
    std::vector<std::string> sharedObjects = DynamicBackendUtils::GetSharedObjects(backendPaths);

    // Create a list of dynamic backends
    m_DynamicBackends = DynamicBackendUtils::CreateDynamicBackends(sharedObjects);

    // Register the dynamic backends in the backend registry
    BackendIdSet registeredBackendIds = DynamicBackendUtils::RegisterDynamicBackends(m_DynamicBackends);

    // Add the registered dynamic backend ids to the list of supported backends
    m_DeviceSpec.AddSupportedBackends(registeredBackendIds, true);
}

} // namespace armnn