ArmNN 21.11 — Runtime.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "Runtime.hpp"

#include <armnn/Version.hpp>
#include <armnn/Logging.hpp>
#include <armnn/utility/Timer.hpp>

#include <common/include/LabelsAndEventClasses.hpp>

#include <iostream>

using namespace armnn;
using namespace std;

namespace armnn
{
IRuntime::IRuntime() : pRuntimeImpl(nullptr) {}

IRuntime::IRuntime(const IRuntime::CreationOptions& options) : pRuntimeImpl(new RuntimeImpl(options)) {}

IRuntime::~IRuntime() = default;

IRuntime* IRuntime::CreateRaw(const CreationOptions& options)
{
    return new IRuntime(options);
}

IRuntimePtr IRuntime::Create(const CreationOptions& options)
{
    return IRuntimePtr(CreateRaw(options), &IRuntime::Destroy);
}

void IRuntime::Destroy(IRuntime* runtime)
{
    delete runtime;
}
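
// Usage sketch (illustrative, not part of the original file): clients normally go
// through the smart-pointer factory, so IRuntime::Destroy runs automatically when
// the handle goes out of scope:
//
//     armnn::IRuntime::CreationOptions options;
//     armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);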

Status IRuntime::LoadNetwork(NetworkId& networkIdOut, IOptimizedNetworkPtr network)
{
    return pRuntimeImpl->LoadNetwork(networkIdOut, std::move(network));
}

Status IRuntime::LoadNetwork(NetworkId& networkIdOut,
                             IOptimizedNetworkPtr network,
                             std::string& errorMessage)
{
    return pRuntimeImpl->LoadNetwork(networkIdOut, std::move(network), errorMessage);
}

Status IRuntime::LoadNetwork(NetworkId& networkIdOut,
                             IOptimizedNetworkPtr network,
                             std::string& errorMessage,
                             const INetworkProperties& networkProperties)
{
    return pRuntimeImpl->LoadNetwork(networkIdOut, std::move(network), errorMessage, networkProperties);
}
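
// Usage sketch (illustrative; optNet stands for an IOptimizedNetworkPtr produced
// by armnn::Optimize). The network handle is moved into the runtime, which fills
// in networkId on success:
//
//     armnn::NetworkId networkId;
//     std::string errorMessage;
//     if (runtime->LoadNetwork(networkId, std::move(optNet), errorMessage) != armnn::Status::Success)
//     {
//         ARMNN_LOG(error) << "LoadNetwork failed: " << errorMessage;
//     }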

TensorInfo IRuntime::GetInputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
{
    return pRuntimeImpl->GetInputTensorInfo(networkId, layerId);
}

TensorInfo IRuntime::GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
{
    return pRuntimeImpl->GetOutputTensorInfo(networkId, layerId);
}

std::vector<ImportedInputId> IRuntime::ImportInputs(NetworkId networkId, const InputTensors& inputTensors)
{
    return pRuntimeImpl->ImportInputs(networkId, inputTensors);
}

std::vector<ImportedOutputId> IRuntime::ImportOutputs(NetworkId networkId, const OutputTensors& outputTensors)
{
    return pRuntimeImpl->ImportOutputs(networkId, outputTensors);
}
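
// Usage sketch (illustrative): importing separates the mapping of tensors from
// network execution, so tensors imported once can be referenced by id on later
// Execute() calls instead of being re-imported each time:
//
//     std::vector<armnn::ImportedInputId>  inputIds  = runtime->ImportInputs(networkId, inputTensors);
//     std::vector<armnn::ImportedOutputId> outputIds = runtime->ImportOutputs(networkId, outputTensors);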

void IRuntime::ClearImportedInputs(NetworkId networkId, const std::vector<ImportedInputId> inputIds)
{
    return pRuntimeImpl->ClearImportedInputs(networkId, inputIds);
}

void IRuntime::ClearImportedOutputs(NetworkId networkId, const std::vector<ImportedOutputId> outputIds)
{
    return pRuntimeImpl->ClearImportedOutputs(networkId, outputIds);
}

Status IRuntime::EnqueueWorkload(NetworkId networkId,
                                 const InputTensors& inputTensors,
                                 const OutputTensors& outputTensors)
{
    return pRuntimeImpl->EnqueueWorkload(networkId, inputTensors, outputTensors);
}
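
// Usage sketch (illustrative; inputData/outputData are hypothetical std::vector<float>
// buffers): synchronous inference pairs each LayerBindingId with a tensor:
//
//     armnn::InputTensors inputTensors
//     {
//         { 0, armnn::ConstTensor(runtime->GetInputTensorInfo(networkId, 0), inputData.data()) }
//     };
//     armnn::OutputTensors outputTensors
//     {
//         { 0, armnn::Tensor(runtime->GetOutputTensorInfo(networkId, 0), outputData.data()) }
//     };
//     runtime->EnqueueWorkload(networkId, inputTensors, outputTensors);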

Status IRuntime::Execute(IWorkingMemHandle& workingMemHandle,
                         const InputTensors& inputTensors,
                         const OutputTensors& outputTensors,
                         std::vector<ImportedInputId> preImportedInputs,
                         std::vector<ImportedOutputId> preImportedOutputs)
{
    return pRuntimeImpl->Execute(workingMemHandle, inputTensors, outputTensors, preImportedInputs, preImportedOutputs);
}

Status IRuntime::UnloadNetwork(NetworkId networkId)
{
    return pRuntimeImpl->UnloadNetwork(networkId);
}

const IDeviceSpec& IRuntime::GetDeviceSpec() const
{
    return pRuntimeImpl->GetDeviceSpec();
}

std::unique_ptr<IWorkingMemHandle> IRuntime::CreateWorkingMemHandle(NetworkId networkId)
{
    return pRuntimeImpl->CreateWorkingMemHandle(networkId);
}
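
// Usage sketch (illustrative): asynchronous execution. This assumes the network
// was loaded with async enabled via its INetworkProperties; each thread should
// create its own working-memory handle for overlapped execution:
//
//     std::unique_ptr<armnn::IWorkingMemHandle> handle = runtime->CreateWorkingMemHandle(networkId);
//     runtime->Execute(*handle, inputTensors, outputTensors, {}, {});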

const std::shared_ptr<IProfiler> IRuntime::GetProfiler(NetworkId networkId) const
{
    return pRuntimeImpl->GetProfiler(networkId);
}

void IRuntime::RegisterDebugCallback(NetworkId networkId, const DebugCallbackFunction& func)
{
    return pRuntimeImpl->RegisterDebugCallback(networkId, func);
}

int RuntimeImpl::GenerateNetworkId()
{
    return m_NetworkIdCounter++;
}

Status RuntimeImpl::LoadNetwork(NetworkId& networkIdOut, IOptimizedNetworkPtr inNetwork)
{
    std::string ignoredErrorMessage;
    return LoadNetwork(networkIdOut, std::move(inNetwork), ignoredErrorMessage);
}

Status RuntimeImpl::LoadNetwork(NetworkId& networkIdOut,
                                IOptimizedNetworkPtr inNetwork,
                                std::string& errorMessage)
{
    INetworkProperties networkProperties(
        false, MemorySource::Undefined, MemorySource::Undefined);
    return LoadNetwork(networkIdOut, std::move(inNetwork), errorMessage, networkProperties);
}

Status RuntimeImpl::LoadNetwork(NetworkId& networkIdOut,
                                IOptimizedNetworkPtr inNetwork,
                                std::string& errorMessage,
                                const INetworkProperties& networkProperties)
{
    // Register the profiler
    auto profiler = inNetwork->GetProfiler();
    ProfilerManager::GetInstance().RegisterProfiler(profiler.get());

    IOptimizedNetwork* rawNetwork = inNetwork.release();

    networkIdOut = GenerateNetworkId();

    for (auto&& context : m_BackendContexts)
    {
        context.second->BeforeLoadNetwork(networkIdOut);
    }

    unique_ptr<LoadedNetwork> loadedNetwork = LoadedNetwork::MakeLoadedNetwork(
        std::unique_ptr<IOptimizedNetwork>(rawNetwork),
        errorMessage,
        networkProperties,
        m_ProfilingService);

    if (!loadedNetwork)
    {
        return Status::Failure;
    }

    {
        std::lock_guard<std::mutex> lockGuard(m_Mutex);

        // Stores the network
        m_LoadedNetworks[networkIdOut] = std::move(loadedNetwork);
    }

    for (auto&& context : m_BackendContexts)
    {
        context.second->AfterLoadNetwork(networkIdOut);
    }

    if (m_ProfilingService.IsProfilingEnabled())
    {
        m_ProfilingService.IncrementCounterValue(armnn::profiling::NETWORK_LOADS);
    }

    return Status::Success;
}

Status RuntimeImpl::UnloadNetwork(NetworkId networkId)
{
    bool unloadOk = true;
    for (auto&& context : m_BackendContexts)
    {
        unloadOk &= context.second->BeforeUnloadNetwork(networkId);
    }

    if (!unloadOk)
    {
        ARMNN_LOG(warning) << "RuntimeImpl::UnloadNetwork(): failed to unload "
                              "network with ID:" << networkId << " because BeforeUnloadNetwork failed";
        return Status::Failure;
    }

    std::unique_ptr<profiling::TimelineUtilityMethods> timelineUtils =
        profiling::TimelineUtilityMethods::GetTimelineUtils(m_ProfilingService);
    {
        std::lock_guard<std::mutex> lockGuard(m_Mutex);

        // If timeline recording is on mark the Network end of life
        if (timelineUtils)
        {
            auto search = m_LoadedNetworks.find(networkId);
            if (search != m_LoadedNetworks.end())
            {
                profiling::ProfilingGuid networkGuid = search->second->GetNetworkGuid();
                timelineUtils->RecordEvent(networkGuid,
                                           profiling::LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS);
            }
        }
        if (m_LoadedNetworks.erase(networkId) == 0)
        {
            ARMNN_LOG(warning) << "WARNING: RuntimeImpl::UnloadNetwork(): " << networkId << " not found!";
            return Status::Failure;
        }

        if (m_ProfilingService.IsProfilingEnabled())
        {
            m_ProfilingService.IncrementCounterValue(armnn::profiling::NETWORK_UNLOADS);
        }
    }

    for (auto&& context : m_BackendContexts)
    {
        context.second->AfterUnloadNetwork(networkId);
    }

    // Unregister the profiler
    ProfilerManager::GetInstance().RegisterProfiler(nullptr);

    ARMNN_LOG(debug) << "RuntimeImpl::UnloadNetwork(): Unloaded network with ID: " << networkId;
    return Status::Success;
}

const std::shared_ptr<IProfiler> RuntimeImpl::GetProfiler(NetworkId networkId) const
{
    auto it = m_LoadedNetworks.find(networkId);
    if (it != m_LoadedNetworks.end())
    {
        auto& loadedNetwork = it->second;
        return loadedNetwork->GetProfiler();
    }

    return nullptr;
}

void RuntimeImpl::ReportStructure() // armnn::profiling::IProfilingService& profilingService as param
{
    // No-op for the time being, but this may be useful in future to have the profilingService available
    // if (profilingService.IsProfilingEnabled()){}

    LoadedNetworks::iterator it = m_LoadedNetworks.begin();
    while (it != m_LoadedNetworks.end())
    {
        auto& loadedNetwork = it->second;
        loadedNetwork->SendNetworkStructure();
        // Increment the iterator to point to the next entry
        it++;
    }
}

RuntimeImpl::RuntimeImpl(const IRuntime::CreationOptions& options)
    : m_NetworkIdCounter(0),
      m_ProfilingService(*this)
{
    const auto start_time = armnn::GetTimeNow();
    ARMNN_LOG(info) << "ArmNN v" << ARMNN_VERSION << "\n";
    if (options.m_ProfilingOptions.m_TimelineEnabled && !options.m_ProfilingOptions.m_EnableProfiling)
    {
        throw RuntimeException(
            "It is not possible to enable timeline reporting without profiling being enabled");
    }

    // Load any available/compatible dynamic backends before the runtime
    // goes through the backend registry
    LoadDynamicBackends(options.m_DynamicBackendsPath);

    BackendIdSet supportedBackends;
    for (const auto& id : BackendRegistryInstance().GetBackendIds())
    {
        // Store backend contexts for the supported ones
        try
        {
            auto factoryFun = BackendRegistryInstance().GetFactory(id);
            ARMNN_ASSERT(factoryFun != nullptr);
            auto backend = factoryFun();
            ARMNN_ASSERT(backend != nullptr);
            ARMNN_ASSERT(backend.get() != nullptr);

            auto customAllocatorMapIterator = options.m_CustomAllocatorMap.find(id);
            if (customAllocatorMapIterator != options.m_CustomAllocatorMap.end() &&
                customAllocatorMapIterator->second == nullptr)
            {
                // We need to manually clean up the dynamic backends before throwing an exception.
                DynamicBackendUtils::DeregisterDynamicBackends(m_DeviceSpec.GetDynamicBackends());
                m_DeviceSpec.ClearDynamicBackends();
                throw armnn::Exception("Allocator associated with id " + id.Get() + " is null");
            }

            // If the runtime is created in protected mode only add backends that support this mode
            if (options.m_ProtectedMode)
            {
                // Check if the backend supports ProtectedMode
                BackendCapability protectedContentCapability {"ProtectedContentAllocation", true};
                if (!HasCapability(protectedContentCapability, id))
                {
                    // Protected content allocation is not supported by the backend,
                    // so the backend should not be registered
                    ARMNN_LOG(warning) << "Backend "
                                       << id
                                       << " is not registered as it does not support protected content allocation\n";
                    continue;
                }
                // The user is responsible for providing a custom memory allocator which allows
                // allocating protected memory
                if (customAllocatorMapIterator != options.m_CustomAllocatorMap.end())
                {
                    std::string err;
                    if (customAllocatorMapIterator->second->GetMemorySourceType()
                        == armnn::MemorySource::DmaBufProtected)
                    {
                        if (!backend->UseCustomMemoryAllocator(customAllocatorMapIterator->second, err))
                        {
                            ARMNN_LOG(error) << "The backend "
                                             << id
                                             << " reported an error when entering protected mode. Backend won't be"
                                             << " used. ErrorMsg: " << err;
                            continue;
                        }
                        // No errors so register the Custom Allocator with the BackendRegistry
                        BackendRegistryInstance().RegisterAllocator(id, customAllocatorMapIterator->second);
                    }
                    else
                    {
                        ARMNN_LOG(error) << "The CustomAllocator provided with the runtime options doesn't support "
                                            "protected memory. Protected mode can't be activated. The backend "
                                         << id
                                         << " is not going to be used. MemorySource must be MemorySource::DmaBufProtected";
                        continue;
                    }
                }
                else
                {
                    ARMNN_LOG(error) << "Protected mode can't be activated for backend: "
                                     << id
                                     << " no custom allocator was provided to the runtime options.";
                    continue;
                }
            }
            else
            {
                // If a custom memory allocator is provided make the backend use that instead of the default
                if (customAllocatorMapIterator != options.m_CustomAllocatorMap.end())
                {
                    std::string err;
                    if (!backend->UseCustomMemoryAllocator(customAllocatorMapIterator->second, err))
                    {
                        ARMNN_LOG(error) << "The backend "
                                         << id
                                         << " reported an error when trying to use the provided custom allocator."
                                            " Backend won't be used."
                                         << " ErrorMsg: " << err;
                        continue;
                    }
                    // No errors so register the Custom Allocator with the BackendRegistry
                    BackendRegistryInstance().RegisterAllocator(id, customAllocatorMapIterator->second);
                }
            }

            // Check if a custom memory optimizer strategy map is set
            if (!options.m_MemoryOptimizerStrategyMap.empty())
            {
                auto customMemoryOptimizerStrategyMapIterator = options.m_MemoryOptimizerStrategyMap.find(id);
                // If a memory optimizer strategy is provided make the backend use that instead of the default
                if (customMemoryOptimizerStrategyMapIterator != options.m_MemoryOptimizerStrategyMap.end())
                {
                    // No errors, so register the memory optimizer strategy with the BackendRegistry
                    BackendRegistryInstance().RegisterMemoryOptimizerStrategy(
                        id, customMemoryOptimizerStrategyMapIterator->second);

                    ARMNN_LOG(info) << "MemoryOptimizerStrategy "
                                    << customMemoryOptimizerStrategyMapIterator->second->GetName()
                                    << " set for the backend " << id << ".";
                }
            }
            else
            {
                // Check if one of the existing memory optimizer strategies is requested by name
                std::string memoryOptimizerStrategyName = "";
                ParseOptions(options.m_BackendOptions, id, [&](std::string name, const BackendOptions::Var& value)
                {
                    if (name == "MemoryOptimizerStrategy")
                    {
                        memoryOptimizerStrategyName = ParseStringBackendOption(value, "");
                    }
                });
                if (memoryOptimizerStrategyName != "")
                {
                    std::shared_ptr<IMemoryOptimizerStrategy> strategy =
                        GetMemoryOptimizerStrategy(memoryOptimizerStrategyName);

                    if (!strategy)
                    {
                        ARMNN_LOG(warning) << "MemoryOptimizerStrategy: " << memoryOptimizerStrategyName
                                           << " was not found\n";
                    }
                    else
                    {
                        auto strategyType = GetMemBlockStrategyTypeName(strategy->GetMemBlockStrategyType());
                        BackendCapability memOptimizeStrategyCapability {strategyType, true};
                        if (HasCapability(memOptimizeStrategyCapability, id))
                        {
                            BackendRegistryInstance().RegisterMemoryOptimizerStrategy(id, strategy);

                            ARMNN_LOG(info) << "MemoryOptimizerStrategy: "
                                            << memoryOptimizerStrategyName << " set for the backend " << id << ".";
                        }
                        else
                        {
                            ARMNN_LOG(warning) << "Backend "
                                               << id
                                               << " does not have multi-axis packing capability and cannot support "
                                               << "MemoryOptimizerStrategy: " << memoryOptimizerStrategyName << "\n";
                        }
                    }
                }
            }

            auto context = backend->CreateBackendContext(options);

            // Backends are allowed to return nullptrs if they
            // don't wish to create a backend specific context
            if (context)
            {
                m_BackendContexts.emplace(std::make_pair(id, std::move(context)));
            }
            supportedBackends.emplace(id);

            unique_ptr<armnn::profiling::IBackendProfiling> profilingIface =
                std::make_unique<armnn::profiling::BackendProfiling>(armnn::profiling::BackendProfiling(
                    options, m_ProfilingService, id));

            // Backends may also provide a profiling context. Ask for it now.
            auto profilingContext = backend->CreateBackendProfilingContext(options, profilingIface);
            // Backends that don't support profiling will return a null profiling context.
            if (profilingContext)
            {
                // Pass the context onto the profiling service.
                m_ProfilingService.AddBackendProfilingContext(id, profilingContext);
            }
        }
        catch (const BackendUnavailableException&)
        {
            // Ignore backends which are unavailable
        }
    }

    BackendRegistryInstance().SetProfilingService(m_ProfilingService);
    // Pass configuration info to the profiling service
    m_ProfilingService.ConfigureProfilingService(options.m_ProfilingOptions);
    if (options.m_ProfilingOptions.m_EnableProfiling)
    {
        // Try to wait for the profiling service to initialise
        m_ProfilingService.WaitForProfilingServiceActivation(3000);
    }

    m_DeviceSpec.AddSupportedBackends(supportedBackends);

    ARMNN_LOG(info) << "Initialization time: " << std::setprecision(2)
                    << std::fixed << armnn::GetTimeDuration(start_time).count() << " ms\n";
}
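
// Usage sketch (illustrative): enabling external profiling, and optionally
// timeline reporting, before creating the runtime. As the constructor above
// enforces, the timeline cannot be enabled without profiling:
//
//     armnn::IRuntime::CreationOptions options;
//     options.m_ProfilingOptions.m_EnableProfiling = true;
//     options.m_ProfilingOptions.m_TimelineEnabled = true;
//     armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);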

RuntimeImpl::~RuntimeImpl()
{
    const auto start_time = armnn::GetTimeNow();
    std::vector<int> networkIDs;
    try
    {
        // Coverity fix: The following code may throw an exception of type std::length_error.
        std::transform(m_LoadedNetworks.begin(), m_LoadedNetworks.end(),
                       std::back_inserter(networkIDs),
                       [](const auto& pair) { return pair.first; });
    }
    catch (const std::exception& e)
    {
        // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
        // exception of type std::length_error.
        // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
        std::cerr << "WARNING: An error has occurred when getting the IDs of the networks to unload: " << e.what()
                  << "\nSome of the loaded networks may not be unloaded" << std::endl;
    }
    // We then proceed to unload all the networks whose IDs have been appended to the list
    // up to the point the exception was thrown (if any).

    for (auto networkID : networkIDs)
    {
        try
        {
            // Coverity fix: UnloadNetwork() may throw an exception of type std::length_error,
            // boost::log::v2s_mt_posix::odr_violation or boost::log::v2s_mt_posix::system_error
            UnloadNetwork(networkID);
        }
        catch (const std::exception& e)
        {
            // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
            // exception of type std::length_error.
            // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
            std::cerr << "WARNING: An error has occurred when unloading network " << networkID << ": " << e.what()
                      << std::endl;
        }
    }

    // Clear all dynamic backends.
    DynamicBackendUtils::DeregisterDynamicBackends(m_DeviceSpec.GetDynamicBackends());
    m_DeviceSpec.ClearDynamicBackends();
    m_BackendContexts.clear();

    BackendRegistryInstance().SetProfilingService(armnn::EmptyOptional());
    ARMNN_LOG(info) << "Shutdown time: " << std::setprecision(2)
                    << std::fixed << armnn::GetTimeDuration(start_time).count() << " ms\n";
}

LoadedNetwork* RuntimeImpl::GetLoadedNetworkPtr(NetworkId networkId) const
{
    std::lock_guard<std::mutex> lockGuard(m_Mutex);
    return m_LoadedNetworks.at(networkId).get();
}

armnn::TensorInfo RuntimeImpl::GetInputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
{
    return GetLoadedNetworkPtr(networkId)->GetInputTensorInfo(layerId);
}

armnn::TensorInfo RuntimeImpl::GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
{
    return GetLoadedNetworkPtr(networkId)->GetOutputTensorInfo(layerId);
}

std::vector<ImportedInputId> RuntimeImpl::ImportInputs(NetworkId networkId, const InputTensors& inputTensors)
{
    return GetLoadedNetworkPtr(networkId)->ImportInputs(inputTensors);
}

std::vector<ImportedOutputId> RuntimeImpl::ImportOutputs(NetworkId networkId, const OutputTensors& outputTensors)
{
    return GetLoadedNetworkPtr(networkId)->ImportOutputs(outputTensors);
}

void RuntimeImpl::ClearImportedInputs(NetworkId networkId, const std::vector<ImportedInputId> inputIds)
{
    return GetLoadedNetworkPtr(networkId)->ClearImportedInputs(inputIds);
}

void RuntimeImpl::ClearImportedOutputs(NetworkId networkId, const std::vector<ImportedOutputId> outputIds)
{
    return GetLoadedNetworkPtr(networkId)->ClearImportedOutputs(outputIds);
}

Status RuntimeImpl::EnqueueWorkload(NetworkId networkId,
                                    const InputTensors& inputTensors,
                                    const OutputTensors& outputTensors)
{
    LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);

    if (!loadedNetwork)
    {
        ARMNN_LOG(error) << "A Network with an id of " << networkId << " does not exist.\n";
        return Status::Failure;
    }
    if (loadedNetwork->IsAsyncEnabled())
    {
        ARMNN_LOG(error) << "Network " << networkId << " is async enabled.\n";
        return Status::Failure;
    }
    ProfilerManager::GetInstance().RegisterProfiler(loadedNetwork->GetProfiler().get());

    ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "EnqueueWorkload");

    static thread_local NetworkId lastId = networkId;
    if (lastId != networkId)
    {
        LoadedNetworkFuncSafe(lastId, [](LoadedNetwork* network)
        {
            network->FreeWorkingMemory();
        });
    }
    lastId = networkId;

    return loadedNetwork->EnqueueWorkload(inputTensors, outputTensors);
}

Status RuntimeImpl::Execute(IWorkingMemHandle& iWorkingMemHandle,
                            const InputTensors& inputTensors,
                            const OutputTensors& outputTensors,
                            std::vector<ImportedInputId> preImportedInputs,
                            std::vector<ImportedOutputId> preImportedOutputs)
{
    NetworkId networkId = iWorkingMemHandle.GetNetworkId();
    LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);

    if (!loadedNetwork)
    {
        ARMNN_LOG(error) << "A Network with an id of " << networkId << " does not exist.\n";
        return Status::Failure;
    }
    if (!loadedNetwork->IsAsyncEnabled())
    {
        ARMNN_LOG(error) << "Attempting to execute " << networkId << " when it is not async enabled.\n";
        return Status::Failure;
    }
    ProfilerManager::GetInstance().RegisterProfiler(loadedNetwork->GetProfiler().get());

    ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Execute");

    return loadedNetwork->Execute(inputTensors,
                                  outputTensors,
                                  iWorkingMemHandle,
                                  preImportedInputs,
                                  preImportedOutputs);
}

/// Create a new unique WorkingMemHandle object. Create multiple handles if you wish to have
/// overlapped Execution by calling this function from different threads.
std::unique_ptr<IWorkingMemHandle> RuntimeImpl::CreateWorkingMemHandle(NetworkId networkId)
{
    LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);

    if (!loadedNetwork)
    {
        ARMNN_LOG(error) << "A Network with an id of " << networkId << " does not exist.\n";
        return nullptr;
    }
    if (!loadedNetwork->IsAsyncEnabled())
    {
        ARMNN_LOG(error) << "Network " << networkId << " is not async enabled.\n";
        return nullptr;
    }
    ProfilerManager::GetInstance().RegisterProfiler(loadedNetwork->GetProfiler().get());

    ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "CreateWorkingMemHandle");

    static thread_local NetworkId lastId = networkId;
    if (lastId != networkId)
    {
        LoadedNetworkFuncSafe(lastId, [](LoadedNetwork* network)
        {
            network->FreeWorkingMemory();
        });
    }
    lastId = networkId;

    return loadedNetwork->CreateWorkingMemHandle(networkId);
}

void RuntimeImpl::RegisterDebugCallback(NetworkId networkId, const DebugCallbackFunction& func)
{
    LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);
    loadedNetwork->RegisterDebugCallback(func);
}

void RuntimeImpl::LoadDynamicBackends(const std::string& overrideBackendPath)
{
    // Get the paths to load the dynamic backends from
    std::vector<std::string> backendPaths = DynamicBackendUtils::GetBackendPaths(overrideBackendPath);

    // Get the shared objects to try to load as dynamic backends
    std::vector<std::string> sharedObjects = DynamicBackendUtils::GetSharedObjects(backendPaths);

    // Create a list of dynamic backends
    m_DynamicBackends = DynamicBackendUtils::CreateDynamicBackends(sharedObjects);

    // Register the dynamic backends in the backend registry
    BackendIdSet registeredBackendIds = DynamicBackendUtils::RegisterDynamicBackends(m_DynamicBackends);

    // Add the registered dynamic backend ids to the list of supported backends
    m_DeviceSpec.AddSupportedBackends(registeredBackendIds, true);
}
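
// Usage sketch (illustrative; the path is hypothetical): overriding the dynamic
// backend search path set by the DYNAMIC_BACKEND_PATHS compiler directive at
// runtime creation, so the shared objects under that directory are loaded:
//
//     armnn::IRuntime::CreationOptions options;
//     options.m_DynamicBackendsPath = "/usr/lib/armnn";
//     armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);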

} // namespace armnn