ArmNN
 21.08
Runtime.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include "Runtime.hpp"
6 
7 #include <armnn/Version.hpp>
10 #include <armnn/Logging.hpp>
11 #include <armnn/utility/Timer.hpp>
12 
16 
17 #include <common/include/LabelsAndEventClasses.hpp>
18 
19 #include <iostream>
20 
22 
23 using namespace armnn;
24 using namespace std;
25 
26 namespace armnn
27 {
29 
31 
// Out-of-line defaulted destructor: IRuntime holds a std::unique_ptr<RuntimeImpl>
// (see IRuntime.hpp), so the destructor must be emitted here, where RuntimeImpl
// is a complete type.
32 IRuntime::~IRuntime() = default;
33 
35 {
36  return new IRuntime(options);
37 }
38 
40 {
41  return IRuntimePtr(CreateRaw(options), &IRuntime::Destroy);
42 }
43 
45 {
46  delete runtime;
47 }
48 
50 {
51  return pRuntimeImpl->LoadNetwork(networkIdOut, std::move(network));
52 }
53 
55  IOptimizedNetworkPtr network,
56  std::string& errorMessage)
57 {
58  return pRuntimeImpl->LoadNetwork(networkIdOut, std::move(network), errorMessage);
59 }
60 
62  IOptimizedNetworkPtr network,
63  std::string& errorMessage,
64  const INetworkProperties& networkProperties)
65 {
66  return pRuntimeImpl->LoadNetwork(networkIdOut, std::move(network), errorMessage, networkProperties);
67 }
68 
70 {
71  return pRuntimeImpl->GetInputTensorInfo(networkId, layerId);
72 }
73 
75 {
76  return pRuntimeImpl->GetOutputTensorInfo(networkId, layerId);
77 }
78 
80  const InputTensors& inputTensors,
81  const OutputTensors& outputTensors)
82 {
83  return pRuntimeImpl->EnqueueWorkload(networkId, inputTensors, outputTensors);
84 }
85 
87  const InputTensors& inputTensors,
88  const OutputTensors& outputTensors)
89 {
90  return pRuntimeImpl->Execute(workingMemHandle, inputTensors, outputTensors);
91 }
92 
94 {
95  return pRuntimeImpl->UnloadNetwork(networkId);
96 }
97 
99 {
100  return pRuntimeImpl->GetDeviceSpec();
101 }
102 
103 std::unique_ptr<IWorkingMemHandle> IRuntime::CreateWorkingMemHandle(NetworkId networkId)
104 {
105  return pRuntimeImpl->CreateWorkingMemHandle(networkId);
106 }
107 
108 const std::shared_ptr<IProfiler> IRuntime::GetProfiler(NetworkId networkId) const
109 {
110  return pRuntimeImpl->GetProfiler(networkId);
111 }
112 
114 {
115  return pRuntimeImpl->RegisterDebugCallback(networkId, func);
116 }
117 
118 int RuntimeImpl::GenerateNetworkId()
119 {
120  return m_NetworkIdCounter++;
121 }
122 
124 {
125  std::string ignoredErrorMessage;
126  return LoadNetwork(networkIdOut, std::move(inNetwork), ignoredErrorMessage);
127 }
128 
130  IOptimizedNetworkPtr inNetwork,
131  std::string& errorMessage)
132 {
133  INetworkProperties networkProperties(
135  return LoadNetwork(networkIdOut, std::move(inNetwork), errorMessage, networkProperties);
136 }
137 
139  IOptimizedNetworkPtr inNetwork,
140  std::string& errorMessage,
141  const INetworkProperties& networkProperties)
142 {
143  IOptimizedNetwork* rawNetwork = inNetwork.release();
144 
145  networkIdOut = GenerateNetworkId();
146 
147  for (auto&& context : m_BackendContexts)
148  {
149  context.second->BeforeLoadNetwork(networkIdOut);
150  }
151 
152  unique_ptr<LoadedNetwork> loadedNetwork = LoadedNetwork::MakeLoadedNetwork(
153  std::unique_ptr<IOptimizedNetwork>(rawNetwork),
154  errorMessage,
155  networkProperties,
156  m_ProfilingService);
157 
158  if (!loadedNetwork)
159  {
160  return Status::Failure;
161  }
162 
163  {
164  std::lock_guard<std::mutex> lockGuard(m_Mutex);
165 
166  // Stores the network
167  m_LoadedNetworks[networkIdOut] = std::move(loadedNetwork);
168  }
169 
170  for (auto&& context : m_BackendContexts)
171  {
172  context.second->AfterLoadNetwork(networkIdOut);
173  }
174 
175  if (m_ProfilingService.IsProfilingEnabled())
176  {
177  m_ProfilingService.IncrementCounterValue(armnn::profiling::NETWORK_LOADS);
178  }
179 
180  return Status::Success;
181 }
182 
184 {
185  bool unloadOk = true;
186  for (auto&& context : m_BackendContexts)
187  {
188  unloadOk &= context.second->BeforeUnloadNetwork(networkId);
189  }
190 
191  if (!unloadOk)
192  {
193  ARMNN_LOG(warning) << "RuntimeImpl::UnloadNetwork(): failed to unload "
194  "network with ID:" << networkId << " because BeforeUnloadNetwork failed";
195  return Status::Failure;
196  }
197 
198  std::unique_ptr<profiling::TimelineUtilityMethods> timelineUtils =
200  {
201  std::lock_guard<std::mutex> lockGuard(m_Mutex);
202 
203  // If timeline recording is on mark the Network end of life
204  if (timelineUtils)
205  {
206  auto search = m_LoadedNetworks.find(networkId);
207  if (search != m_LoadedNetworks.end())
208  {
209  profiling::ProfilingGuid networkGuid = search->second->GetNetworkGuid();
210  timelineUtils->RecordEvent(networkGuid,
211  profiling::LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS);
212  }
213  }
214  if (m_LoadedNetworks.erase(networkId) == 0)
215  {
216  ARMNN_LOG(warning) << "WARNING: RuntimeImpl::UnloadNetwork(): " << networkId << " not found!";
217  return Status::Failure;
218  }
219 
220  if (m_ProfilingService.IsProfilingEnabled())
221  {
222  m_ProfilingService.IncrementCounterValue(armnn::profiling::NETWORK_UNLOADS);
223  }
224  }
225 
226  for (auto&& context : m_BackendContexts)
227  {
228  context.second->AfterUnloadNetwork(networkId);
229  }
230 
231  ARMNN_LOG(debug) << "RuntimeImpl::UnloadNetwork(): Unloaded network with ID: " << networkId;
232  return Status::Success;
233 }
234 
235 const std::shared_ptr<IProfiler> RuntimeImpl::GetProfiler(NetworkId networkId) const
236 {
237  auto it = m_LoadedNetworks.find(networkId);
238  if (it != m_LoadedNetworks.end())
239  {
240  auto& loadedNetwork = it->second;
241  return loadedNetwork->GetProfiler();
242  }
243 
244  return nullptr;
245 }
246 
247 void RuntimeImpl::ReportStructure() // armnn::profiling::IProfilingService& profilingService as param
248 {
249  // No-op for the time being, but this may be useful in future to have the profilingService available
250  // if (profilingService.IsProfilingEnabled()){}
251 
252  LoadedNetworks::iterator it = m_LoadedNetworks.begin();
253  while (it != m_LoadedNetworks.end())
254  {
255  auto& loadedNetwork = it->second;
256  loadedNetwork->SendNetworkStructure();
257  // Increment the Iterator to point to next entry
258  it++;
259  }
260 }
261 
263  : m_NetworkIdCounter(0),
264  m_ProfilingService(*this)
265 {
266  const auto start_time = armnn::GetTimeNow();
267  ARMNN_LOG(info) << "ArmNN v" << ARMNN_VERSION << "\n";
269  {
270  throw RuntimeException(
271  "It is not possible to enable timeline reporting without profiling being enabled");
272  }
273 
274  // Load any available/compatible dynamic backend before the runtime
275  // goes through the backend registry
276  LoadDynamicBackends(options.m_DynamicBackendsPath);
277 
278  BackendIdSet supportedBackends;
279  for (const auto& id : BackendRegistryInstance().GetBackendIds())
280  {
281  // Store backend contexts for the supported ones
282  try {
283  auto factoryFun = BackendRegistryInstance().GetFactory(id);
284  ARMNN_ASSERT(factoryFun != nullptr);
285  auto backend = factoryFun();
286  ARMNN_ASSERT(backend != nullptr);
287  ARMNN_ASSERT(backend.get() != nullptr);
288 
289  auto customAllocatorMapIterator = options.m_CustomAllocatorMap.find(id);
290  if (customAllocatorMapIterator != options.m_CustomAllocatorMap.end() &&
291  customAllocatorMapIterator->second == nullptr)
292  {
293  // We need to manually clean up the dynamic backends before throwing an exception.
295  m_DeviceSpec.ClearDynamicBackends();
296  throw armnn::Exception("Allocator associated with id " + id.Get() + " is null");
297  }
298 
299  // If the runtime is created in protected mode only add backends that support this mode
300  if (options.m_ProtectedMode)
301  {
302  // check if backend supports ProtectedMode
304  BackendCapability protectedContentCapability {"ProtectedContentAllocation", true};
305  if (!HasCapability(protectedContentCapability, id))
306  {
307  // Protected Content Allocation is not supported by the backend
308  // backend should not be registered
309  ARMNN_LOG(warning) << "Backend "
310  << id
311  << " is not registered as does not support protected content allocation \n";
312  continue;
313  }
314  // The user is responsible to provide a custom memory allocator which allows to allocate
315  // protected memory
316  if (customAllocatorMapIterator != options.m_CustomAllocatorMap.end())
317  {
318  std::string err;
319  if (customAllocatorMapIterator->second->GetMemorySourceType()
321  {
322  if (!backend->UseCustomMemoryAllocator(customAllocatorMapIterator->second, err))
323  {
324  ARMNN_LOG(error) << "The backend "
325  << id
326  << " reported an error when entering protected mode. Backend won't be"
327  << " used. ErrorMsg: " << err;
328  continue;
329  }
330  // No errors so register the Custom Allocator with the BackendRegistry
331  BackendRegistryInstance().RegisterAllocator(id, customAllocatorMapIterator->second);
332  }
333  else
334  {
335  ARMNN_LOG(error) << "The CustomAllocator provided with the runtime options doesn't support "
336  "protected memory. Protected mode can't be activated. The backend "
337  << id
338  << " is not going to be used. MemorySource must be MemorySource::DmaBufProtected";
339  continue;
340  }
341  }
342  else
343  {
344  ARMNN_LOG(error) << "Protected mode can't be activated for backend: "
345  << id
346  << " no custom allocator was provided to the runtime options.";
347  continue;
348  }
349  }
350  else
351  {
352  // If a custom memory allocator is provided make the backend use that instead of the default
353  if (customAllocatorMapIterator != options.m_CustomAllocatorMap.end())
354  {
355  std::string err;
356  if (!backend->UseCustomMemoryAllocator(customAllocatorMapIterator->second, err))
357  {
358  ARMNN_LOG(error) << "The backend "
359  << id
360  << " reported an error when trying to use the provided custom allocator."
361  " Backend won't be used."
362  << " ErrorMsg: " << err;
363  continue;
364  }
365  // No errors so register the Custom Allocator with the BackendRegistry
366  BackendRegistryInstance().RegisterAllocator(id, customAllocatorMapIterator->second);
367  }
368  }
369  auto context = backend->CreateBackendContext(options);
370 
371  // backends are allowed to return nullptrs if they
372  // don't wish to create a backend specific context
373  if (context)
374  {
375  m_BackendContexts.emplace(std::make_pair(id, std::move(context)));
376  }
377  supportedBackends.emplace(id);
378 
379  unique_ptr<armnn::profiling::IBackendProfiling> profilingIface =
380  std::make_unique<armnn::profiling::BackendProfiling>(armnn::profiling::BackendProfiling(
381  options, m_ProfilingService, id));
382 
383  // Backends may also provide a profiling context. Ask for it now.
384  auto profilingContext = backend->CreateBackendProfilingContext(options, profilingIface);
385  // Backends that don't support profiling will return a null profiling context.
386  if (profilingContext)
387  {
388  // Pass the context onto the profiling service.
389  m_ProfilingService.AddBackendProfilingContext(id, profilingContext);
390  }
391  }
392  catch (const BackendUnavailableException&)
393  {
394  // Ignore backends which are unavailable
395  }
396  }
397 
398  BackendRegistryInstance().SetProfilingService(m_ProfilingService);
399  // pass configuration info to the profiling service
400  m_ProfilingService.ConfigureProfilingService(options.m_ProfilingOptions);
402  {
403  // try to wait for the profiling service to initialise
404  m_ProfilingService.WaitForProfilingServiceActivation(3000);
405  }
406 
407  m_DeviceSpec.AddSupportedBackends(supportedBackends);
408 
409  ARMNN_LOG(info) << "Initialization time: " << std::setprecision(2)
410  << std::fixed << armnn::GetTimeDuration(start_time).count() << " ms\n";
411 }
412 
414 {
415  const auto start_time = armnn::GetTimeNow();
416  std::vector<int> networkIDs;
417  try
418  {
419  // Coverity fix: The following code may throw an exception of type std::length_error.
420  std::transform(m_LoadedNetworks.begin(), m_LoadedNetworks.end(),
421  std::back_inserter(networkIDs),
422  [](const auto &pair) { return pair.first; });
423  }
424  catch (const std::exception& e)
425  {
426  // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
427  // exception of type std::length_error.
428  // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
429  std::cerr << "WARNING: An error has occurred when getting the IDs of the networks to unload: " << e.what()
430  << "\nSome of the loaded networks may not be unloaded" << std::endl;
431  }
432  // We then proceed to unload all the networks which IDs have been appended to the list
433  // up to the point the exception was thrown (if any).
434 
435  for (auto networkID : networkIDs)
436  {
437  try
438  {
439  // Coverity fix: UnloadNetwork() may throw an exception of type std::length_error,
440  // boost::log::v2s_mt_posix::odr_violation or boost::log::v2s_mt_posix::system_error
441  UnloadNetwork(networkID);
442  }
443  catch (const std::exception& e)
444  {
445  // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
446  // exception of type std::length_error.
447  // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
448  std::cerr << "WARNING: An error has occurred when unloading network " << networkID << ": " << e.what()
449  << std::endl;
450  }
451  }
452 
453  // Clear all dynamic backends.
455  m_DeviceSpec.ClearDynamicBackends();
456  m_BackendContexts.clear();
457 
459  ARMNN_LOG(info) << "Shutdown time: " << std::setprecision(2)
460  << std::fixed << armnn::GetTimeDuration(start_time).count() << " ms\n";
461 }
462 
463 LoadedNetwork* RuntimeImpl::GetLoadedNetworkPtr(NetworkId networkId) const
464 {
465  std::lock_guard<std::mutex> lockGuard(m_Mutex);
466  return m_LoadedNetworks.at(networkId).get();
467 }
468 
470 {
471  return GetLoadedNetworkPtr(networkId)->GetInputTensorInfo(layerId);
472 }
473 
475 {
476  return GetLoadedNetworkPtr(networkId)->GetOutputTensorInfo(layerId);
477 }
478 
479 
481  const InputTensors& inputTensors,
482  const OutputTensors& outputTensors)
483 {
484  LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);
485 
486  if (!loadedNetwork)
487  {
488  ARMNN_LOG(error) << "A Network with an id of " << networkId << " does not exist.\n";
489  return Status::Failure;
490  }
491  if (loadedNetwork->IsAsyncEnabled())
492  {
493  ARMNN_LOG(error) << "Network " << networkId << " is async enabled.\n";
494  return Status::Failure;
495  }
497 
499 
500  static thread_local NetworkId lastId = networkId;
501  if (lastId != networkId)
502  {
503  LoadedNetworkFuncSafe(lastId, [](LoadedNetwork* network)
504  {
505  network->FreeWorkingMemory();
506  });
507  }
508  lastId=networkId;
509 
510  return loadedNetwork->EnqueueWorkload(inputTensors, outputTensors);
511 }
512 
514  const InputTensors& inputTensors,
515  const OutputTensors& outputTensors)
516 {
517  NetworkId networkId = iWorkingMemHandle.GetNetworkId();
518  LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);
519 
520  if (!loadedNetwork)
521  {
522  ARMNN_LOG(error) << "A Network with an id of " << networkId << " does not exist.\n";
523  return Status::Failure;
524  }
525  if (!loadedNetwork->IsAsyncEnabled())
526  {
527  ARMNN_LOG(error) << "Attempting execute " << networkId << " when it is not async enabled.\n";
528  return Status::Failure;
529  }
531 
533 
534  return loadedNetwork->Execute(inputTensors, outputTensors, iWorkingMemHandle);
535 }
536 
537 /// Create a new unique WorkingMemHandle object. Create multiple handles if you wish to have
538 /// overlapped Execution by calling this function from different threads.
539 std::unique_ptr<IWorkingMemHandle> RuntimeImpl::CreateWorkingMemHandle(NetworkId networkId)
540 {
541  LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);
542 
// Guard clauses: working-memory handles only make sense for an existing,
// async-enabled network; report the problem and return a null handle.
543  if (!loadedNetwork)
544  {
545  ARMNN_LOG(error) << "A Network with an id of " << networkId << " does not exist.\n";
546  return nullptr;
547  }
548  if (!loadedNetwork->IsAsyncEnabled())
549  {
550  ARMNN_LOG(error) << "Network " << networkId << " is not async enabled.\n";
551  return nullptr;
552  }
554 
555  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "CreateWorkingMemHandle");
556 
// Per-thread tracking: if this thread last touched a *different* network,
// release that network's working memory before serving the new one.
// (Same pattern as EnqueueWorkload.)
557  static thread_local NetworkId lastId = networkId;
558  if (lastId != networkId)
559  {
560  LoadedNetworkFuncSafe(lastId, [](LoadedNetwork* network)
561  {
562  network->FreeWorkingMemory();
563  });
564  }
565  lastId=networkId;
566 
567  return loadedNetwork->CreateWorkingMemHandle(networkId);
568 }
569 
571 {
572  LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);
573  loadedNetwork->RegisterDebugCallback(func);
574 }
575 
576 void RuntimeImpl::LoadDynamicBackends(const std::string& overrideBackendPath)
577 {
578  // Get the paths where to load the dynamic backends from
579  std::vector<std::string> backendPaths = DynamicBackendUtils::GetBackendPaths(overrideBackendPath);
580 
581  // Get the shared objects to try to load as dynamic backends
582  std::vector<std::string> sharedObjects = DynamicBackendUtils::GetSharedObjects(backendPaths);
583 
584  // Create a list of dynamic backends
585  m_DynamicBackends = DynamicBackendUtils::CreateDynamicBackends(sharedObjects);
586 
587  // Register the dynamic backends in the backend registry
588  BackendIdSet registeredBackendIds = DynamicBackendUtils::RegisterDynamicBackends(m_DynamicBackends);
589 
590  // Add the registered dynamic backend ids to the list of supported backends
591  m_DeviceSpec.AddSupportedBackends(registeredBackendIds, true);
592 }
593 
594 } // namespace armnn
void AddSupportedBackends(const BackendIdSet &backendIds, bool isDynamic=false)
Definition: DeviceSpec.hpp:30
TensorInfo GetInputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
Definition: Runtime.cpp:469
std::unique_ptr< IWorkingMemHandle > CreateWorkingMemHandle(NetworkId networkId)
Create a new unique WorkingMemHandle object.
void WaitForProfilingServiceActivation(unsigned int timeout) override
bool HasCapability(const std::string &name, const BackendCapabilities &capabilities)
Convenience function to check if a capability exists in a BackendCapabilities struct.
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:39
FactoryFunction GetFactory(const BackendId &id) const
const std::shared_ptr< IProfiler > GetProfiler(NetworkId networkId) const
Gets the profiler corresponding to the given network id.
Definition: Runtime.cpp:108
std::chrono::duration< double, std::milli > GetTimeDuration(std::chrono::high_resolution_clock::time_point start_time)
Definition: Timer.hpp:19
static void DeregisterDynamicBackends(const BackendIdSet &dynamicBackends)
virtual NetworkId GetNetworkId()=0
Returns the NetworkId of the Network that this IWorkingMemHandle works with.
std::unordered_set< BackendId > BackendIdSet
Definition: BackendId.hpp:191
static std::unique_ptr< TimelineUtilityMethods > GetTimelineUtils(ProfilingService &profilingService)
static ProfilerManager & GetInstance()
Definition: Profiling.cpp:526
TensorInfo GetInputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
Definition: Runtime.cpp:69
#define ARMNN_VERSION
ARMNN_VERSION: "X.Y.Z" where: X = Major version number Y = Minor version number Z = Patch version num...
Definition: Version.hpp:22
Status UnloadNetwork(NetworkId networkId)
Unloads a network from the Runtime.
Definition: Runtime.cpp:183
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:30
void RegisterAllocator(const BackendId &id, std::shared_ptr< ICustomAllocator > alloc)
TensorInfo GetInputTensorInfo(LayerBindingId layerId) const
#define ARMNN_LOG(severity)
Definition: Logging.hpp:202
BackendRegistry & BackendRegistryInstance()
Status LoadNetwork(NetworkId &networkIdOut, IOptimizedNetworkPtr network)
Loads a complete network into the Runtime.
Definition: Runtime.cpp:123
std::vector< std::pair< LayerBindingId, class ConstTensor > > InputTensors
Definition: Tensor.hpp:360
Status Execute(IWorkingMemHandle &workingMemHandle, const InputTensors &inputTensors, const OutputTensors &outputTensors)
This is an experimental function.
Definition: Runtime.cpp:513
static std::vector< DynamicBackendPtr > CreateDynamicBackends(const std::vector< std::string > &sharedObjects)
TensorInfo GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
Definition: Runtime.cpp:474
std::chrono::high_resolution_clock::time_point GetTimeNow()
Definition: Timer.hpp:14
TensorInfo GetOutputTensorInfo(LayerBindingId layerId) const
Copyright (c) 2021 ARM Limited and Contributors.
Status EnqueueWorkload(NetworkId networkId, const InputTensors &inputTensors, const OutputTensors &outputTensors)
Definition: Runtime.cpp:480
void RegisterDebugCallback(NetworkId networkId, const DebugCallbackFunction &func)
Registers a callback function to debug layers performing custom computations on intermediate tensors...
Definition: Runtime.cpp:570
const std::shared_ptr< IProfiler > GetProfiler(NetworkId networkId) const
Gets the profiler corresponding to the given network id.
Definition: Runtime.cpp:235
std::function< void(LayerGuid guid, unsigned int slotIndex, ITensorHandle *tensorHandle)> DebugCallbackFunction
Define the type of callback for the Debug layer to call.
Definition: Types.hpp:321
static std::vector< std::string > GetBackendPaths(const std::string &overrideBackendPath="")
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
Definition: Profiling.hpp:219
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:244
Status Execute(IWorkingMemHandle &workingMemHandle, const InputTensors &inputTensors, const OutputTensors &outputTensors)
This is an experimental function.
Definition: Runtime.cpp:86
static void Destroy(IRuntime *runtime)
Definition: Runtime.cpp:44
Status Execute(const InputTensors &inputTensors, const OutputTensors &outputTensors, IWorkingMemHandle &workingMemHandle)
Thread safe execution of the loaded network.
void RegisterDebugCallback(NetworkId networkId, const DebugCallbackFunction &func)
Registers a callback function to debug layers performing custom computations on intermediate tensors...
Definition: Runtime.cpp:113
void ReportStructure()
Definition: Runtime.cpp:247
BackendCapability
BackendCapability class.
Definition: Types.hpp:221
TensorInfo GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
Definition: Runtime.cpp:74
static std::vector< std::string > GetSharedObjects(const std::vector< std::string > &backendPaths)
int NetworkId
Definition: IRuntime.hpp:24
Status UnloadNetwork(NetworkId networkId)
Unloads a network from the IRuntime.
Definition: Runtime.cpp:93
std::vector< std::pair< LayerBindingId, class Tensor > > OutputTensors
Definition: Tensor.hpp:361
Status
enumeration
Definition: Types.hpp:29
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:173
void SetProfilingService(armnn::Optional< profiling::ProfilingService &> profilingService)
std::map< BackendId, std::shared_ptr< ICustomAllocator > > m_CustomAllocatorMap
A map to define a custom memory allocator for specific backend Ids.
Definition: IRuntime.hpp:136
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
std::unique_ptr< RuntimeImpl > pRuntimeImpl
Definition: IRuntime.hpp:265
Status LoadNetwork(NetworkId &networkIdOut, IOptimizedNetworkPtr network)
Loads a complete network into the IRuntime.
Definition: Runtime.cpp:49
Device specific knowledge to be passed to the optimizer.
Definition: Types.hpp:234
static IRuntime * CreateRaw(const CreationOptions &options)
Definition: Runtime.cpp:34
const IDeviceSpec & GetDeviceSpec() const
Definition: Runtime.cpp:98
bool m_ProtectedMode
Setting this flag will allow the user to create the Runtime in protected mode.
Definition: IRuntime.hpp:127
Status EnqueueWorkload(const InputTensors &inputTensors, const OutputTensors &outputTensors)
Single thread execution of the loaded network.
std::string m_DynamicBackendsPath
Setting this value will override the paths set by the DYNAMIC_BACKEND_PATHS compiler directive Only a...
Definition: IRuntime.hpp:120
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
void RegisterProfiler(IProfiler *profiler)
Definition: Profiling.cpp:533
Base class for all ArmNN exceptions so that users can filter to just those.
Definition: Exceptions.hpp:46
const std::shared_ptr< IProfiler > & GetProfiler() const
void RegisterDebugCallback(const DebugCallbackFunction &func)
RuntimeImpl(const IRuntime::CreationOptions &options)
Creates a runtime for workload execution.
Definition: Runtime.cpp:262
const BackendIdSet & GetDynamicBackends() const
Definition: DeviceSpec.hpp:48
std::unique_ptr< IWorkingMemHandle > CreateWorkingMemHandle(NetworkId networkId)
Create a new unique WorkingMemHandle object.
Definition: Runtime.cpp:103
void ClearDynamicBackends()
Definition: DeviceSpec.hpp:39
std::unique_ptr< IWorkingMemHandle > CreateWorkingMemHandle(NetworkId networkId)
Create a new unique WorkingMemHandle object.
Definition: Runtime.cpp:539
static BackendIdSet RegisterDynamicBackends(const std::vector< DynamicBackendPtr > &dynamicBackends)
void AddBackendProfilingContext(const BackendId backendId, std::shared_ptr< armnn::profiling::IBackendProfilingContext > profilingContext)
ExternalProfilingOptions m_ProfilingOptions
Definition: IRuntime.hpp:160
ProfilingState ConfigureProfilingService(const ExternalProfilingOptions &options, bool resetProfilingService=false)
static std::unique_ptr< LoadedNetwork > MakeLoadedNetwork(std::unique_ptr< IOptimizedNetwork > net, std::string &errorMessage, const INetworkProperties &networkProperties, profiling::ProfilingService &profilingService)
Status EnqueueWorkload(NetworkId networkId, const InputTensors &inputTensors, const OutputTensors &outputTensors)
Evaluates a network using input in inputTensors and outputs filled into outputTensors.
Definition: Runtime.cpp:79
Class for non-fatal exceptions raised while initialising a backend.
Definition: Exceptions.hpp:68