ArmNN 21.05
Runtime.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "Runtime.hpp"

#include <armnn/Version.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/Logging.hpp>
#include <armnn/utility/Timer.hpp>

#include <armnn/backends/IBackendContext.hpp>
#include <backendsCommon/DynamicBackendUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <iostream>

#include <backends/BackendProfiling.hpp>

using namespace armnn;
using namespace std;

namespace armnn
{

IRuntime::IRuntime() : pRuntimeImpl(new RuntimeImpl(armnn::IRuntime::CreationOptions())) {}

IRuntime::IRuntime(const IRuntime::CreationOptions& options) : pRuntimeImpl(new RuntimeImpl(options)) {}

IRuntime::~IRuntime() = default;

IRuntime* IRuntime::CreateRaw(const CreationOptions& options)
{
    return new IRuntime(options);
}

IRuntimePtr IRuntime::Create(const CreationOptions& options)
{
    return IRuntimePtr(CreateRaw(options), &IRuntime::Destroy);
}

void IRuntime::Destroy(IRuntime* runtime)
{
    delete runtime;
}

Status IRuntime::LoadNetwork(NetworkId& networkIdOut, IOptimizedNetworkPtr network)
{
    return pRuntimeImpl->LoadNetwork(networkIdOut, std::move(network));
}

Status IRuntime::LoadNetwork(NetworkId& networkIdOut,
                             IOptimizedNetworkPtr network,
                             std::string& errorMessage)
{
    return pRuntimeImpl->LoadNetwork(networkIdOut, std::move(network), errorMessage);
}

Status IRuntime::LoadNetwork(NetworkId& networkIdOut,
                             IOptimizedNetworkPtr network,
                             std::string& errorMessage,
                             const INetworkProperties& networkProperties)
{
    return pRuntimeImpl->LoadNetwork(networkIdOut, std::move(network), errorMessage, networkProperties);
}

66 
68 {
69  return pRuntimeImpl->GetInputTensorInfo(networkId, layerId);
70 }
71 
73 {
74  return pRuntimeImpl->GetOutputTensorInfo(networkId, layerId);
75 }
76 
Status IRuntime::EnqueueWorkload(NetworkId networkId,
                                 const InputTensors& inputTensors,
                                 const OutputTensors& outputTensors)
{
    return pRuntimeImpl->EnqueueWorkload(networkId, inputTensors, outputTensors);
}

Status IRuntime::Execute(IWorkingMemHandle& workingMemHandle,
                         const InputTensors& inputTensors,
                         const OutputTensors& outputTensors)
{
    return pRuntimeImpl->Execute(workingMemHandle, inputTensors, outputTensors);
}

void IRuntime::Schedule(NetworkId networkId,
                        const InputTensors& inputTensors,
                        const OutputTensors& outputTensors,
                        const QosExecPriority priority,
                        std::shared_ptr<IAsyncExecutionCallback> cb)
{
    pRuntimeImpl->Schedule(networkId, inputTensors, outputTensors, priority, cb);
}

Status IRuntime::UnloadNetwork(NetworkId networkId)
{
    return pRuntimeImpl->UnloadNetwork(networkId);
}

const IDeviceSpec& IRuntime::GetDeviceSpec() const
{
    return pRuntimeImpl->GetDeviceSpec();
}

std::unique_ptr<IWorkingMemHandle> IRuntime::CreateWorkingMemHandle(NetworkId networkId)
{
    return pRuntimeImpl->CreateWorkingMemHandle(networkId);
}

const std::shared_ptr<IProfiler> IRuntime::GetProfiler(NetworkId networkId) const
{
    return pRuntimeImpl->GetProfiler(networkId);
}

void IRuntime::RegisterDebugCallback(NetworkId networkId, const DebugCallbackFunction& func)
{
    return pRuntimeImpl->RegisterDebugCallback(networkId, func);
}

int RuntimeImpl::GenerateNetworkId()
{
    return m_NetworkIdCounter++;
}

Status RuntimeImpl::LoadNetwork(NetworkId& networkIdOut, IOptimizedNetworkPtr inNetwork)
{
    std::string ignoredErrorMessage;
    return LoadNetwork(networkIdOut, std::move(inNetwork), ignoredErrorMessage);
}

Status RuntimeImpl::LoadNetwork(NetworkId& networkIdOut,
                                IOptimizedNetworkPtr inNetwork,
                                std::string& errorMessage)
{
    INetworkProperties networkProperties(false, MemorySource::Undefined, MemorySource::Undefined);
    return LoadNetwork(networkIdOut, std::move(inNetwork), errorMessage, networkProperties);
}

Status RuntimeImpl::LoadNetwork(NetworkId& networkIdOut,
                                IOptimizedNetworkPtr inNetwork,
                                std::string& errorMessage,
                                const INetworkProperties& networkProperties)
{
    IOptimizedNetwork* rawNetwork = inNetwork.release();

    networkIdOut = GenerateNetworkId();

    for (auto&& context : m_BackendContexts)
    {
        context.second->BeforeLoadNetwork(networkIdOut);
    }

    unique_ptr<LoadedNetwork> loadedNetwork = LoadedNetwork::MakeLoadedNetwork(
        std::unique_ptr<IOptimizedNetwork>(rawNetwork),
        errorMessage,
        networkProperties,
        m_ProfilingService,
        networkIdOut);

    if (!loadedNetwork)
    {
        return Status::Failure;
    }

    {
        std::lock_guard<std::mutex> lockGuard(m_Mutex);

        // Store the network
        m_LoadedNetworks[networkIdOut] = std::move(loadedNetwork);
    }

    for (auto&& context : m_BackendContexts)
    {
        context.second->AfterLoadNetwork(networkIdOut);
    }

    if (m_ProfilingService.IsProfilingEnabled())
    {
        m_ProfilingService.IncrementCounterValue(armnn::profiling::NETWORK_LOADS);
    }

    return Status::Success;
}

Status RuntimeImpl::UnloadNetwork(NetworkId networkId)
{
    bool unloadOk = true;
    for (auto&& context : m_BackendContexts)
    {
        unloadOk &= context.second->BeforeUnloadNetwork(networkId);
    }

    if (!unloadOk)
    {
        ARMNN_LOG(warning) << "RuntimeImpl::UnloadNetwork(): failed to unload "
                              "network with ID: " << networkId << " because BeforeUnloadNetwork failed";
        return Status::Failure;
    }

    std::unique_ptr<profiling::TimelineUtilityMethods> timelineUtils =
        profiling::TimelineUtilityMethods::GetTimelineUtils(m_ProfilingService);
    {
        std::lock_guard<std::mutex> lockGuard(m_Mutex);

        // If timeline recording is on, mark the network's end of life
        if (timelineUtils)
        {
            auto search = m_LoadedNetworks.find(networkId);
            if (search != m_LoadedNetworks.end())
            {
                profiling::ProfilingGuid networkGuid = search->second->GetNetworkGuid();
                timelineUtils->RecordEvent(networkGuid,
                                           profiling::LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS);
            }
        }
        if (m_LoadedNetworks.erase(networkId) == 0)
        {
            ARMNN_LOG(warning) << "RuntimeImpl::UnloadNetwork(): network with ID " << networkId << " not found!";
            return Status::Failure;
        }

        if (m_ProfilingService.IsProfilingEnabled())
        {
            m_ProfilingService.IncrementCounterValue(armnn::profiling::NETWORK_UNLOADS);
        }
    }

    for (auto&& context : m_BackendContexts)
    {
        context.second->AfterUnloadNetwork(networkId);
    }

    ARMNN_LOG(debug) << "RuntimeImpl::UnloadNetwork(): Unloaded network with ID: " << networkId;
    return Status::Success;
}

const std::shared_ptr<IProfiler> RuntimeImpl::GetProfiler(NetworkId networkId) const
{
    auto it = m_LoadedNetworks.find(networkId);
    if (it != m_LoadedNetworks.end())
    {
        auto& loadedNetwork = it->second;
        return loadedNetwork->GetProfiler();
    }

    return nullptr;
}

void RuntimeImpl::ReportStructure() // armnn::profiling::IProfilingService& profilingService as param
{
    // No-op for the time being, but it may be useful in future to have the profilingService available
    // if (profilingService.IsProfilingEnabled()) {}

    LoadedNetworks::iterator it = m_LoadedNetworks.begin();
    while (it != m_LoadedNetworks.end())
    {
        auto& loadedNetwork = it->second;
        loadedNetwork->SendNetworkStructure();
        // Advance the iterator to the next entry
        ++it;
    }
}

RuntimeImpl::RuntimeImpl(const IRuntime::CreationOptions& options)
    : m_NetworkIdCounter(0),
      m_ProfilingService(*this)
{
    const auto start_time = armnn::GetTimeNow();
    ARMNN_LOG(info) << "ArmNN v" << ARMNN_VERSION << "\n";

    if (options.m_ProfilingOptions.m_TimelineEnabled && !options.m_ProfilingOptions.m_EnableProfiling)
    {
        throw RuntimeException("It is not possible to enable timeline reporting without profiling being enabled");
    }

    // Load any available/compatible dynamic backends before the runtime
    // goes through the backend registry
    LoadDynamicBackends(options.m_DynamicBackendsPath);

    BackendIdSet supportedBackends;
    for (const auto& id : BackendRegistryInstance().GetBackendIds())
    {
        // Store backend contexts for the supported ones
        try {
            auto factoryFun = BackendRegistryInstance().GetFactory(id);
            auto backend = factoryFun();
            ARMNN_ASSERT(backend.get() != nullptr);

            auto context = backend->CreateBackendContext(options);

            // Backends are allowed to return nullptrs if they
            // don't wish to create a backend-specific context
            if (context)
            {
                m_BackendContexts.emplace(std::make_pair(id, std::move(context)));
            }
            supportedBackends.emplace(id);

            unique_ptr<armnn::profiling::IBackendProfiling> profilingIface =
                std::make_unique<armnn::profiling::BackendProfiling>(armnn::profiling::BackendProfiling(
                    options, m_ProfilingService, id));

            // Backends may also provide a profiling context. Ask for it now.
            auto profilingContext = backend->CreateBackendProfilingContext(options, profilingIface);
            // Backends that don't support profiling will return a null profiling context.
            if (profilingContext)
            {
                // Pass the context on to the profiling service.
                m_ProfilingService.AddBackendProfilingContext(id, profilingContext);
            }
        }
        catch (const BackendUnavailableException&)
        {
            // Ignore backends which are unavailable
        }
    }

    BackendRegistryInstance().SetProfilingService(m_ProfilingService);
    // Pass configuration info to the profiling service
    m_ProfilingService.ConfigureProfilingService(options.m_ProfilingOptions);
    if (options.m_ProfilingOptions.m_EnableProfiling)
    {
        // Try to wait for the profiling service to initialise
        m_ProfilingService.WaitForProfilingServiceActivation(3000);
    }

    m_DeviceSpec.AddSupportedBackends(supportedBackends);

    ARMNN_LOG(info) << "Initialization time: " << std::setprecision(2)
                    << std::fixed << armnn::GetTimeDuration(start_time).count() << " ms\n";
}

RuntimeImpl::~RuntimeImpl()
{
    const auto start_time = armnn::GetTimeNow();
    std::vector<int> networkIDs;
    try
    {
        // Coverity fix: The following code may throw an exception of type std::length_error.
        std::transform(m_LoadedNetworks.begin(), m_LoadedNetworks.end(),
                       std::back_inserter(networkIDs),
                       [](const auto& pair) { return pair.first; });
    }
    catch (const std::exception& e)
    {
        // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
        // exception of type std::length_error.
        // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
        std::cerr << "WARNING: An error has occurred when getting the IDs of the networks to unload: " << e.what()
                  << "\nSome of the loaded networks may not be unloaded" << std::endl;
    }
    // We then proceed to unload all the networks whose IDs have been appended to the list
    // up to the point the exception was thrown (if any).

    for (auto networkID : networkIDs)
    {
        try
        {
            // Coverity fix: UnloadNetwork() may throw an exception of type std::length_error,
            // boost::log::v2s_mt_posix::odr_violation or boost::log::v2s_mt_posix::system_error
            UnloadNetwork(networkID);
        }
        catch (const std::exception& e)
        {
            // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
            // exception of type std::length_error.
            // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
            std::cerr << "WARNING: An error has occurred when unloading network " << networkID << ": " << e.what()
                      << std::endl;
        }
    }

    // Clear all dynamic backends.
    DynamicBackendUtils::DeregisterDynamicBackends(m_DeviceSpec.GetDynamicBackends());
    m_DeviceSpec.ClearDynamicBackends();
    m_BackendContexts.clear();

    BackendRegistryInstance().SetProfilingService(armnn::EmptyOptional());
    ARMNN_LOG(info) << "Shutdown time: " << std::setprecision(2)
                    << std::fixed << armnn::GetTimeDuration(start_time).count() << " ms\n";
}

LoadedNetwork* RuntimeImpl::GetLoadedNetworkPtr(NetworkId networkId) const
{
    std::lock_guard<std::mutex> lockGuard(m_Mutex);
    return m_LoadedNetworks.at(networkId).get();
}

TensorInfo RuntimeImpl::GetInputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
{
    return GetLoadedNetworkPtr(networkId)->GetInputTensorInfo(layerId);
}

TensorInfo RuntimeImpl::GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
{
    return GetLoadedNetworkPtr(networkId)->GetOutputTensorInfo(layerId);
}

Status RuntimeImpl::EnqueueWorkload(NetworkId networkId,
                                    const InputTensors& inputTensors,
                                    const OutputTensors& outputTensors)
{
    LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);

    if (!loadedNetwork)
    {
        ARMNN_LOG(error) << "A network with an id of " << networkId << " does not exist.\n";
        return Status::Failure;
    }
    if (loadedNetwork->IsAsyncEnabled())
    {
        ARMNN_LOG(error) << "Network " << networkId << " is async enabled: use Execute() instead.\n";
        return Status::Failure;
    }
    ProfilerManager::GetInstance().RegisterProfiler(loadedNetwork->GetProfiler().get());

    ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "EnqueueWorkload");

    static thread_local NetworkId lastId = networkId;
    if (lastId != networkId)
    {
        LoadedNetworkFuncSafe(lastId, [](LoadedNetwork* network)
        {
            network->FreeWorkingMemory();
        });
    }
    lastId = networkId;

    return loadedNetwork->EnqueueWorkload(inputTensors, outputTensors);
}

Status RuntimeImpl::Execute(IWorkingMemHandle& iWorkingMemHandle,
                            const InputTensors& inputTensors,
                            const OutputTensors& outputTensors)
{
    NetworkId networkId = iWorkingMemHandle.GetNetworkId();
    LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);

    if (!loadedNetwork)
    {
        ARMNN_LOG(error) << "A network with an id of " << networkId << " does not exist.\n";
        return Status::Failure;
    }
    if (!loadedNetwork->IsAsyncEnabled())
    {
        ARMNN_LOG(error) << "Attempting to execute network " << networkId << " when it is not async enabled.\n";
        return Status::Failure;
    }
    ProfilerManager::GetInstance().RegisterProfiler(loadedNetwork->GetProfiler().get());

    ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Execute");

    return loadedNetwork->Execute(inputTensors, outputTensors, iWorkingMemHandle);
}

void RuntimeImpl::Schedule(NetworkId networkId,
                           const InputTensors& inputTensors,
                           const OutputTensors& outputTensors,
                           const QosExecPriority priority,
                           std::shared_ptr<IAsyncExecutionCallback> callback)
{
    LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);

    if (!loadedNetwork)
    {
        throw armnn::Exception(
            "Network with ID of " + std::to_string(networkId) + " does not exist\n");
    }
    if (!loadedNetwork->IsAsyncEnabled())
    {
        throw armnn::Exception(
            "Attempting to schedule network " + std::to_string(networkId) + " when it is not async enabled\n");
    }
    ProfilerManager::GetInstance().RegisterProfiler(loadedNetwork->GetProfiler().get());

    ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Schedule");

    loadedNetwork->Schedule(inputTensors, outputTensors, priority, callback);
}

/// Create a new unique WorkingMemHandle object. Create multiple handles if you wish to have
/// overlapped execution by calling this function from different threads.
std::unique_ptr<IWorkingMemHandle> RuntimeImpl::CreateWorkingMemHandle(NetworkId networkId)
{
    LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);

    if (!loadedNetwork)
    {
        ARMNN_LOG(error) << "A network with an id of " << networkId << " does not exist.\n";
        return nullptr;
    }
    if (!loadedNetwork->IsAsyncEnabled())
    {
        ARMNN_LOG(error) << "Network " << networkId << " is not async enabled.\n";
        return nullptr;
    }
    ProfilerManager::GetInstance().RegisterProfiler(loadedNetwork->GetProfiler().get());

    ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "CreateWorkingMemHandle");

    static thread_local NetworkId lastId = networkId;
    if (lastId != networkId)
    {
        LoadedNetworkFuncSafe(lastId, [](LoadedNetwork* network)
        {
            network->FreeWorkingMemory();
        });
    }
    lastId = networkId;

    return loadedNetwork->CreateWorkingMemHandle(networkId);
}

void RuntimeImpl::RegisterDebugCallback(NetworkId networkId, const DebugCallbackFunction& func)
{
    LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);
    loadedNetwork->RegisterDebugCallback(func);
}

void RuntimeImpl::LoadDynamicBackends(const std::string& overrideBackendPath)
{
    // Get the paths to load the dynamic backends from
    std::vector<std::string> backendPaths = DynamicBackendUtils::GetBackendPaths(overrideBackendPath);

    // Get the shared objects to try to load as dynamic backends
    std::vector<std::string> sharedObjects = DynamicBackendUtils::GetSharedObjects(backendPaths);

    // Create a list of dynamic backends
    m_DynamicBackends = DynamicBackendUtils::CreateDynamicBackends(sharedObjects);

    // Register the dynamic backends in the backend registry
    BackendIdSet registeredBackendIds = DynamicBackendUtils::RegisterDynamicBackends(m_DynamicBackends);

    // Add the registered dynamic backend ids to the list of supported backends
    m_DeviceSpec.AddSupportedBackends(registeredBackendIds, true);
}

} // namespace armnn
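
For orientation, here is a minimal usage sketch of the synchronous path defined above (IRuntime::Create, LoadNetwork, EnqueueWorkload, UnloadNetwork). It is not part of Runtime.cpp: the pre-optimized network and the pre-bound input/output tensors are assumed to exist, and the helper name RunOnce is hypothetical.

// Hypothetical helper: assumes `optNet` came from armnn::Optimize() and that the
// tensors are already bound to the network's input/output LayerBindingIds.
#include <armnn/ArmNN.hpp>
#include <iostream>
#include <string>

armnn::Status RunOnce(armnn::IOptimizedNetworkPtr optNet,
                      const armnn::InputTensors& inputs,
                      const armnn::OutputTensors& outputs)
{
    armnn::IRuntime::CreationOptions options; // default options, no profiling
    armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);

    armnn::NetworkId networkId = 0;
    std::string errorMessage;
    if (runtime->LoadNetwork(networkId, std::move(optNet), errorMessage) != armnn::Status::Success)
    {
        std::cerr << "LoadNetwork failed: " << errorMessage << std::endl;
        return armnn::Status::Failure;
    }

    // Synchronous, single-threaded execution (the EnqueueWorkload path above)
    armnn::Status result = runtime->EnqueueWorkload(networkId, inputs, outputs);

    runtime->UnloadNetwork(networkId);
    return result;
} // IRuntimePtr's deleter calls IRuntime::Destroy, tearing down the backend contexts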
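Similarly, a hedged sketch of the experimental asynchronous path (CreateWorkingMemHandle plus Execute). It assumes the network was loaded with an INetworkProperties whose async flag is enabled; the two-thread layout and the helper name RunOverlapped are illustrative only.

// Illustrative only: overlapped execution with one working-memory handle per
// thread, as the CreateWorkingMemHandle() comment above suggests.
#include <armnn/ArmNN.hpp>
#include <armnn/IWorkingMemHandle.hpp>
#include <thread>

void RunOverlapped(armnn::IRuntime& runtime, armnn::NetworkId networkId,
                   const armnn::InputTensors& in0, const armnn::OutputTensors& out0,
                   const armnn::InputTensors& in1, const armnn::OutputTensors& out1)
{
    auto handle0 = runtime.CreateWorkingMemHandle(networkId);
    auto handle1 = runtime.CreateWorkingMemHandle(networkId);
    if (!handle0 || !handle1)
    {
        return; // CreateWorkingMemHandle logs an error and returns nullptr on failure
    }

    // Each thread executes the same loaded network through its own handle
    std::thread t0([&] { runtime.Execute(*handle0, in0, out0); });
    std::thread t1([&] { runtime.Execute(*handle1, in1, out1); });
    t0.join();
    t1.join();
}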