ArmNN
21.02
Runtime.cpp
Source listing extracted from the Doxygen documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include "Runtime.hpp"
6 
7 #include <armnn/Version.hpp>
10 #include <armnn/Logging.hpp>
11 #include <armnn/utility/Timer.hpp>
12 
16 
17 #include <iostream>
18 
20 
21 using namespace armnn;
22 using namespace std;
23 
24 namespace armnn
25 {
27 
29 
30 IRuntime::~IRuntime() = default;
31 
33 {
34  return new IRuntime(options);
35 }
36 
38 {
39  return IRuntimePtr(CreateRaw(options), &IRuntime::Destroy);
40 }
41 
43 {
44  delete runtime;
45 }
46 
48 {
49  return pRuntimeImpl->LoadNetwork(networkIdOut, std::move(network));
50 }
51 
53  IOptimizedNetworkPtr network,
54  std::string& errorMessage)
55 {
56  return pRuntimeImpl->LoadNetwork(networkIdOut, std::move(network), errorMessage);
57 }
58 
60  IOptimizedNetworkPtr network,
61  std::string& errorMessage,
62  const INetworkProperties& networkProperties)
63 {
64  return pRuntimeImpl->LoadNetwork(networkIdOut, std::move(network), errorMessage, networkProperties);
65 }
66 
68 {
69  return pRuntimeImpl->GetInputTensorInfo(networkId, layerId);
70 }
71 
73 {
74  return pRuntimeImpl->GetOutputTensorInfo(networkId, layerId);
75 }
76 
78  const InputTensors& inputTensors,
79  const OutputTensors& outputTensors)
80 {
81  return pRuntimeImpl->EnqueueWorkload(networkId, inputTensors, outputTensors);
82 }
83 
85 {
86  return pRuntimeImpl->UnloadNetwork(networkId);
87 }
88 
90 {
91  return pRuntimeImpl->GetDeviceSpec();
92 }
93 
94 const std::shared_ptr<IProfiler> IRuntime::GetProfiler(NetworkId networkId) const
95 {
96  return pRuntimeImpl->GetProfiler(networkId);
97 }
98 
100 {
101  return pRuntimeImpl->RegisterDebugCallback(networkId, func);
102 }
103 
104 int RuntimeImpl::GenerateNetworkId()
105 {
106  return m_NetworkIdCounter++;
107 }
108 
110 {
111  std::string ignoredErrorMessage;
112  return LoadNetwork(networkIdOut, std::move(inNetwork), ignoredErrorMessage);
113 }
114 
116  IOptimizedNetworkPtr inNetwork,
117  std::string& errorMessage)
118 {
119  INetworkProperties networkProperties;
120  return LoadNetwork(networkIdOut, std::move(inNetwork), errorMessage, networkProperties);
121 }
122 
124  IOptimizedNetworkPtr inNetwork,
125  std::string& errorMessage,
126  const INetworkProperties& networkProperties)
127 {
128  IOptimizedNetwork* rawNetwork = inNetwork.release();
129 
130  networkIdOut = GenerateNetworkId();
131 
132  for (auto&& context : m_BackendContexts)
133  {
134  context.second->BeforeLoadNetwork(networkIdOut);
135  }
136 
137  unique_ptr<LoadedNetwork> loadedNetwork = LoadedNetwork::MakeLoadedNetwork(
138  std::unique_ptr<IOptimizedNetwork>(rawNetwork),
139  errorMessage,
140  networkProperties,
141  m_ProfilingService);
142 
143  if (!loadedNetwork)
144  {
145  return Status::Failure;
146  }
147 
148  {
149  std::lock_guard<std::mutex> lockGuard(m_Mutex);
150 
151  // Stores the network
152  m_LoadedNetworks[networkIdOut] = std::move(loadedNetwork);
153  }
154 
155  for (auto&& context : m_BackendContexts)
156  {
157  context.second->AfterLoadNetwork(networkIdOut);
158  }
159 
160  if (m_ProfilingService.IsProfilingEnabled())
161  {
162  m_ProfilingService.IncrementCounterValue(armnn::profiling::NETWORK_LOADS);
163  }
164 
165  return Status::Success;
166 }
167 
169 {
170  bool unloadOk = true;
171  for (auto&& context : m_BackendContexts)
172  {
173  unloadOk &= context.second->BeforeUnloadNetwork(networkId);
174  }
175 
176  if (!unloadOk)
177  {
178  ARMNN_LOG(warning) << "RuntimeImpl::UnloadNetwork(): failed to unload "
179  "network with ID:" << networkId << " because BeforeUnloadNetwork failed";
180  return Status::Failure;
181  }
182 
183  std::unique_ptr<profiling::TimelineUtilityMethods> timelineUtils =
185  {
186  std::lock_guard<std::mutex> lockGuard(m_Mutex);
187 
188  // If timeline recording is on mark the Network end of life
189  if (timelineUtils)
190  {
191  auto search = m_LoadedNetworks.find(networkId);
192  if (search != m_LoadedNetworks.end())
193  {
194  profiling::ProfilingGuid networkGuid = search->second->GetNetworkGuid();
195  timelineUtils->RecordEvent(networkGuid,
197  }
198  }
199  if (m_LoadedNetworks.erase(networkId) == 0)
200  {
201  ARMNN_LOG(warning) << "WARNING: RuntimeImpl::UnloadNetwork(): " << networkId << " not found!";
202  return Status::Failure;
203  }
204 
205  if (m_ProfilingService.IsProfilingEnabled())
206  {
207  m_ProfilingService.IncrementCounterValue(armnn::profiling::NETWORK_UNLOADS);
208  }
209  }
210 
211  for (auto&& context : m_BackendContexts)
212  {
213  context.second->AfterUnloadNetwork(networkId);
214  }
215 
216  ARMNN_LOG(debug) << "RuntimeImpl::UnloadNetwork(): Unloaded network with ID: " << networkId;
217  return Status::Success;
218 }
219 
220 const std::shared_ptr<IProfiler> RuntimeImpl::GetProfiler(NetworkId networkId) const
221 {
222  auto it = m_LoadedNetworks.find(networkId);
223  if (it != m_LoadedNetworks.end())
224  {
225  auto& loadedNetwork = it->second;
226  return loadedNetwork->GetProfiler();
227  }
228 
229  return nullptr;
230 }
231 
232 void RuntimeImpl::ReportStructure() // armnn::profiling::IProfilingService& profilingService as param
233 {
234  // No-op for the time being, but this may be useful in future to have the profilingService available
235  // if (profilingService.IsProfilingEnabled()){}
236 
237  LoadedNetworks::iterator it = m_LoadedNetworks.begin();
238  while (it != m_LoadedNetworks.end())
239  {
240  auto& loadedNetwork = it->second;
241  loadedNetwork->SendNetworkStructure();
242  // Increment the Iterator to point to next entry
243  it++;
244  }
245 }
246 
248  : m_NetworkIdCounter(0),
249  m_ProfilingService(*this)
250 {
251  const auto start_time = armnn::GetTimeNow();
252  ARMNN_LOG(info) << "ArmNN v" << ARMNN_VERSION << "\n";
253 
255  {
256  throw RuntimeException("It is not possible to enable timeline reporting without profiling being enabled");
257  }
258 
259  // Load any available/compatible dynamic backend before the runtime
260  // goes through the backend registry
261  LoadDynamicBackends(options.m_DynamicBackendsPath);
262 
263  BackendIdSet supportedBackends;
264  for (const auto& id : BackendRegistryInstance().GetBackendIds())
265  {
266  // Store backend contexts for the supported ones
267  try {
268  auto factoryFun = BackendRegistryInstance().GetFactory(id);
269  auto backend = factoryFun();
270  ARMNN_ASSERT(backend.get() != nullptr);
271 
272  auto context = backend->CreateBackendContext(options);
273 
274  // backends are allowed to return nullptrs if they
275  // don't wish to create a backend specific context
276  if (context)
277  {
278  m_BackendContexts.emplace(std::make_pair(id, std::move(context)));
279  }
280  supportedBackends.emplace(id);
281 
282  unique_ptr<armnn::profiling::IBackendProfiling> profilingIface =
283  std::make_unique<armnn::profiling::BackendProfiling>(armnn::profiling::BackendProfiling(
284  options, m_ProfilingService, id));
285 
286  // Backends may also provide a profiling context. Ask for it now.
287  auto profilingContext = backend->CreateBackendProfilingContext(options, profilingIface);
288  // Backends that don't support profiling will return a null profiling context.
289  if (profilingContext)
290  {
291  // Pass the context onto the profiling service.
292  m_ProfilingService.AddBackendProfilingContext(id, profilingContext);
293  }
294  }
295  catch (const BackendUnavailableException&)
296  {
297  // Ignore backends which are unavailable
298  }
299  }
300 
301  BackendRegistryInstance().SetProfilingService(m_ProfilingService);
302  // pass configuration info to the profiling service
303  m_ProfilingService.ConfigureProfilingService(options.m_ProfilingOptions);
305  {
306  // try to wait for the profiling service to initialise
307  m_ProfilingService.WaitForProfilingServiceActivation(3000);
308  }
309 
310  m_DeviceSpec.AddSupportedBackends(supportedBackends);
311 
312  ARMNN_LOG(info) << "Initialization time: " << std::setprecision(2)
313  << std::fixed << armnn::GetTimeDuration(start_time).count() << " ms\n";
314 }
315 
317 {
318  const auto start_time = armnn::GetTimeNow();
319  std::vector<int> networkIDs;
320  try
321  {
322  // Coverity fix: The following code may throw an exception of type std::length_error.
323  std::transform(m_LoadedNetworks.begin(), m_LoadedNetworks.end(),
324  std::back_inserter(networkIDs),
325  [](const auto &pair) { return pair.first; });
326  }
327  catch (const std::exception& e)
328  {
329  // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
330  // exception of type std::length_error.
331  // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
332  std::cerr << "WARNING: An error has occurred when getting the IDs of the networks to unload: " << e.what()
333  << "\nSome of the loaded networks may not be unloaded" << std::endl;
334  }
335  // We then proceed to unload all the networks which IDs have been appended to the list
336  // up to the point the exception was thrown (if any).
337 
338  for (auto networkID : networkIDs)
339  {
340  try
341  {
342  // Coverity fix: UnloadNetwork() may throw an exception of type std::length_error,
343  // boost::log::v2s_mt_posix::odr_violation or boost::log::v2s_mt_posix::system_error
344  UnloadNetwork(networkID);
345  }
346  catch (const std::exception& e)
347  {
348  // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
349  // exception of type std::length_error.
350  // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
351  std::cerr << "WARNING: An error has occurred when unloading network " << networkID << ": " << e.what()
352  << std::endl;
353  }
354  }
355 
356  // Clear all dynamic backends.
358  m_DeviceSpec.ClearDynamicBackends();
359  m_BackendContexts.clear();
360 
362  ARMNN_LOG(info) << "Shutdown time: " << std::setprecision(2)
363  << std::fixed << armnn::GetTimeDuration(start_time).count() << " ms\n";
364 }
365 
366 LoadedNetwork* RuntimeImpl::GetLoadedNetworkPtr(NetworkId networkId) const
367 {
368  std::lock_guard<std::mutex> lockGuard(m_Mutex);
369  return m_LoadedNetworks.at(networkId).get();
370 }
371 
373 {
374  return GetLoadedNetworkPtr(networkId)->GetInputTensorInfo(layerId);
375 }
376 
378 {
379  return GetLoadedNetworkPtr(networkId)->GetOutputTensorInfo(layerId);
380 }
381 
382 
384  const InputTensors& inputTensors,
385  const OutputTensors& outputTensors)
386 {
387  LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);
389 
391 
392  static thread_local NetworkId lastId = networkId;
393  if (lastId != networkId)
394  {
395  LoadedNetworkFuncSafe(lastId, [](LoadedNetwork* network)
396  {
397  network->FreeWorkingMemory();
398  });
399  }
400  lastId=networkId;
401 
402  return loadedNetwork->EnqueueWorkload(inputTensors, outputTensors);
403 }
404 
406 {
407  LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);
408  loadedNetwork->RegisterDebugCallback(func);
409 }
410 
411 void RuntimeImpl::LoadDynamicBackends(const std::string& overrideBackendPath)
412 {
413  // Get the paths where to load the dynamic backends from
414  std::vector<std::string> backendPaths = DynamicBackendUtils::GetBackendPaths(overrideBackendPath);
415 
416  // Get the shared objects to try to load as dynamic backends
417  std::vector<std::string> sharedObjects = DynamicBackendUtils::GetSharedObjects(backendPaths);
418 
419  // Create a list of dynamic backends
420  m_DynamicBackends = DynamicBackendUtils::CreateDynamicBackends(sharedObjects);
421 
422  // Register the dynamic backends in the backend registry
423  BackendIdSet registeredBackendIds = DynamicBackendUtils::RegisterDynamicBackends(m_DynamicBackends);
424 
425  // Add the registered dynamic backend ids to the list of supported backends
426  m_DeviceSpec.AddSupportedBackends(registeredBackendIds, true);
427 }
428 
429 } // namespace armnn
void AddSupportedBackends(const BackendIdSet &backendIds, bool isDynamic=false)
Definition: DeviceSpec.hpp:30
TensorInfo GetInputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
Definition: Runtime.cpp:372
void WaitForProfilingServiceActivation(unsigned int timeout) override
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:37
FactoryFunction GetFactory(const BackendId &id) const
const std::shared_ptr< IProfiler > GetProfiler(NetworkId networkId) const
Gets the profiler corresponding to the given network id.
Definition: Runtime.cpp:94
std::chrono::duration< double, std::milli > GetTimeDuration(std::chrono::high_resolution_clock::time_point start_time)
Definition: Timer.hpp:19
static void DeregisterDynamicBackends(const BackendIdSet &dynamicBackends)
std::unordered_set< BackendId > BackendIdSet
Definition: BackendId.hpp:191
static std::unique_ptr< TimelineUtilityMethods > GetTimelineUtils(ProfilingService &profilingService)
static ProfilerManager & GetInstance()
Definition: Profiling.cpp:489
TensorInfo GetInputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
Definition: Runtime.cpp:67
#define ARMNN_VERSION
ARMNN_VERSION: "X.Y.Z" where: X = Major version number Y = Minor version number Z = Patch version num...
Definition: Version.hpp:22
Status UnloadNetwork(NetworkId networkId)
Unloads a network from the Runtime.
Definition: Runtime.cpp:168
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:26
TensorInfo GetInputTensorInfo(LayerBindingId layerId) const
#define ARMNN_LOG(severity)
Definition: Logging.hpp:202
BackendRegistry & BackendRegistryInstance()
Status LoadNetwork(NetworkId &networkIdOut, IOptimizedNetworkPtr network)
Loads a complete network into the Runtime.
Definition: Runtime.cpp:109
std::vector< std::pair< LayerBindingId, class ConstTensor > > InputTensors
Definition: Tensor.hpp:340
static std::vector< DynamicBackendPtr > CreateDynamicBackends(const std::vector< std::string > &sharedObjects)
TensorInfo GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
Definition: Runtime.cpp:377
int NetworkId
Definition: IRuntime.hpp:20
std::chrono::high_resolution_clock::time_point GetTimeNow()
Definition: Timer.hpp:14
TensorInfo GetOutputTensorInfo(LayerBindingId layerId) const
Copyright (c) 2021 ARM Limited and Contributors.
Status EnqueueWorkload(NetworkId networkId, const InputTensors &inputTensors, const OutputTensors &outputTensors)
Definition: Runtime.cpp:383
void RegisterDebugCallback(NetworkId networkId, const DebugCallbackFunction &func)
Registers a callback function to debug layers performing custom computations on intermediate tensors...
Definition: Runtime.cpp:405
const std::shared_ptr< IProfiler > GetProfiler(NetworkId networkId) const
Gets the profiler corresponding to the given network id.
Definition: Runtime.cpp:220
std::function< void(LayerGuid guid, unsigned int slotIndex, ITensorHandle *tensorHandle)> DebugCallbackFunction
Define the type of callback for the Debug layer to call.
Definition: Types.hpp:283
static std::vector< std::string > GetBackendPaths(const std::string &overrideBackendPath="")
static ARMNN_DLLEXPORT ProfilingStaticGuid ARMNN_PROFILING_EOL_EVENT_CLASS
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
Definition: Profiling.hpp:173
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:210
static void Destroy(IRuntime *runtime)
Definition: Runtime.cpp:42
void RegisterDebugCallback(NetworkId networkId, const DebugCallbackFunction &func)
Registers a callback function to debug layers performing custom computations on intermediate tensors...
Definition: Runtime.cpp:99
void ReportStructure()
Definition: Runtime.cpp:232
TensorInfo GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
Definition: Runtime.cpp:72
static std::vector< std::string > GetSharedObjects(const std::vector< std::string > &backendPaths)
Status UnloadNetwork(NetworkId networkId)
Unloads a network from the IRuntime.
Definition: Runtime.cpp:84
std::vector< std::pair< LayerBindingId, class Tensor > > OutputTensors
Definition: Tensor.hpp:341
Status
enumeration
Definition: Types.hpp:26
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:174
void SetProfilingService(armnn::Optional< profiling::ProfilingService &> profilingService)
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
std::unique_ptr< RuntimeImpl > pRuntimeImpl
Definition: IRuntime.hpp:177
Status LoadNetwork(NetworkId &networkIdOut, IOptimizedNetworkPtr network)
Loads a complete network into the IRuntime.
Definition: Runtime.cpp:47
Device specific knowledge to be passed to the optimizer.
Definition: Types.hpp:200
static IRuntime * CreateRaw(const CreationOptions &options)
Definition: Runtime.cpp:32
const IDeviceSpec & GetDeviceSpec() const
Definition: Runtime.cpp:89
Status EnqueueWorkload(const InputTensors &inputTensors, const OutputTensors &outputTensors)
std::string m_DynamicBackendsPath
Setting this value will override the paths set by the DYNAMIC_BACKEND_PATHS compiler directive Only a...
Definition: IRuntime.hpp:60
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
void RegisterProfiler(IProfiler *profiler)
Definition: Profiling.cpp:496
const std::shared_ptr< IProfiler > & GetProfiler() const
void RegisterDebugCallback(const DebugCallbackFunction &func)
RuntimeImpl(const IRuntime::CreationOptions &options)
Creates a runtime for workload execution.
Definition: Runtime.cpp:247
const BackendIdSet & GetDynamicBackends() const
Definition: DeviceSpec.hpp:48
void ClearDynamicBackends()
Definition: DeviceSpec.hpp:39
static BackendIdSet RegisterDynamicBackends(const std::vector< DynamicBackendPtr > &dynamicBackends)
void AddBackendProfilingContext(const BackendId backendId, std::shared_ptr< armnn::profiling::IBackendProfilingContext > profilingContext)
ExternalProfilingOptions m_ProfilingOptions
Definition: IRuntime.hpp:84
ProfilingState ConfigureProfilingService(const ExternalProfilingOptions &options, bool resetProfilingService=false)
static std::unique_ptr< LoadedNetwork > MakeLoadedNetwork(std::unique_ptr< IOptimizedNetwork > net, std::string &errorMessage, const INetworkProperties &networkProperties, profiling::ProfilingService &profilingService)
Status EnqueueWorkload(NetworkId networkId, const InputTensors &inputTensors, const OutputTensors &outputTensors)
Evaluates a network using input in inputTensors and outputs filled into outputTensors.
Definition: Runtime.cpp:77
Class for non-fatal exceptions raised while initialising a backend.
Definition: Exceptions.hpp:68