Runtime.cpp (ArmNN 20.05)
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "Runtime.hpp"

#include <armnn/Version.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/Logging.hpp>
#include <armnn/utility/Timer.hpp>

#include <armnn/backends/IBackendContext.hpp>
#include <backendsCommon/DynamicBackendUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <iostream>

#include <backends/BackendProfiling.hpp>

using namespace armnn;
using namespace std;

namespace armnn
{

IRuntime* IRuntime::CreateRaw(const CreationOptions& options)
{
    return new Runtime(options);
}

IRuntimePtr IRuntime::Create(const CreationOptions& options)
{
    return IRuntimePtr(CreateRaw(options), &IRuntime::Destroy);
}

void IRuntime::Destroy(IRuntime* runtime)
{
    delete PolymorphicDowncast<Runtime*>(runtime);
}

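// Illustrative usage sketch: client code normally goes through the smart-pointer factory, so
// IRuntime::Destroy runs automatically when the IRuntimePtr goes out of scope.
//
//     armnn::IRuntime::CreationOptions options;
//     armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);
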
int Runtime::GenerateNetworkId()
{
    return m_NetworkIdCounter++;
}

// Loads a complete network into the Runtime.
Status Runtime::LoadNetwork(NetworkId& networkIdOut, IOptimizedNetworkPtr inNetwork)
{
    std::string ignoredErrorMessage;
    return LoadNetwork(networkIdOut, std::move(inNetwork), ignoredErrorMessage);
}

Status Runtime::LoadNetwork(NetworkId& networkIdOut,
                            IOptimizedNetworkPtr inNetwork,
                            std::string& errorMessage)
{
    INetworkProperties networkProperties;
    return LoadNetwork(networkIdOut, std::move(inNetwork), errorMessage, networkProperties);
}

Status Runtime::LoadNetwork(NetworkId& networkIdOut,
                            IOptimizedNetworkPtr inNetwork,
                            std::string& errorMessage,
                            const INetworkProperties& networkProperties)
{
    IOptimizedNetwork* rawNetwork = inNetwork.release();

    networkIdOut = GenerateNetworkId();

    for (auto&& context : m_BackendContexts)
    {
        context.second->BeforeLoadNetwork(networkIdOut);
    }

    unique_ptr<LoadedNetwork> loadedNetwork = LoadedNetwork::MakeLoadedNetwork(
        std::unique_ptr<OptimizedNetwork>(PolymorphicDowncast<OptimizedNetwork*>(rawNetwork)),
        errorMessage,
        networkProperties,
        m_ProfilingService);

    if (!loadedNetwork)
    {
        return Status::Failure;
    }

    {
        std::lock_guard<std::mutex> lockGuard(m_Mutex);

        // Stores the network
        m_LoadedNetworks[networkIdOut] = std::move(loadedNetwork);
    }

    for (auto&& context : m_BackendContexts)
    {
        context.second->AfterLoadNetwork(networkIdOut);
    }

    if (m_ProfilingService.IsProfilingEnabled())
    {
        m_ProfilingService.IncrementCounterValue(armnn::profiling::NETWORK_LOADS);
    }

    return Status::Success;
}

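// Illustrative usage sketch (assumes `network` is an INetworkPtr already built by the caller):
// the runtime only accepts optimised networks, so Optimize() runs first and the resulting
// IOptimizedNetworkPtr is moved into LoadNetwork().
//
//     armnn::IRuntimePtr runtime = armnn::IRuntime::Create(armnn::IRuntime::CreationOptions());
//     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*network,
//                                                          {armnn::Compute::CpuRef},
//                                                          runtime->GetDeviceSpec());
//     armnn::NetworkId netId = 0;
//     std::string errorMessage;
//     if (runtime->LoadNetwork(netId, std::move(optNet), errorMessage) != armnn::Status::Success)
//     {
//         std::cerr << errorMessage << std::endl;
//     }
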
// Unloads a network from the Runtime.
Status Runtime::UnloadNetwork(NetworkId networkId)
{
    bool unloadOk = true;
    for (auto&& context : m_BackendContexts)
    {
        unloadOk &= context.second->BeforeUnloadNetwork(networkId);
    }

    if (!unloadOk)
    {
        ARMNN_LOG(warning) << "Runtime::UnloadNetwork(): failed to unload "
                              "network with ID:" << networkId << " because BeforeUnloadNetwork failed";
        return Status::Failure;
    }

    {
        std::lock_guard<std::mutex> lockGuard(m_Mutex);

        if (m_LoadedNetworks.erase(networkId) == 0)
        {
            ARMNN_LOG(warning) << "WARNING: Runtime::UnloadNetwork(): " << networkId << " not found!";
            return Status::Failure;
        }

        if (m_ProfilingService.IsProfilingEnabled())
        {
            m_ProfilingService.IncrementCounterValue(armnn::profiling::NETWORK_UNLOADS);
        }
    }

    for (auto&& context : m_BackendContexts)
    {
        context.second->AfterUnloadNetwork(networkId);
    }

    ARMNN_LOG(debug) << "Runtime::UnloadNetwork(): Unloaded network with ID: " << networkId;
    return Status::Success;
}

// Gets the profiler corresponding to the given network id.
const std::shared_ptr<IProfiler> Runtime::GetProfiler(NetworkId networkId) const
{
    auto it = m_LoadedNetworks.find(networkId);
    if (it != m_LoadedNetworks.end())
    {
        auto& loadedNetwork = it->second;
        return loadedNetwork->GetProfiler();
    }

    return nullptr;
}

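// Illustrative usage sketch: the per-network profiler returned here can be enabled before running
// workloads and its results printed afterwards (EnableProfiling() and Print() on IProfiler).
//
//     std::shared_ptr<armnn::IProfiler> profiler = runtime->GetProfiler(netId);
//     if (profiler)
//     {
//         profiler->EnableProfiling(true);
//         // ... EnqueueWorkload() calls ...
//         profiler->Print(std::cout);
//     }
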
void Runtime::ReportStructure() // armnn::profiling::IProfilingService& profilingService as param
{
    // No-op for the time being, but this may be useful in future to have the profilingService available
    // if (profilingService.IsProfilingEnabled()){}

    LoadedNetworks::iterator it = m_LoadedNetworks.begin();
    while (it != m_LoadedNetworks.end())
    {
        auto& loadedNetwork = it->second;
        loadedNetwork->SendNetworkStructure();
        // Increment the Iterator to point to next entry
        it++;
    }
}

// Creates a runtime for workload execution.
Runtime::Runtime(const CreationOptions& options)
    : m_NetworkIdCounter(0),
      m_ProfilingService(*this)
{
    const auto start_time = armnn::GetTimeNow();
    ARMNN_LOG(info) << "ArmNN v" << ARMNN_VERSION << "\n";

    if (options.m_ProfilingOptions.m_TimelineEnabled && !options.m_ProfilingOptions.m_EnableProfiling)
    {
        throw RuntimeException("It is not possible to enable timeline reporting without profiling being enabled");
    }

    // Load any available/compatible dynamic backend before the runtime
    // goes through the backend registry
    LoadDynamicBackends(options.m_DynamicBackendsPath);

    BackendIdSet supportedBackends;
    for (const auto& id : BackendRegistryInstance().GetBackendIds())
    {
        // Store backend contexts for the supported ones
        try {
            auto factoryFun = BackendRegistryInstance().GetFactory(id);
            auto backend = factoryFun();
            ARMNN_ASSERT(backend.get() != nullptr);

            auto context = backend->CreateBackendContext(options);

            // backends are allowed to return nullptrs if they
            // don't wish to create a backend specific context
            if (context)
            {
                m_BackendContexts.emplace(std::make_pair(id, std::move(context)));
            }
            supportedBackends.emplace(id);

            unique_ptr<armnn::profiling::IBackendProfiling> profilingIface =
                std::make_unique<armnn::profiling::BackendProfiling>(armnn::profiling::BackendProfiling(
                    options, m_ProfilingService, id));

            // Backends may also provide a profiling context. Ask for it now.
            auto profilingContext = backend->CreateBackendProfilingContext(options, profilingIface);
            // Backends that don't support profiling will return a null profiling context.
            if (profilingContext)
            {
                // Pass the context onto the profiling service.
                m_ProfilingService.AddBackendProfilingContext(id, profilingContext);
            }
        }
        catch (const BackendUnavailableException&)
        {
            // Ignore backends which are unavailable
        }
    }

    BackendRegistryInstance().SetProfilingService(m_ProfilingService);
    // pass configuration info to the profiling service
    m_ProfilingService.ConfigureProfilingService(options.m_ProfilingOptions);

    m_DeviceSpec.AddSupportedBackends(supportedBackends);

    ARMNN_LOG(info) << "Initialization time: " << std::setprecision(2)
                    << std::fixed << armnn::GetTimeDuration(start_time).count() << " ms\n";
}

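// Illustrative usage sketch: external profiling is opted into through CreationOptions, and as the
// check above enforces, timeline reporting also requires profiling itself to be enabled.
//
//     armnn::IRuntime::CreationOptions options;
//     options.m_ProfilingOptions.m_EnableProfiling = true;
//     options.m_ProfilingOptions.m_TimelineEnabled = true;
//     armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);
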
Runtime::~Runtime()
{
    const auto start_time = armnn::GetTimeNow();
    std::vector<int> networkIDs;
    try
    {
        // Coverity fix: The following code may throw an exception of type std::length_error.
        std::transform(m_LoadedNetworks.begin(), m_LoadedNetworks.end(),
                       std::back_inserter(networkIDs),
                       [](const auto &pair) { return pair.first; });
    }
    catch (const std::exception& e)
    {
        // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
        // exception of type std::length_error.
        // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
        std::cerr << "WARNING: An error has occurred when getting the IDs of the networks to unload: " << e.what()
                  << "\nSome of the loaded networks may not be unloaded" << std::endl;
    }
    // We then proceed to unload all the networks whose IDs have been appended to the list
    // up to the point the exception was thrown (if any).

    for (auto networkID : networkIDs)
    {
        try
        {
            // Coverity fix: UnloadNetwork() may throw an exception of type std::length_error,
            // boost::log::v2s_mt_posix::odr_violation or boost::log::v2s_mt_posix::system_error
            UnloadNetwork(networkID);
        }
        catch (const std::exception& e)
        {
            // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
            // exception of type std::length_error.
            // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
            std::cerr << "WARNING: An error has occurred when unloading network " << networkID << ": " << e.what()
                      << std::endl;
        }
    }

    // Clear all dynamic backends.
    DynamicBackendUtils::DeregisterDynamicBackends(m_DeviceSpec.GetDynamicBackends());
    m_DeviceSpec.ClearDynamicBackends();
    m_BackendContexts.clear();

    BackendRegistryInstance().SetProfilingService(armnn::EmptyOptional());
    ARMNN_LOG(info) << "Shutdown time: " << std::setprecision(2)
                    << std::fixed << armnn::GetTimeDuration(start_time).count() << " ms\n";
}

LoadedNetwork* Runtime::GetLoadedNetworkPtr(NetworkId networkId) const
{
    std::lock_guard<std::mutex> lockGuard(m_Mutex);
    return m_LoadedNetworks.at(networkId).get();
}

TensorInfo Runtime::GetInputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
{
    return GetLoadedNetworkPtr(networkId)->GetInputTensorInfo(layerId);
}

TensorInfo Runtime::GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
{
    return GetLoadedNetworkPtr(networkId)->GetOutputTensorInfo(layerId);
}

// Evaluates a network using input in inputTensors and outputs filled into outputTensors.
Status Runtime::EnqueueWorkload(NetworkId networkId,
                                const InputTensors& inputTensors,
                                const OutputTensors& outputTensors)
{
    LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);

    static thread_local NetworkId lastId = networkId;
    if (lastId != networkId)
    {
        LoadedNetworkFuncSafe(lastId, [](LoadedNetwork* network)
            {
                network->FreeWorkingMemory();
            });
    }
    lastId=networkId;

    return loadedNetwork->EnqueueWorkload(inputTensors, outputTensors);
}

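// Illustrative usage sketch (binding id 0 and the float buffers are placeholders; real callers use
// the binding ids of their own input and output layers):
//
//     std::vector<float> inputData(runtime->GetInputTensorInfo(netId, 0).GetNumElements());
//     std::vector<float> outputData(runtime->GetOutputTensorInfo(netId, 0).GetNumElements());
//
//     armnn::InputTensors inputTensors
//     {
//         {0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}
//     };
//     armnn::OutputTensors outputTensors
//     {
//         {0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
//     };
//
//     runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
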
// Registers a callback function to debug layers performing custom computations on intermediate tensors.
void Runtime::RegisterDebugCallback(NetworkId networkId, const DebugCallbackFunction& func)
{
    LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);
    loadedNetwork->RegisterDebugCallback(func);
}

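// Illustrative usage sketch: the callback must match DebugCallbackFunction and only fires for
// Debug layers, so the network is typically optimised with debugging enabled (for example
// OptimizerOptions with m_Debug set).
//
//     runtime->RegisterDebugCallback(netId,
//         [](armnn::LayerGuid guid, unsigned int slotIndex, armnn::ITensorHandle* tensorHandle)
//         {
//             // Inspect the intermediate tensor here, e.g. by mapping tensorHandle.
//         });
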
void Runtime::LoadDynamicBackends(const std::string& overrideBackendPath)
{
    // Get the paths where to load the dynamic backends from
    std::vector<std::string> backendPaths = DynamicBackendUtils::GetBackendPaths(overrideBackendPath);

    // Get the shared objects to try to load as dynamic backends
    std::vector<std::string> sharedObjects = DynamicBackendUtils::GetSharedObjects(backendPaths);

    // Create a list of dynamic backends
    m_DynamicBackends = DynamicBackendUtils::CreateDynamicBackends(sharedObjects);

    // Register the dynamic backends in the backend registry
    BackendIdSet registeredBackendIds = DynamicBackendUtils::RegisterDynamicBackends(m_DynamicBackends);

    // Add the registered dynamic backend ids to the list of supported backends
    m_DeviceSpec.AddSupportedBackends(registeredBackendIds, true);
}

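// Illustrative usage sketch: the search path for dynamic backend shared objects can be overridden
// per runtime ("/path/to/dynamic/backends" is a placeholder).
//
//     armnn::IRuntime::CreationOptions options;
//     options.m_DynamicBackendsPath = "/path/to/dynamic/backends";
//     armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);
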
} // namespace armnn