ArmNN
 20.02
Runtime.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include "Runtime.hpp"
6 
7 #include <armnn/Version.hpp>
9 #include <armnn/Logging.hpp>
10 
13 
14 #include <ProfilingService.hpp>
15 
16 #include <iostream>
17 
18 #include <boost/polymorphic_cast.hpp>
20 
21 using namespace armnn;
22 using namespace std;
23 
24 namespace armnn
25 {
26 
28 {
29  return new Runtime(options);
30 }
31 
33 {
34  return IRuntimePtr(CreateRaw(options), &IRuntime::Destroy);
35 }
36 
38 {
39  delete boost::polymorphic_downcast<Runtime*>(runtime);
40 }
41 
42 int Runtime::GenerateNetworkId()
43 {
44  return m_NetworkIdCounter++;
45 }
46 
48 {
49  std::string ignoredErrorMessage;
50  return LoadNetwork(networkIdOut, std::move(inNetwork), ignoredErrorMessage);
51 }
52 
54  IOptimizedNetworkPtr inNetwork,
55  std::string& errorMessage)
56 {
57  INetworkProperties networkProperties;
58  return LoadNetwork(networkIdOut, std::move(inNetwork), errorMessage, networkProperties);
59 }
60 
62  IOptimizedNetworkPtr inNetwork,
63  std::string& errorMessage,
64  const INetworkProperties& networkProperties)
65 {
66  IOptimizedNetwork* rawNetwork = inNetwork.release();
67 
68  networkIdOut = GenerateNetworkId();
69 
70  for (auto&& context : m_BackendContexts)
71  {
72  context.second->BeforeLoadNetwork(networkIdOut);
73  }
74 
75  unique_ptr<LoadedNetwork> loadedNetwork = LoadedNetwork::MakeLoadedNetwork(
76  std::unique_ptr<OptimizedNetwork>(boost::polymorphic_downcast<OptimizedNetwork*>(rawNetwork)),
77  errorMessage,
78  networkProperties);
79 
80  if (!loadedNetwork)
81  {
82  return Status::Failure;
83  }
84 
85  {
86  std::lock_guard<std::mutex> lockGuard(m_Mutex);
87 
88  // Stores the network
89  m_LoadedNetworks[networkIdOut] = std::move(loadedNetwork);
90  }
91 
92  for (auto&& context : m_BackendContexts)
93  {
94  context.second->AfterLoadNetwork(networkIdOut);
95  }
96 
97  if (profiling::ProfilingService::Instance().IsProfilingEnabled())
98  {
99  profiling::ProfilingService::Instance().IncrementCounterValue(armnn::profiling::NETWORK_LOADS);
100  }
101 
102  return Status::Success;
103 }
104 
106 {
107  bool unloadOk = true;
108  for (auto&& context : m_BackendContexts)
109  {
110  unloadOk &= context.second->BeforeUnloadNetwork(networkId);
111  }
112 
113  if (!unloadOk)
114  {
115  ARMNN_LOG(warning) << "Runtime::UnloadNetwork(): failed to unload "
116  "network with ID:" << networkId << " because BeforeUnloadNetwork failed";
117  return Status::Failure;
118  }
119 
120  {
121  std::lock_guard<std::mutex> lockGuard(m_Mutex);
122 
123  if (m_LoadedNetworks.erase(networkId) == 0)
124  {
125  ARMNN_LOG(warning) << "WARNING: Runtime::UnloadNetwork(): " << networkId << " not found!";
126  return Status::Failure;
127  }
128  if (profiling::ProfilingService::Instance().IsProfilingEnabled())
129  {
130  profiling::ProfilingService::Instance().IncrementCounterValue(armnn::profiling::NETWORK_UNLOADS);
131  }
132  }
133 
134  for (auto&& context : m_BackendContexts)
135  {
136  context.second->AfterUnloadNetwork(networkId);
137  }
138 
139  ARMNN_LOG(debug) << "Runtime::UnloadNetwork(): Unloaded network with ID: " << networkId;
140  return Status::Success;
141 }
142 
143 const std::shared_ptr<IProfiler> Runtime::GetProfiler(NetworkId networkId) const
144 {
145  auto it = m_LoadedNetworks.find(networkId);
146  if (it != m_LoadedNetworks.end())
147  {
148  auto& loadedNetwork = it->second;
149  return loadedNetwork->GetProfiler();
150  }
151 
152  return nullptr;
153 }
154 
156  : m_NetworkIdCounter(0)
157 {
158  ARMNN_LOG(info) << "ArmNN v" << ARMNN_VERSION << "\n";
159 
160  // pass configuration info to the profiling service
162 
163  // Load any available/compatible dynamic backend before the runtime
164  // goes through the backend registry
165  LoadDynamicBackends(options.m_DynamicBackendsPath);
166 
167  BackendIdSet supportedBackends;
168  for (const auto& id : BackendRegistryInstance().GetBackendIds())
169  {
170  // Store backend contexts for the supported ones
171  try {
172  auto factoryFun = BackendRegistryInstance().GetFactory(id);
173  auto backend = factoryFun();
174  BOOST_ASSERT(backend.get() != nullptr);
175 
176  auto context = backend->CreateBackendContext(options);
177 
178  // backends are allowed to return nullptrs if they
179  // don't wish to create a backend specific context
180  if (context)
181  {
182  m_BackendContexts.emplace(std::make_pair(id, std::move(context)));
183  }
184  supportedBackends.emplace(id);
185 
186  unique_ptr<armnn::profiling::IBackendProfiling> profilingIface =
187  std::make_unique<armnn::profiling::BackendProfiling>(armnn::profiling::BackendProfiling(
189 
190  // Backends may also provide a profiling context. Ask for it now.
191  auto profilingContext = backend->CreateBackendProfilingContext(options, profilingIface);
192  // Backends that don't support profiling will return a null profiling context.
193  if (profilingContext)
194  {
195  // Enable profiling on the backend and assert that it returns true
196  if(profilingContext->EnableProfiling(true))
197  {
198  // Pass the context onto the profiling service.
200  }
201  else
202  {
203  throw BackendProfilingException("Unable to enable profiling on Backend Id: " + id.Get());
204  }
205  }
206  }
207  catch (const BackendUnavailableException&)
208  {
209  // Ignore backends which are unavailable
210  }
211 
212  }
213  m_DeviceSpec.AddSupportedBackends(supportedBackends);
214 }
215 
217 {
218  std::vector<int> networkIDs;
219  try
220  {
221  // Coverity fix: The following code may throw an exception of type std::length_error.
222  std::transform(m_LoadedNetworks.begin(), m_LoadedNetworks.end(),
223  std::back_inserter(networkIDs),
224  [](const auto &pair) { return pair.first; });
225  }
226  catch (const std::exception& e)
227  {
228  // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
229  // exception of type std::length_error.
230  // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
231  std::cerr << "WARNING: An error has occurred when getting the IDs of the networks to unload: " << e.what()
232  << "\nSome of the loaded networks may not be unloaded" << std::endl;
233  }
234  // We then proceed to unload all the networks which IDs have been appended to the list
235  // up to the point the exception was thrown (if any).
236 
237  for (auto networkID : networkIDs)
238  {
239  try
240  {
241  // Coverity fix: UnloadNetwork() may throw an exception of type std::length_error,
242  // boost::log::v2s_mt_posix::odr_violation or boost::log::v2s_mt_posix::system_error
243  UnloadNetwork(networkID);
244  }
245  catch (const std::exception& e)
246  {
247  // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
248  // exception of type std::length_error.
249  // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
250  std::cerr << "WARNING: An error has occurred when unloading network " << networkID << ": " << e.what()
251  << std::endl;
252  }
253  }
254 
255 
256  // Clear all dynamic backends.
258  m_DeviceSpec.ClearDynamicBackends();
259  m_BackendContexts.clear();
260 }
261 
262 LoadedNetwork* Runtime::GetLoadedNetworkPtr(NetworkId networkId) const
263 {
264  std::lock_guard<std::mutex> lockGuard(m_Mutex);
265  return m_LoadedNetworks.at(networkId).get();
266 }
267 
269 {
270  return GetLoadedNetworkPtr(networkId)->GetInputTensorInfo(layerId);
271 }
272 
274 {
275  return GetLoadedNetworkPtr(networkId)->GetOutputTensorInfo(layerId);
276 }
277 
278 
280  const InputTensors& inputTensors,
281  const OutputTensors& outputTensors)
282 {
283  LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);
284 
285  static thread_local NetworkId lastId = networkId;
286  if (lastId != networkId)
287  {
288  LoadedNetworkFuncSafe(lastId, [](LoadedNetwork* network)
289  {
290  network->FreeWorkingMemory();
291  });
292  }
293  lastId=networkId;
294 
295  return loadedNetwork->EnqueueWorkload(inputTensors, outputTensors);
296 }
297 
299 {
300  LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);
301  loadedNetwork->RegisterDebugCallback(func);
302 }
303 
304 void Runtime::LoadDynamicBackends(const std::string& overrideBackendPath)
305 {
306  // Get the paths where to load the dynamic backends from
307  std::vector<std::string> backendPaths = DynamicBackendUtils::GetBackendPaths(overrideBackendPath);
308 
309  // Get the shared objects to try to load as dynamic backends
310  std::vector<std::string> sharedObjects = DynamicBackendUtils::GetSharedObjects(backendPaths);
311 
312  // Create a list of dynamic backends
313  m_DynamicBackends = DynamicBackendUtils::CreateDynamicBackends(sharedObjects);
314 
315  // Register the dynamic backends in the backend registry
316  BackendIdSet registeredBackendIds = DynamicBackendUtils::RegisterDynamicBackends(m_DynamicBackends);
317 
318  // Add the registered dynamic backend ids to the list of supported backends
319  m_DeviceSpec.AddSupportedBackends(registeredBackendIds, true);
320 }
321 
322 } // namespace armnn
void AddSupportedBackends(const BackendIdSet &backendIds, bool isDynamic=false)
Definition: DeviceSpec.hpp:30
#define ARMNN_VERSION
ARMNN_VERSION: "YYYYMMPP" where: YYYY = 4-digit year number, MM = 2-digit month number, PP = 2-digit patch number.
Definition: Version.hpp:24
virtual TensorInfo GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const override
Definition: Runtime.cpp:273
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:32
static ProfilingService & Instance()
FactoryFunction GetFactory(const BackendId &id) const
static void DeregisterDynamicBackends(const BackendIdSet &dynamicBackends)
std::unordered_set< BackendId > BackendIdSet
Definition: BackendId.hpp:191
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:24
TensorInfo GetInputTensorInfo(LayerBindingId layerId) const
#define ARMNN_LOG(severity)
Definition: Logging.hpp:163
BackendRegistry & BackendRegistryInstance()
std::vector< std::pair< LayerBindingId, class ConstTensor > > InputTensors
Definition: Tensor.hpp:225
static std::vector< DynamicBackendPtr > CreateDynamicBackends(const std::vector< std::string > &sharedObjects)
int NetworkId
Definition: IRuntime.hpp:19
TensorInfo GetOutputTensorInfo(LayerBindingId layerId) const
Copyright (c) 2020 ARM Limited.
uint32_t IncrementCounterValue(uint16_t counterUid) override
std::function< void(LayerGuid guid, unsigned int slotIndex, ITensorHandle *tensorHandle)> DebugCallbackFunction
Define the type of callback for the Debug layer to call.
Definition: Types.hpp:244
virtual const std::shared_ptr< IProfiler > GetProfiler(NetworkId networkId) const override
Gets the profiler corresponding to the given network id.
Definition: Runtime.cpp:143
static std::vector< std::string > GetBackendPaths(const std::string &overrideBackendPath="")
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:171
static void Destroy(IRuntime *runtime)
Definition: Runtime.cpp:37
static std::unique_ptr< LoadedNetwork > MakeLoadedNetwork(std::unique_ptr< OptimizedNetwork > net, std::string &errorMessage, const INetworkProperties &networkProperties)
virtual void RegisterDebugCallback(NetworkId networkId, const DebugCallbackFunction &func) override
Registers a callback function to debug layers performing custom computations on intermediate tensors...
Definition: Runtime.cpp:298
static std::vector< std::string > GetSharedObjects(const std::vector< std::string > &backendPaths)
std::vector< std::pair< LayerBindingId, class Tensor > > OutputTensors
Definition: Tensor.hpp:226
Status
enumeration
Definition: Types.hpp:26
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:566
static IRuntime * CreateRaw(const CreationOptions &options)
Definition: Runtime.cpp:27
Status EnqueueWorkload(const InputTensors &inputTensors, const OutputTensors &outputTensors)
std::string m_DynamicBackendsPath
Setting this value will override the paths set by the DYNAMIC_BACKEND_PATHS compiler directive Only a...
Definition: IRuntime.hpp:58
virtual Status LoadNetwork(NetworkId &networkIdOut, IOptimizedNetworkPtr network) override
Loads a complete network into the Runtime.
Definition: Runtime.cpp:47
void RegisterDebugCallback(const DebugCallbackFunction &func)
const BackendIdSet & GetDynamicBackends() const
Definition: DeviceSpec.hpp:48
void ClearDynamicBackends()
Definition: DeviceSpec.hpp:39
virtual Status UnloadNetwork(NetworkId networkId) override
Unloads a network from the Runtime.
Definition: Runtime.cpp:105
static BackendIdSet RegisterDynamicBackends(const std::vector< DynamicBackendPtr > &dynamicBackends)
void AddBackendProfilingContext(const BackendId backendId, std::shared_ptr< armnn::profiling::IBackendProfilingContext > profilingContext)
armnn::Runtime::CreationOptions::ExternalProfilingOptions options
ExternalProfilingOptions m_ProfilingOptions
Definition: IRuntime.hpp:76
Runtime(const CreationOptions &options)
Creates a runtime for workload execution.
Definition: Runtime.cpp:155
virtual Status EnqueueWorkload(NetworkId networkId, const InputTensors &inputTensors, const OutputTensors &outputTensors) override
Evaluates a network using input in inputTensors and outputs filled into outputTensors.
Definition: Runtime.cpp:279
ProfilingState ConfigureProfilingService(const ExternalProfilingOptions &options, bool resetProfilingService=false)
virtual TensorInfo GetInputTensorInfo(NetworkId networkId, LayerBindingId layerId) const override
Definition: Runtime.cpp:268
Class for non-fatal exceptions raised while initialising a backend.
Definition: Exceptions.hpp:68