21 using namespace armnn;
49 return pRuntimeImpl->LoadNetwork(networkIdOut, std::move(network));
54 std::string& errorMessage)
56 return pRuntimeImpl->LoadNetwork(networkIdOut, std::move(network), errorMessage);
61 std::string& errorMessage,
64 return pRuntimeImpl->LoadNetwork(networkIdOut, std::move(network), errorMessage, networkProperties);
69 return pRuntimeImpl->GetInputTensorInfo(networkId, layerId);
74 return pRuntimeImpl->GetOutputTensorInfo(networkId, layerId);
81 return pRuntimeImpl->EnqueueWorkload(networkId, inputTensors, outputTensors);
101 return pRuntimeImpl->RegisterDebugCallback(networkId, func);
104 int RuntimeImpl::GenerateNetworkId()
106 return m_NetworkIdCounter++;
111 std::string ignoredErrorMessage;
112 return LoadNetwork(networkIdOut, std::move(inNetwork), ignoredErrorMessage);
117 std::string& errorMessage)
120 return LoadNetwork(networkIdOut, std::move(inNetwork), errorMessage, networkProperties);
125 std::string& errorMessage,
130 networkIdOut = GenerateNetworkId();
132 for (
auto&& context : m_BackendContexts)
134 context.second->BeforeLoadNetwork(networkIdOut);
138 std::unique_ptr<IOptimizedNetwork>(rawNetwork),
149 std::lock_guard<std::mutex> lockGuard(m_Mutex);
152 m_LoadedNetworks[networkIdOut] = std::move(loadedNetwork);
155 for (
auto&& context : m_BackendContexts)
157 context.second->AfterLoadNetwork(networkIdOut);
160 if (m_ProfilingService.IsProfilingEnabled())
162 m_ProfilingService.IncrementCounterValue(armnn::profiling::NETWORK_LOADS);
170 bool unloadOk =
true;
171 for (
auto&& context : m_BackendContexts)
173 unloadOk &= context.second->BeforeUnloadNetwork(networkId);
179 "network with ID:" << networkId <<
" because BeforeUnloadNetwork failed";
183 std::unique_ptr<profiling::TimelineUtilityMethods> timelineUtils =
186 std::lock_guard<std::mutex> lockGuard(m_Mutex);
191 auto search = m_LoadedNetworks.find(networkId);
192 if (search != m_LoadedNetworks.end())
195 timelineUtils->RecordEvent(networkGuid,
199 if (m_LoadedNetworks.erase(networkId) == 0)
201 ARMNN_LOG(
warning) <<
"WARNING: RuntimeImpl::UnloadNetwork(): " << networkId <<
" not found!";
205 if (m_ProfilingService.IsProfilingEnabled())
207 m_ProfilingService.IncrementCounterValue(armnn::profiling::NETWORK_UNLOADS);
211 for (
auto&& context : m_BackendContexts)
213 context.second->AfterUnloadNetwork(networkId);
216 ARMNN_LOG(
debug) <<
"RuntimeImpl::UnloadNetwork(): Unloaded network with ID: " << networkId;
222 auto it = m_LoadedNetworks.find(networkId);
223 if (it != m_LoadedNetworks.end())
225 auto& loadedNetwork = it->second;
226 return loadedNetwork->GetProfiler();
237 LoadedNetworks::iterator it = m_LoadedNetworks.begin();
238 while (it != m_LoadedNetworks.end())
240 auto& loadedNetwork = it->second;
241 loadedNetwork->SendNetworkStructure();
248 : m_NetworkIdCounter(0),
249 m_ProfilingService(*this)
256 throw RuntimeException(
"It is not possible to enable timeline reporting without profiling being enabled");
269 auto backend = factoryFun();
272 auto context = backend->CreateBackendContext(options);
278 m_BackendContexts.emplace(std::make_pair(
id, std::move(context)));
280 supportedBackends.emplace(
id);
282 unique_ptr<armnn::profiling::IBackendProfiling> profilingIface =
284 options, m_ProfilingService,
id));
287 auto profilingContext = backend->CreateBackendProfilingContext(options, profilingIface);
289 if (profilingContext)
312 ARMNN_LOG(
info) <<
"Initialization time: " << std::setprecision(2)
319 std::vector<int> networkIDs;
323 std::transform(m_LoadedNetworks.begin(), m_LoadedNetworks.end(),
324 std::back_inserter(networkIDs),
325 [](
const auto &pair) {
return pair.first; });
327 catch (
const std::exception& e)
332 std::cerr <<
"WARNING: An error has occurred when getting the IDs of the networks to unload: " << e.what()
333 <<
"\nSome of the loaded networks may not be unloaded" << std::endl;
338 for (
auto networkID : networkIDs)
346 catch (
const std::exception& e)
351 std::cerr <<
"WARNING: An error has occurred when unloading network " << networkID <<
": " << e.what()
359 m_BackendContexts.clear();
368 std::lock_guard<std::mutex> lockGuard(m_Mutex);
369 return m_LoadedNetworks.at(networkId).get();
387 LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);
392 static thread_local
NetworkId lastId = networkId;
393 if (lastId != networkId)
407 LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);
411 void RuntimeImpl::LoadDynamicBackends(
const std::string& overrideBackendPath)
void AddSupportedBackends(const BackendIdSet &backendIds, bool isDynamic=false)
TensorInfo GetInputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
void WaitForProfilingServiceActivation(unsigned int timeout) override
static IRuntimePtr Create(const CreationOptions &options)
FactoryFunction GetFactory(const BackendId &id) const
const std::shared_ptr< IProfiler > GetProfiler(NetworkId networkId) const
Gets the profiler corresponding to the given network id.
std::chrono::duration< double, std::milli > GetTimeDuration(std::chrono::high_resolution_clock::time_point start_time)
static void DeregisterDynamicBackends(const BackendIdSet &dynamicBackends)
std::unordered_set< BackendId > BackendIdSet
static std::unique_ptr< TimelineUtilityMethods > GetTimelineUtils(ProfilingService &profilingService)
static ProfilerManager & GetInstance()
TensorInfo GetInputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
#define ARMNN_VERSION
ARMNN_VERSION: "X.Y.Z" where: X = Major version number, Y = Minor version number, Z = Patch version number.
Status UnloadNetwork(NetworkId networkId)
Unloads a network from the Runtime.
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
TensorInfo GetInputTensorInfo(LayerBindingId layerId) const
#define ARMNN_LOG(severity)
BackendRegistry & BackendRegistryInstance()
Status LoadNetwork(NetworkId &networkIdOut, IOptimizedNetworkPtr network)
Loads a complete network into the Runtime.
std::vector< std::pair< LayerBindingId, class ConstTensor > > InputTensors
static std::vector< DynamicBackendPtr > CreateDynamicBackends(const std::vector< std::string > &sharedObjects)
TensorInfo GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
std::chrono::high_resolution_clock::time_point GetTimeNow()
TensorInfo GetOutputTensorInfo(LayerBindingId layerId) const
Copyright (c) 2021 ARM Limited and Contributors.
Status EnqueueWorkload(NetworkId networkId, const InputTensors &inputTensors, const OutputTensors &outputTensors)
void RegisterDebugCallback(NetworkId networkId, const DebugCallbackFunction &func)
Registers a callback function to debug layers performing custom computations on intermediate tensors.
const std::shared_ptr< IProfiler > GetProfiler(NetworkId networkId) const
Gets the profiler corresponding to the given network id.
std::function< void(LayerGuid guid, unsigned int slotIndex, ITensorHandle *tensorHandle)> DebugCallbackFunction
Define the type of callback for the Debug layer to call.
static std::vector< std::string > GetBackendPaths(const std::string &overrideBackendPath="")
static ARMNN_DLLEXPORT ProfilingStaticGuid ARMNN_PROFILING_EOL_EVENT_CLASS
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
static void Destroy(IRuntime *runtime)
void RegisterDebugCallback(NetworkId networkId, const DebugCallbackFunction &func)
Registers a callback function to debug layers performing custom computations on intermediate tensors.
TensorInfo GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
static std::vector< std::string > GetSharedObjects(const std::vector< std::string > &backendPaths)
Status UnloadNetwork(NetworkId networkId)
Unloads a network from the IRuntime.
std::vector< std::pair< LayerBindingId, class Tensor > > OutputTensors
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
void SetProfilingService(armnn::Optional< profiling::ProfilingService &> profilingService)
#define ARMNN_ASSERT(COND)
std::unique_ptr< RuntimeImpl > pRuntimeImpl
Status LoadNetwork(NetworkId &networkIdOut, IOptimizedNetworkPtr network)
Loads a complete network into the IRuntime.
Device specific knowledge to be passed to the optimizer.
static IRuntime * CreateRaw(const CreationOptions &options)
const IDeviceSpec & GetDeviceSpec() const
Status EnqueueWorkload(const InputTensors &inputTensors, const OutputTensors &outputTensors)
std::string m_DynamicBackendsPath
Setting this value will override the paths set by the DYNAMIC_BACKEND_PATHS compiler directive Only a...
EmptyOptional is used to initialize the Optional class in case we want to have a default value for an Optional.
void RegisterProfiler(IProfiler *profiler)
const std::shared_ptr< IProfiler > & GetProfiler() const
void RegisterDebugCallback(const DebugCallbackFunction &func)
RuntimeImpl(const IRuntime::CreationOptions &options)
Creates a runtime for workload execution.
const BackendIdSet & GetDynamicBackends() const
void ClearDynamicBackends()
static BackendIdSet RegisterDynamicBackends(const std::vector< DynamicBackendPtr > &dynamicBackends)
void AddBackendProfilingContext(const BackendId backendId, std::shared_ptr< armnn::profiling::IBackendProfilingContext > profilingContext)
ExternalProfilingOptions m_ProfilingOptions
ProfilingState ConfigureProfilingService(const ExternalProfilingOptions &options, bool resetProfilingService=false)
static std::unique_ptr< LoadedNetwork > MakeLoadedNetwork(std::unique_ptr< IOptimizedNetwork > net, std::string &errorMessage, const INetworkProperties &networkProperties, profiling::ProfilingService &profilingService)
Status EnqueueWorkload(NetworkId networkId, const InputTensors &inputTensors, const OutputTensors &outputTensors)
Evaluates a network using input in inputTensors and outputs filled into outputTensors.
Class for non-fatal exceptions raised while initialising a backend.