// Copyright (c) 2021 ARM Limited and Contributors.

#include <common/include/LabelsAndEventClasses.hpp>

using namespace armnn;
using namespace std;
Status IRuntime::LoadNetwork(NetworkId& networkIdOut, IOptimizedNetworkPtr network)
{
    return pRuntimeImpl->LoadNetwork(networkIdOut, std::move(network));
}

Status IRuntime::LoadNetwork(NetworkId& networkIdOut,
                             IOptimizedNetworkPtr network,
                             std::string& errorMessage)
{
    return pRuntimeImpl->LoadNetwork(networkIdOut, std::move(network), errorMessage);
}

Status IRuntime::LoadNetwork(NetworkId& networkIdOut,
                             IOptimizedNetworkPtr network,
                             std::string& errorMessage,
                             const INetworkProperties& networkProperties)
{
    return pRuntimeImpl->LoadNetwork(networkIdOut, std::move(network), errorMessage, networkProperties);
}
TensorInfo IRuntime::GetInputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
{
    return pRuntimeImpl->GetInputTensorInfo(networkId, layerId);
}

TensorInfo IRuntime::GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
{
    return pRuntimeImpl->GetOutputTensorInfo(networkId, layerId);
}

std::vector<ImportedInputId> IRuntime::ImportInputs(NetworkId networkId, const InputTensors& inputTensors)
{
    return pRuntimeImpl->ImportInputs(networkId, inputTensors);
}

std::vector<ImportedOutputId> IRuntime::ImportOutputs(NetworkId networkId, const OutputTensors& outputTensors)
{
    return pRuntimeImpl->ImportOutputs(networkId, outputTensors);
}

void IRuntime::ClearImportedInputs(NetworkId networkId, const std::vector<ImportedInputId> inputIds)
{
    return pRuntimeImpl->ClearImportedInputs(networkId, inputIds);
}

void IRuntime::ClearImportedOutputs(NetworkId networkId, const std::vector<ImportedOutputId> outputIds)
{
    return pRuntimeImpl->ClearImportedOutputs(networkId, outputIds);
}
Status IRuntime::EnqueueWorkload(NetworkId networkId,
                                 const InputTensors& inputTensors,
                                 const OutputTensors& outputTensors)
{
    return pRuntimeImpl->EnqueueWorkload(networkId, inputTensors, outputTensors);
}

Status IRuntime::Execute(IWorkingMemHandle& workingMemHandle,
                         const InputTensors& inputTensors,
                         const OutputTensors& outputTensors,
                         std::vector<ImportedInputId> preImportedInputs,
                         std::vector<ImportedOutputId> preImportedOutputs)
{
    return pRuntimeImpl->Execute(workingMemHandle, inputTensors, outputTensors,
                                 preImportedInputs, preImportedOutputs);
}

void IRuntime::RegisterDebugCallback(NetworkId networkId, const DebugCallbackFunction& func)
{
    return pRuntimeImpl->RegisterDebugCallback(networkId, func);
}
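// Usage sketch (not part of Runtime.cpp): the forwarded API above supports a
// simple synchronous flow. optNet is assumed to come from armnn::Optimize();
// binding id 0 and the raw buffers are placeholders, and <armnn/ArmNN.hpp> is
// assumed to be included. Depending on the ArmNN version, the input
// TensorInfo may also need SetConstant(true).
void RunOnce(armnn::IOptimizedNetworkPtr optNet, const float* inputData, float* outputData)
{
    using namespace armnn;
    IRuntime::CreationOptions creationOptions;
    IRuntimePtr runtime = IRuntime::Create(creationOptions);

    NetworkId netId{};
    if (runtime->LoadNetwork(netId, std::move(optNet)) != Status::Success)
    {
        return;
    }

    InputTensors  inputs  {{0, ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData)}};
    OutputTensors outputs {{0, Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData)}};

    runtime->EnqueueWorkload(netId, inputs, outputs);   // synchronous execution
    runtime->UnloadNetwork(netId);
}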
int RuntimeImpl::GenerateNetworkId()
{
    return m_NetworkIdCounter++;
}
Status RuntimeImpl::LoadNetwork(NetworkId& networkIdOut, IOptimizedNetworkPtr inNetwork)
{
    std::string ignoredErrorMessage;
    return LoadNetwork(networkIdOut, std::move(inNetwork), ignoredErrorMessage);
}

Status RuntimeImpl::LoadNetwork(NetworkId& networkIdOut,
                                IOptimizedNetworkPtr inNetwork,
                                std::string& errorMessage)
{
    INetworkProperties networkProperties(false, MemorySource::Undefined, MemorySource::Undefined);
    return LoadNetwork(networkIdOut, std::move(inNetwork), errorMessage, networkProperties);
}
Status RuntimeImpl::LoadNetwork(NetworkId& networkIdOut,
                                IOptimizedNetworkPtr inNetwork,
                                std::string& errorMessage,
                                const INetworkProperties& networkProperties)
{
    // Register the network's profiler before anything else runs.
    auto profiler = inNetwork->GetProfiler();
    ProfilerManager::GetInstance().RegisterProfiler(profiler.get());

    IOptimizedNetwork* rawNetwork = inNetwork.release();
    networkIdOut = GenerateNetworkId();

    for (auto&& context : m_BackendContexts)
    {
        context.second->BeforeLoadNetwork(networkIdOut);
    }

    unique_ptr<LoadedNetwork> loadedNetwork = LoadedNetwork::MakeLoadedNetwork(
        std::unique_ptr<IOptimizedNetwork>(rawNetwork),
        errorMessage,
        networkProperties,
        m_ProfilingService);
    if (!loadedNetwork)
    {
        return Status::Failure;
    }

    {
        // Insert under the mutex: lookups from other threads must not race with insertion.
        std::lock_guard<std::mutex> lockGuard(m_Mutex);
        m_LoadedNetworks[networkIdOut] = std::move(loadedNetwork);
    }

    for (auto&& context : m_BackendContexts)
    {
        context.second->AfterLoadNetwork(networkIdOut);
    }

    if (m_ProfilingService.IsProfilingEnabled())
    {
        m_ProfilingService.IncrementCounterValue(armnn::profiling::NETWORK_LOADS);
    }

    return Status::Success;
}
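// Caller-side sketch for the overload above: INetworkProperties selects async
// execution and whether input/output buffers may be imported rather than
// copied. The (asyncEnabled, inputSource, outputSource) constructor form is
// assumed; check INetworkProperties in your ArmNN version.
void LoadWithProperties(armnn::IRuntime* runtime, armnn::IOptimizedNetworkPtr optNet)
{
    armnn::INetworkProperties props(/*asyncEnabled=*/true,
                                    armnn::MemorySource::Malloc,
                                    armnn::MemorySource::Malloc);
    std::string errorMessage;
    armnn::NetworkId netId{};
    if (runtime->LoadNetwork(netId, std::move(optNet), errorMessage, props) != armnn::Status::Success)
    {
        std::cerr << "LoadNetwork failed: " << errorMessage << std::endl;
    }
}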
Status RuntimeImpl::UnloadNetwork(NetworkId networkId)
{
    bool unloadOk = true;
    for (auto&& context : m_BackendContexts)
    {
        unloadOk &= context.second->BeforeUnloadNetwork(networkId);
    }

    if (!unloadOk)
    {
        ARMNN_LOG(warning) << "RuntimeImpl::UnloadNetwork(): failed to unload "
                              "network with ID:" << networkId << " because BeforeUnloadNetwork failed";
        return Status::Failure;
    }

    std::unique_ptr<profiling::TimelineUtilityMethods> timelineUtils =
        profiling::TimelineUtilityMethods::GetTimelineUtils(m_ProfilingService);
    {
        std::lock_guard<std::mutex> lockGuard(m_Mutex);

        // If timeline recording is on, mark the network's end of life.
        if (timelineUtils)
        {
            auto search = m_LoadedNetworks.find(networkId);
            if (search != m_LoadedNetworks.end())
            {
                profiling::ProfilingGuid networkGuid = search->second->GetNetworkGuid();
                timelineUtils->RecordEvent(networkGuid,
                                           profiling::LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS);
            }
        }

        if (m_LoadedNetworks.erase(networkId) == 0)
        {
            ARMNN_LOG(warning) << "WARNING: RuntimeImpl::UnloadNetwork(): " << networkId << " not found!";
            return Status::Failure;
        }

        if (m_ProfilingService.IsProfilingEnabled())
        {
            m_ProfilingService.IncrementCounterValue(armnn::profiling::NETWORK_UNLOADS);
        }
    }

    for (auto&& context : m_BackendContexts)
    {
        context.second->AfterUnloadNetwork(networkId);
    }

    ARMNN_LOG(debug) << "RuntimeImpl::UnloadNetwork(): Unloaded network with ID: " << networkId;
    return Status::Success;
}
const std::shared_ptr<IProfiler> RuntimeImpl::GetProfiler(NetworkId networkId) const
{
    auto it = m_LoadedNetworks.find(networkId);
    if (it != m_LoadedNetworks.end())
    {
        auto& loadedNetwork = it->second;
        return loadedNetwork->GetProfiler();
    }

    return nullptr;
}
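// Caller-side sketch: per-network profiling data can be pulled through this
// accessor. EnableProfiling() and Print() are public IProfiler methods; the
// placement of the calls around a run is illustrative.
void DumpProfile(armnn::IRuntime* runtime, armnn::NetworkId netId)
{
    std::shared_ptr<armnn::IProfiler> profiler = runtime->GetProfiler(netId);
    if (profiler)
    {
        profiler->EnableProfiling(true);   // before running the inference
        // ... EnqueueWorkload(...) ...
        profiler->Print(std::cout);        // write the collected events
    }
}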
void RuntimeImpl::ReportStructure()
{
    LoadedNetworks::iterator it = m_LoadedNetworks.begin();
    while (it != m_LoadedNetworks.end())
    {
        auto& loadedNetwork = it->second;
        loadedNetwork->SendNetworkStructure();
        ++it;    // advance to the next loaded network
    }
}
RuntimeImpl::RuntimeImpl(const IRuntime::CreationOptions& options)
    : m_NetworkIdCounter(0),
      m_ProfilingService(*this)
{
    const auto start_time = armnn::GetTimeNow();
    ARMNN_LOG(info) << "ArmNN v" << ARMNN_VERSION;

    if (options.m_ProfilingOptions.m_TimelineEnabled && !options.m_ProfilingOptions.m_EnableProfiling)
    {
        throw RuntimeException(
            "It is not possible to enable timeline reporting without profiling being enabled");
    }

    // Load any available/compatible dynamic backends before querying the backend registry.
    LoadDynamicBackends(options.m_DynamicBackendsPath);

    BackendIdSet supportedBackends;
    for (const auto& id : BackendRegistryInstance().GetBackendIds())
    {
        // Store backend contexts for the supported backends.
        try
        {
            auto factoryFun = BackendRegistryInstance().GetFactory(id);
            ARMNN_ASSERT(factoryFun != nullptr);
            auto backend = factoryFun();
            ARMNN_ASSERT(backend != nullptr);

            // A null allocator registered against a backend id is a configuration error.
            auto customAllocatorMapIterator = options.m_CustomAllocatorMap.find(id);
            if (customAllocatorMapIterator != options.m_CustomAllocatorMap.end() &&
                customAllocatorMapIterator->second == nullptr)
            {
                throw armnn::Exception("Allocator associated with id " + id.Get() + " is null");
            }
            // If the runtime is created in protected mode, only add backends that support it.
            if (options.m_ProtectedMode)
            {
                using BackendCapability = BackendOptions::BackendOption;
                BackendCapability protectedContentCapability {"ProtectedContentAllocation", true};
                if (!HasCapability(protectedContentCapability, id))
                {
                    ARMNN_LOG(warning) << "Backend " << id
                                       << " is not registered as it does not support protected content allocation\n";
                    continue;
                }
                // The user must provide a custom allocator that can allocate protected memory.
                if (customAllocatorMapIterator != options.m_CustomAllocatorMap.end())
                {
                    std::string err;
                    if (customAllocatorMapIterator->second->GetMemorySourceType()
                        == armnn::MemorySource::DmaBufProtected)
                    {
                        if (!backend->UseCustomMemoryAllocator(customAllocatorMapIterator->second, err))
                        {
                            ARMNN_LOG(error) << "The backend " << id
                                             << " reported an error when entering protected mode. Backend won't be"
                                             << " used. ErrorMsg: " << err;
                            continue;
                        }
                    }
                    else
                    {
                        ARMNN_LOG(error) << "The CustomAllocator provided with the runtime options doesn't support "
                                            "protected memory. Protected mode can't be activated. The backend "
                                         << id
                                         << " is not going to be used. MemorySource must be MemorySource::DmaBufProtected";
                        continue;
                    }
                }
                else
                {
                    ARMNN_LOG(error) << "Protected mode can't be activated for backend: " << id
                                     << " no custom allocator was provided to the runtime options.";
                    continue;
                }
            }
            else if (customAllocatorMapIterator != options.m_CustomAllocatorMap.end())
            {
                // Outside protected mode, a supplied allocator simply replaces the backend default.
                std::string err;
                if (!backend->UseCustomMemoryAllocator(customAllocatorMapIterator->second, err))
                {
                    ARMNN_LOG(error) << "The backend " << id
                                     << " reported an error when trying to use the provided custom allocator."
                                        " Backend won't be used."
                                     << " ErrorMsg: " << err;
                    continue;
                }
            }
            // If a custom memory optimizer strategy is mapped to this backend, register it.
            if (!options.m_MemoryOptimizerStrategyMap.empty())
            {
                auto customMemoryOptimizerStrategyMapIterator = options.m_MemoryOptimizerStrategyMap.find(id);
                if (customMemoryOptimizerStrategyMapIterator != options.m_MemoryOptimizerStrategyMap.end())
                {
                    BackendRegistryInstance().RegisterMemoryOptimizerStrategy(
                        id, customMemoryOptimizerStrategyMapIterator->second);
                    ARMNN_LOG(info) << "MemoryOptimizerStrategy "
                                    << customMemoryOptimizerStrategyMapIterator->second->GetName()
                                    << " set for the backend " << id << ".";
                }
            }
            else
            {
                // Otherwise check whether one of the built-in strategies was requested by name.
                std::string memoryOptimizerStrategyName = "";
                ParseOptions(options.m_BackendOptions, id, [&](std::string name, const BackendOptions::Var& value)
                {
                    if (name == "MemoryOptimizerStrategy")
                    {
                        memoryOptimizerStrategyName = ParseStringBackendOption(value, "");
                    }
                });
                if (memoryOptimizerStrategyName != "")
                {
                    std::shared_ptr<IMemoryOptimizerStrategy> strategy =
                        GetMemoryOptimizerStrategy(memoryOptimizerStrategyName);
                    if (!strategy)
                    {
                        ARMNN_LOG(warning) << "MemoryOptimizerStrategy: " << memoryOptimizerStrategyName
                                           << " was not found\n";
                    }
                    else
                    {
                        using BackendCapability = BackendOptions::BackendOption;
                        auto strategyType = GetMemBlockStrategyTypeName(strategy->GetMemBlockStrategyType());
                        BackendCapability memOptimizeStrategyCapability {strategyType, true};
                        if (HasCapability(memOptimizeStrategyCapability, id))
                        {
                            BackendRegistryInstance().RegisterMemoryOptimizerStrategy(id, strategy);
                            ARMNN_LOG(info) << "MemoryOptimizerStrategy: "
                                            << memoryOptimizerStrategyName << " set for the backend " << id << ".";
                        }
                        else
                        {
                            ARMNN_LOG(warning) << "Backend " << id
                                               << " does not have multi-axis packing capability and cannot support"
                                               << " MemoryOptimizerStrategy: " << memoryOptimizerStrategyName << "\n";
                        }
                    }
                }
            }
            auto context = backend->CreateBackendContext(options);

            // Backends are allowed to return nullptr if they do not wish to create
            // a backend-specific context.
            if (context)
            {
                m_BackendContexts.emplace(std::make_pair(id, std::move(context)));
            }
            supportedBackends.emplace(id);

            unique_ptr<armnn::profiling::IBackendProfiling> profilingIface =
                std::make_unique<armnn::profiling::BackendProfiling>(BackendProfiling(
                    options, m_ProfilingService, id));

            // Backends may also provide a profiling context; those that don't support
            // profiling return nullptr.
            auto profilingContext = backend->CreateBackendProfilingContext(options, profilingIface);
            if (profilingContext)
            {
                // Pass the context on to the profiling service.
                m_ProfilingService.AddBackendProfilingContext(id, profilingContext);
            }
        }
        catch (const BackendUnavailableException&)
        {
            // Ignore backends which are unavailable.
        }
    }
    BackendRegistryInstance().SetProfilingService(m_ProfilingService);
    // Pass the external profiling configuration on to the profiling service.
    m_ProfilingService.ConfigureProfilingService(options.m_ProfilingOptions);
    if (options.m_ProfilingOptions.m_EnableProfiling)
    {
        // Try to wait for the profiling service to initialise.
        m_ProfilingService.WaitForProfilingServiceActivation(3000);
    }

    m_DeviceSpec.AddSupportedBackends(supportedBackends);

    ARMNN_LOG(info) << "Initialization time: " << std::setprecision(2)
                    << std::fixed << armnn::GetTimeDuration(start_time).count() << " ms\n";
}
RuntimeImpl::~RuntimeImpl()
{
    const auto startTime = armnn::GetTimeNow();
    std::vector<int> networkIDs;
    try
    {
        // Coverity fix: the following code may throw an exception of type std::length_error.
        std::transform(m_LoadedNetworks.begin(), m_LoadedNetworks.end(),
                       std::back_inserter(networkIDs),
                       [](const auto& pair) { return pair.first; });
    }
    catch (const std::exception& e)
    {
        // Log directly to stderr: the logging machinery itself may throw here.
        std::cerr << "WARNING: An error has occurred when getting the IDs of the networks to unload: " << e.what()
                  << "\nSome of the loaded networks may not be unloaded" << std::endl;
    }

    // Unload all networks whose IDs were collected before any exception was thrown.
    for (auto networkID : networkIDs)
    {
        try
        {
            UnloadNetwork(networkID);
        }
        catch (const std::exception& e)
        {
            std::cerr << "WARNING: An error has occurred when unloading network " << networkID << ": " << e.what()
                      << std::endl;
        }
    }

    // Clear all dynamic backends and backend contexts.
    DynamicBackendUtils::DeregisterDynamicBackends(m_DeviceSpec.GetDynamicBackends());
    m_DeviceSpec.ClearDynamicBackends();
    m_BackendContexts.clear();

    BackendRegistryInstance().SetProfilingService(armnn::EmptyOptional());
    ARMNN_LOG(info) << "Shutdown time: " << std::setprecision(2)
                    << std::fixed << armnn::GetTimeDuration(startTime).count() << " ms\n";
}
LoadedNetwork* RuntimeImpl::GetLoadedNetworkPtr(NetworkId networkId) const
{
    std::lock_guard<std::mutex> lockGuard(m_Mutex);
    return m_LoadedNetworks.at(networkId).get();
}

std::vector<ImportedInputId> RuntimeImpl::ImportInputs(NetworkId networkId, const InputTensors& inputTensors)
{
    return GetLoadedNetworkPtr(networkId)->ImportInputs(inputTensors);
}

std::vector<ImportedOutputId> RuntimeImpl::ImportOutputs(NetworkId networkId, const OutputTensors& outputTensors)
{
    return GetLoadedNetworkPtr(networkId)->ImportOutputs(outputTensors);
}
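// Caller-side sketch of the pre-import flow: import once, keep the returned
// ids, and hand them to Execute() so per-inference copies are skipped (see
// the sketch after Execute() below). The inputs/outputs containers are the
// same InputTensors/OutputTensors used for normal execution.
void PreImport(armnn::IRuntime* runtime, armnn::NetworkId netId,
               const armnn::InputTensors& inputs, const armnn::OutputTensors& outputs)
{
    std::vector<armnn::ImportedInputId>  inIds  = runtime->ImportInputs(netId, inputs);
    std::vector<armnn::ImportedOutputId> outIds = runtime->ImportOutputs(netId, outputs);

    // ... run Execute() with inIds/outIds ...

    runtime->ClearImportedInputs(netId, inIds);     // not thread safe; do not race with Execute()
    runtime->ClearImportedOutputs(netId, outIds);
}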
Status RuntimeImpl::EnqueueWorkload(NetworkId networkId,
                                    const InputTensors& inputTensors,
                                    const OutputTensors& outputTensors)
{
    LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);

    if (!loadedNetwork)
    {
        ARMNN_LOG(error) << "A Network with an id of " << networkId << " does not exist.\n";
        return Status::Failure;
    }
    if (loadedNetwork->IsAsyncEnabled())
    {
        // EnqueueWorkload is the synchronous path; async-enabled networks must use Execute().
        ARMNN_LOG(error) << "Network " << networkId << " is async enabled.\n";
        return Status::Failure;
    }
    ProfilerManager::GetInstance().RegisterProfiler(loadedNetwork->GetProfiler().get());

    ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "EnqueueWorkload");

    static thread_local NetworkId lastId = networkId;
    if (lastId != networkId)
    {
        // ... (elided in this listing)
    }
    lastId = networkId;

    return loadedNetwork->EnqueueWorkload(inputTensors, outputTensors);
}
Status RuntimeImpl::Execute(IWorkingMemHandle& iWorkingMemHandle,
                            const InputTensors& inputTensors,
                            const OutputTensors& outputTensors,
                            std::vector<ImportedInputId> preImportedInputs,
                            std::vector<ImportedOutputId> preImportedOutputs)
{
    NetworkId networkId = iWorkingMemHandle.GetNetworkId();
    LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);

    if (!loadedNetwork)
    {
        ARMNN_LOG(error) << "A Network with an id of " << networkId << " does not exist.\n";
        return Status::Failure;
    }
    if (!loadedNetwork->IsAsyncEnabled())
    {
        ARMNN_LOG(error) << "Attempting to execute " << networkId << " when it is not async enabled.\n";
        return Status::Failure;
    }
    ProfilerManager::GetInstance().RegisterProfiler(loadedNetwork->GetProfiler().get());

    ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Execute");

    return loadedNetwork->Execute(inputTensors, outputTensors, iWorkingMemHandle,
                                  preImportedInputs, preImportedOutputs);
}
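// Caller-side sketch: Execute() with pre-imported tensors. Empty
// InputTensors/OutputTensors are passed on the assumption that every buffer
// was already imported (see ImportInputs/ImportOutputs above); the ids
// identify the imported buffers.
void RunWithImports(armnn::IRuntime* runtime, armnn::NetworkId netId,
                    std::vector<armnn::ImportedInputId> inIds,
                    std::vector<armnn::ImportedOutputId> outIds)
{
    auto handle = runtime->CreateWorkingMemHandle(netId);
    runtime->Execute(*handle, armnn::InputTensors{}, armnn::OutputTensors{}, inIds, outIds);
}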
std::unique_ptr<IWorkingMemHandle> RuntimeImpl::CreateWorkingMemHandle(NetworkId networkId)
{
    LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);

    if (!loadedNetwork)
    {
        ARMNN_LOG(error) << "A Network with an id of " << networkId << " does not exist.\n";
        return nullptr;
    }
    if (!loadedNetwork->IsAsyncEnabled())
    {
        ARMNN_LOG(error) << "Network " << networkId << " is not async enabled.\n";
        return nullptr;
    }
    ProfilerManager::GetInstance().RegisterProfiler(loadedNetwork->GetProfiler().get());

    static thread_local NetworkId lastId = networkId;
    if (lastId != networkId)
    {
        // ... (elided in this listing)
    }
    lastId = networkId;

    return loadedNetwork->CreateWorkingMemHandle(networkId);
}
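// Caller-side sketch of concurrent async execution: each thread creates its
// own IWorkingMemHandle, so two inferences on the same loaded network can run
// in parallel. The network must have been loaded with asyncEnabled = true,
// and each thread needs its own output buffers.
#include <thread>

void RunConcurrently(armnn::IRuntime* runtime, armnn::NetworkId netId,
                     const armnn::InputTensors& inputs,
                     const armnn::OutputTensors& outputsA,
                     const armnn::OutputTensors& outputsB)
{
    auto worker = [&](const armnn::OutputTensors& outputs)
    {
        auto handle = runtime->CreateWorkingMemHandle(netId);
        runtime->Execute(*handle, inputs, outputs, {}, {});
    };
    std::thread t1(worker, std::cref(outputsA));
    std::thread t2(worker, std::cref(outputsB));
    t1.join();
    t2.join();
}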
void RuntimeImpl::RegisterDebugCallback(NetworkId networkId, const DebugCallbackFunction& func)
{
    LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);
    loadedNetwork->RegisterDebugCallback(func);
}
void RuntimeImpl::LoadDynamicBackends(const std::string& overrideBackendPath)
{
    // Get the paths in which to search for dynamic backends.
    std::vector<std::string> backendPaths = DynamicBackendUtils::GetBackendPaths(overrideBackendPath);

    // Get the shared objects to try to load as dynamic backends.
    std::vector<std::string> sharedObjects = DynamicBackendUtils::GetSharedObjects(backendPaths);

    // Create and register the dynamic backends, then add their ids to the supported set.
    std::vector<DynamicBackendPtr> dynamicBackends = DynamicBackendUtils::CreateDynamicBackends(sharedObjects);
    BackendIdSet registeredBackendIds = DynamicBackendUtils::RegisterDynamicBackends(dynamicBackends);
    m_DeviceSpec.AddSupportedBackends(registeredBackendIds, true);
}
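// Caller-side sketch: pointing the loader above at an out-of-tree backend
// directory. The path is a placeholder; it must contain backend shared
// objects built against a compatible backend API version.
armnn::IRuntimePtr MakeRuntimeWithDynamicBackends()
{
    armnn::IRuntime::CreationOptions options;
    options.m_DynamicBackendsPath = "/usr/lib/armnn/dynamic-backends";
    return armnn::IRuntime::Create(options);   // the constructor calls LoadDynamicBackends()
}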
Referenced symbols:

void AddSupportedBackends(const BackendIdSet& backendIds, bool isDynamic = false)
Status Execute(const InputTensors& inputTensors, const OutputTensors& outputTensors, IWorkingMemHandle& workingMemHandle, std::vector<ImportedInputId> preImportedInputs = {}, std::vector<ImportedOutputId> preImportedOutputs = {})
    Thread safe execution of the loaded network.
TensorInfo GetInputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
std::unique_ptr<IWorkingMemHandle> CreateWorkingMemHandle(NetworkId networkId)
    Create a new unique WorkingMemHandle object.
void WaitForProfilingServiceActivation(unsigned int timeout) override
bool HasCapability(const std::string& name, const BackendCapabilities& capabilities)
    Convenience function to check if a capability exists in a BackendCapabilities struct.
static IRuntimePtr Create(const CreationOptions& options)
FactoryFunction GetFactory(const BackendId& id) const
std::vector<ImportedOutputId> ImportOutputs(const OutputTensors& outputTensors)
const std::shared_ptr<IProfiler> GetProfiler(NetworkId networkId) const
    Gets the profiler corresponding to the given network id.
std::chrono::duration<double, std::milli> GetTimeDuration(std::chrono::high_resolution_clock::time_point start_time)
BackendOptions::Var
    Very basic type safe variant.
static void DeregisterDynamicBackends(const BackendIdSet& dynamicBackends)
virtual NetworkId GetNetworkId() = 0
    Returns the NetworkId of the Network that this IWorkingMemHandle works with.
std::unordered_set<BackendId> BackendIdSet
static std::unique_ptr<TimelineUtilityMethods> GetTimelineUtils(ProfilingService& profilingService)
static ProfilerManager& GetInstance()
void ParseOptions(const std::vector<BackendOptions>& options, BackendId backend, F f)
Status Execute(IWorkingMemHandle& workingMemHandle, const InputTensors& inputTensors, const OutputTensors& outputTensors, std::vector<ImportedInputId> preImportedInputs = {}, std::vector<ImportedOutputId> preImportedOutputs = {})
    This is an experimental function.
#define ARMNN_VERSION
    ARMNN_VERSION: "X.Y.Z" where X = major version number, Y = minor version number, Z = patch version number.
Status UnloadNetwork(NetworkId networkId)
    Unloads a network from the runtime.
void RegisterMemoryOptimizerStrategy(const BackendId& id, std::shared_ptr<IMemoryOptimizerStrategy> strategy)
std::unique_ptr<IRuntime, void(*)(IRuntime* runtime)> IRuntimePtr
void RegisterAllocator(const BackendId& id, std::shared_ptr<ICustomAllocator> alloc)
TensorInfo GetInputTensorInfo(LayerBindingId layerId) const
#define ARMNN_LOG(severity)
BackendRegistry& BackendRegistryInstance()
Status LoadNetwork(NetworkId& networkIdOut, IOptimizedNetworkPtr network)
    Loads a complete network into the runtime.
std::vector<ImportedInputId> ImportInputs(NetworkId networkId, const InputTensors& inputTensors)
    ImportInputs separates the importing and mapping of InputTensors from network execution.
std::vector<std::pair<LayerBindingId, class ConstTensor>> InputTensors
static std::vector<DynamicBackendPtr> CreateDynamicBackends(const std::vector<std::string>& sharedObjects)
TensorInfo GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
std::chrono::high_resolution_clock::time_point GetTimeNow()
std::map<BackendId, std::shared_ptr<IMemoryOptimizerStrategy>> m_MemoryOptimizerStrategyMap
    A map to define a custom memory optimizer strategy for specific backend ids.
TensorInfo GetOutputTensorInfo(LayerBindingId layerId) const
Status EnqueueWorkload(NetworkId networkId, const InputTensors& inputTensors, const OutputTensors& outputTensors)
    Evaluates a network using input in inputTensors and outputs filled into outputTensors.
bool m_EnableProfiling
    Indicates whether external profiling is enabled or not.
void RegisterDebugCallback(NetworkId networkId, const DebugCallbackFunction& func)
    Registers a callback function to debug layers performing custom computations on intermediate tensors.
std::function<void(LayerGuid guid, unsigned int slotIndex, ITensorHandle* tensorHandle)> DebugCallbackFunction
    Define the type of callback for the Debug layer to call.
static std::vector<std::string> GetBackendPaths(const std::string& overrideBackendPath = "")
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
int LayerBindingId
    Type of identifiers for bindable layers (inputs, outputs).
static void Destroy(IRuntime* runtime)
std::vector<BackendOptions> m_BackendOptions
    Pass backend specific options.
BackendCapability
    BackendCapability class.
static std::vector<std::string> GetSharedObjects(const std::vector<std::string>& backendPaths)
void ClearImportedInputs(NetworkId networkId, const std::vector<ImportedInputId> inputIds)
    Un-import and delete the imported InputTensor(s). This function is not thread safe and must not be used while other threads are calling Execute().
std::vector<std::pair<LayerBindingId, class Tensor>> OutputTensors
std::unique_ptr<IOptimizedNetwork, void(*)(IOptimizedNetwork* network)> IOptimizedNetworkPtr
void SetProfilingService(armnn::Optional<profiling::ProfilingService&> profilingService)
std::map<BackendId, std::shared_ptr<ICustomAllocator>> m_CustomAllocatorMap
    A map to define a custom memory allocator for specific backend ids.
#define ARMNN_ASSERT(COND)
std::unique_ptr<RuntimeImpl> pRuntimeImpl
void ClearImportedInputs(const std::vector<ImportedInputId> inputIds)
IDeviceSpec
    Device specific knowledge to be passed to the optimizer.
static IRuntime* CreateRaw(const CreationOptions& options)
std::unique_ptr<IMemoryOptimizerStrategy> GetMemoryOptimizerStrategy(const std::string& strategyName)
const IDeviceSpec& GetDeviceSpec() const
bool m_ProtectedMode
    Setting this flag will allow the user to create the Runtime in protected mode.
Status EnqueueWorkload(const InputTensors& inputTensors, const OutputTensors& outputTensors)
    Single thread execution of the loaded network.
std::string m_DynamicBackendsPath
    Setting this value will override the paths set by the DYNAMIC_BACKEND_PATHS compiler directive. Only a single path is allowed for the override.
EmptyOptional
    EmptyOptional is used to initialize the Optional class in case we want to have a default value for an Optional in a function declaration.
void RegisterProfiler(IProfiler* profiler)
armnn::Exception
    Base class for all ArmNN exceptions so that users can filter to just those.
const std::shared_ptr<IProfiler>& GetProfiler() const
std::vector<ImportedOutputId> ImportOutputs(NetworkId networkId, const OutputTensors& outputTensors)
    ImportOutputs separates the importing and mapping of OutputTensors from network execution.
void RegisterDebugCallback(const DebugCallbackFunction& func)
std::vector<ImportedInputId> ImportInputs(const InputTensors& inputTensors)
constexpr const char* GetMemBlockStrategyTypeName(MemBlockStrategyType memBlockStrategyType)
RuntimeImpl(const IRuntime::CreationOptions& options)
    Creates a runtime for workload execution.
const BackendIdSet& GetDynamicBackends() const
bool m_TimelineEnabled
    Indicates whether external timeline profiling is enabled or not.
void ClearDynamicBackends()
void ClearImportedOutputs(NetworkId networkId, const std::vector<ImportedOutputId> outputIds)
    Un-import and delete the imported OutputTensor(s). This function is not thread safe and must not be used while other threads are calling Execute().
static BackendIdSet RegisterDynamicBackends(const std::vector<DynamicBackendPtr>& dynamicBackends)
void AddBackendProfilingContext(const BackendId backendId, std::shared_ptr<armnn::profiling::IBackendProfilingContext> profilingContext)
ExternalProfilingOptions m_ProfilingOptions
ProfilingState ConfigureProfilingService(const ExternalProfilingOptions& options, bool resetProfilingService = false)
static std::unique_ptr<LoadedNetwork> MakeLoadedNetwork(std::unique_ptr<IOptimizedNetwork> net, std::string& errorMessage, const INetworkProperties& networkProperties, profiling::ProfilingService& profilingService)
void ClearImportedOutputs(const std::vector<ImportedOutputId> outputIds)
BackendUnavailableException
    Class for non-fatal exceptions raised while initialising a backend.