ArmNN
 20.08
LoadedNetwork Class Reference

#include <LoadedNetwork.hpp>

Public Types

using WorkloadQueue = std::vector< std::unique_ptr< IWorkload > >
 

Public Member Functions

 ~LoadedNetwork ()
 
TensorInfo GetInputTensorInfo (LayerBindingId layerId) const
 
TensorInfo GetOutputTensorInfo (LayerBindingId layerId) const
 
Status EnqueueWorkload (const InputTensors &inputTensors, const OutputTensors &outputTensors)
 
const std::shared_ptr< Profiler > & GetProfiler () const
 
void FreeWorkingMemory ()
 
void RegisterDebugCallback (const DebugCallbackFunction &func)
 
void SendNetworkStructure ()
 
profiling::ProfilingGuid GetNetworkGuid ()
 

Static Public Member Functions

static std::unique_ptr< LoadedNetwork > MakeLoadedNetwork (std::unique_ptr< OptimizedNetwork > net, std::string &errorMessage, const INetworkProperties &networkProperties, profiling::ProfilingService &profilingService)
 

Detailed Description

Definition at line 34 of file LoadedNetwork.hpp.

Member Typedef Documentation

◆ WorkloadQueue

using WorkloadQueue = std::vector< std::unique_ptr<IWorkload> >

Definition at line 37 of file LoadedNetwork.hpp.

Constructor & Destructor Documentation

◆ ~LoadedNetwork()

~LoadedNetwork ( )
inline

Definition at line 38 of file LoadedNetwork.hpp.

References armnn::GetInputTensorInfo().

Member Function Documentation

◆ EnqueueWorkload()

Status EnqueueWorkload ( const InputTensors &  inputTensors,
const OutputTensors &  outputTensors 
)

Definition at line 464 of file LoadedNetwork.cpp.

References ARMNN_ASSERT_MSG, ARMNN_LOG, LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS, LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS, ARMNN_SCOPED_HEAP_PROFILING, ARMNN_SCOPED_PROFILING_EVENT, armnn::CheckFlag(), LabelsAndEventClasses::EXECUTION_OF_GUID, armnn::Failure, ITensorHandle::GetImportFlags(), Graph::GetInputLayers(), Layer::GetInputSlots(), Graph::GetNumInputs(), Layer::GetNumInputSlots(), Graph::GetNumLayers(), Graph::GetNumOutputs(), Layer::GetNumOutputSlots(), Layer::GetOutputHandler(), Graph::GetOutputLayers(), TimelineUtilityMethods::GetTimelineUtils(), Layer::GetType(), armnn::IgnoreUnused(), ITensorHandle::Import(), LabelsAndEventClasses::INFERENCE_GUID, armnn::info, armnn::Input, QueueDescriptor::m_Inputs, WorkloadInfo::m_InputTensorInfos, QueueDescriptor::m_Outputs, WorkloadInfo::m_OutputTensorInfos, armnn::Malloc, ITensorHandle::Map(), armnn::Output, armnn::Success, armnn::Undefined, ITensorHandle::Unmap(), and armnn::warning.

Referenced by Runtime::EnqueueWorkload().

466 {
467  const Graph& graph = m_OptimizedNetwork->GetGraph();
468 
469  // Walk graph to determine the order of execution.
470  if (graph.GetNumLayers() < 2)
471  {
472  ARMNN_LOG(warning) << "IRuntime::EnqueueWorkload()::Less than two nodes in graph";
473  return Status::Failure;
474  }
475 
476  // Data that must be kept alive for the entire execution of the workload.
477  WorkloadData workloadData(inputTensors, outputTensors);
478 
479  if (graph.GetNumInputs() != inputTensors.size())
480  {
481  throw InvalidArgumentException("Number of inputs provided does not match network.");
482  }
483 
484  // For each input to the network, call EnqueueInput with the data passed by the user.
485  {
486  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "PrepareInputs");
487  m_InputQueue.clear();
488  m_InputQueue.reserve(graph.GetNumInputs());
489  for (const BindableLayer* inputLayer : graph.GetInputLayers())
490  {
491  const TensorPin& pin = workloadData.GetInputTensorPin(inputLayer->GetBindingId());
492  EnqueueInput(*inputLayer, pin.GetTensorHandle(), pin.GetTensorInfo());
493  }
494  }
495 
496  // For each output to the network, call EnqueueOutput with the data passed by the user.
497  {
498  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "PrepareOutputs");
499  m_OutputQueue.clear();
500  m_OutputQueue.reserve(graph.GetNumOutputs());
501  for (const BindableLayer* outputLayer : graph.GetOutputLayers())
502  {
503  const TensorPin& pin = workloadData.GetOutputTensorPin(outputLayer->GetBindingId());
504  EnqueueOutput(*outputLayer, pin.GetTensorHandle(), pin.GetTensorInfo());
505  }
506  }
507 
508  std::unique_ptr<TimelineUtilityMethods> timelineUtils =
509  TimelineUtilityMethods::GetTimelineUtils(m_ProfilingService);
510  ProfilingGuid inferenceGuid = m_ProfilingService.GetNextGuid();
511  if (timelineUtils)
512  {
513  // Add inference timeline trace if profiling is enabled.
514  ProfilingGuid networkGuid = m_OptimizedNetwork->GetGuid();
515  timelineUtils->CreateTypedEntity(inferenceGuid, LabelsAndEventClasses::INFERENCE_GUID);
516  timelineUtils->CreateRelationship(ProfilingRelationshipType::RetentionLink,
517  networkGuid,
518  inferenceGuid,
519  LabelsAndEventClasses::EXECUTION_OF_GUID);
520  timelineUtils->RecordEvent(inferenceGuid, LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS);
521  }
522 
523  bool executionSucceeded = true;
524 
525  {
526  if (m_ProfilingService.IsProfilingEnabled())
527  {
528  m_ProfilingService.IncrementCounterValue(armnn::profiling::INFERENCES_RUN);
529  }
530  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Execute");
531  ARMNN_SCOPED_HEAP_PROFILING("Executing");
532  executionSucceeded = Execute(timelineUtils, inferenceGuid);
533  }
534 
535  if (timelineUtils)
536  {
537  // Add end of life of the inference timeline if profiling is enabled.
538  timelineUtils->RecordEvent(inferenceGuid, LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS);
539  timelineUtils->Commit();
540  }
541  return executionSucceeded ? Status::Success : Status::Failure;
542 }
static ARMNN_DLLEXPORT ProfilingStaticGuid INFERENCE_GUID
static std::unique_ptr< TimelineUtilityMethods > GetTimelineUtils(ProfilingService &profilingService)
#define ARMNN_LOG(severity)
Definition: Logging.hpp:163
uint32_t IncrementCounterValue(uint16_t counterUid) override
static ARMNN_DLLEXPORT ProfilingStaticGuid ARMNN_PROFILING_EOL_EVENT_CLASS
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
Definition: Profiling.hpp:169
static ARMNN_DLLEXPORT ProfilingStaticGuid ARMNN_PROFILING_SOL_EVENT_CLASS
#define ARMNN_SCOPED_HEAP_PROFILING(TAG)
static ARMNN_DLLEXPORT ProfilingStaticGuid EXECUTION_OF_GUID
bool IsProfilingEnabled() const override
static ProfilingDynamicGuid GetNextGuid()

◆ FreeWorkingMemory()

void FreeWorkingMemory ( )

Definition at line 727 of file LoadedNetwork.cpp.

References ARMNN_LOG, and armnn::error.

Referenced by Runtime::EnqueueWorkload().

728 {
729  std::lock_guard<std::mutex> lockGuard(m_WorkingMemMutex);
730  if (!m_IsWorkingMemAllocated)
731  {
732  return;
733  }
734  // Informs the memory managers to release memory in its respective memory group
735  for (auto&& workloadFactory : m_WorkloadFactories)
736  {
737  IBackendInternal::IMemoryManagerSharedPtr memoryManager = workloadFactory.second.second;
738  if (memoryManager)
739  {
740  memoryManager->Release();
741  }
742  }
743  m_TensorHandleFactoryRegistry.ReleaseMemory();
744  m_IsWorkingMemAllocated = false;
745 }
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
void ReleaseMemory()
Release memory required for inference.

◆ GetInputTensorInfo()

TensorInfo GetInputTensorInfo ( LayerBindingId  layerId) const

Definition at line 315 of file LoadedNetwork.cpp.

References ARMNN_ASSERT_MSG.

Referenced by Runtime::GetInputTensorInfo().

316 {
317  for (auto&& inputLayer : m_OptimizedNetwork->GetGraph().GetInputLayers())
318  {
319  ARMNN_ASSERT_MSG(inputLayer->GetNumOutputSlots() == 1, "Input layer should have exactly 1 output slot");
320  if (inputLayer->GetBindingId() == layerId)
321  {
322  return inputLayer->GetOutputSlot(0).GetTensorInfo();
323  }
324  }
325 
326  throw InvalidArgumentException(boost::str(boost::format("No input layer is associated with id %1%") % layerId));
327 }
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15

◆ GetNetworkGuid()

profiling::ProfilingGuid GetNetworkGuid ( )

Definition at line 310 of file LoadedNetwork.cpp.

311 {
312  return m_OptimizedNetwork->GetGuid();
313 }

◆ GetOutputTensorInfo()

TensorInfo GetOutputTensorInfo ( LayerBindingId  layerId) const

Definition at line 329 of file LoadedNetwork.cpp.

References ARMNN_ASSERT_MSG, CHECK_LOCATION, BackendId::Get(), Layer::GetBackendId(), Layer::GetNameStr(), armnn::IgnoreUnused(), armnn::info, and IWorkloadFactory::IsLayerSupported().

Referenced by Runtime::GetOutputTensorInfo().

330 {
331  for (auto&& outputLayer : m_OptimizedNetwork->GetGraph().GetOutputLayers())
332  {
333  ARMNN_ASSERT_MSG(outputLayer->GetNumInputSlots() == 1, "Output layer should have exactly 1 input slot");
334  ARMNN_ASSERT_MSG(outputLayer->GetInputSlot(0).GetConnection(), "Input slot on Output layer must be connected");
335  if (outputLayer->GetBindingId() == layerId)
336  {
337  return outputLayer->GetInputSlot(0).GetConnection()->GetTensorInfo();
338  }
339  }
340 
341  throw InvalidArgumentException(boost::str(boost::format("No output layer is associated with id %1%") % layerId));
342 }
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15

◆ GetProfiler()

const std::shared_ptr<Profiler>& GetProfiler ( ) const
inline

Definition at line 53 of file LoadedNetwork.hpp.

Referenced by Runtime::EnqueueWorkload().

53 { return m_Profiler; }

◆ MakeLoadedNetwork()

std::unique_ptr< LoadedNetwork > MakeLoadedNetwork ( std::unique_ptr< OptimizedNetwork >  net,
std::string &  errorMessage,
const INetworkProperties &  networkProperties,
profiling::ProfilingService &  profilingService 
)
static

Definition at line 84 of file LoadedNetwork.cpp.

References ARMNN_LOG, LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS, armnn::BackendRegistryInstance(), IBackendInternal::CreateMemoryManager(), IBackendInternal::CreateWorkloadFactory(), armnn::error, armnnUtils::Processes::GetCurrentId(), BackendRegistry::GetFactory(), ProfilerManager::GetInstance(), TimelineUtilityMethods::GetTimelineUtils(), armnn::Input, INetworkProperties::m_ExportEnabled, INetworkProperties::m_ImportEnabled, armnn::MemImport, LabelsAndEventClasses::NETWORK_GUID, armnn::Output, LabelsAndEventClasses::PROCESS_ID_GUID, ProfilerManager::RegisterProfiler(), IBackendInternal::SupportsTensorAllocatorAPI(), and Graph::TopologicalSort().

Referenced by Runtime::LoadNetwork().

88 {
89  std::unique_ptr<LoadedNetwork> loadedNetwork;
90 
91  auto Fail = [&](const std::exception& error) -> std::unique_ptr<LoadedNetwork>
92  {
93  errorMessage = ToErrorMessage("An error occurred when preparing the network workloads: ", error);
94  ARMNN_LOG(error) << errorMessage;
95 
96  return std::unique_ptr<LoadedNetwork>();
97  };
98 
99  try
100  {
101  loadedNetwork.reset(new LoadedNetwork(std::move(net), networkProperties, profilingService));
102  }
103  catch (const armnn::RuntimeException& error)
104  {
105  return Fail(error);
106  }
107  catch (const armnn::Exception& error)
108  {
109  return Fail(error);
110  }
111  catch (const std::runtime_error& error)
112  {
113  return Fail(error);
114  }
115 
116  return loadedNetwork;
117 }
#define ARMNN_LOG(severity)
Definition: Logging.hpp:163
Base class for all ArmNN exceptions so that users can filter to just those.
Definition: Exceptions.hpp:46

◆ RegisterDebugCallback()

void RegisterDebugCallback ( const DebugCallbackFunction &  func)

Definition at line 797 of file LoadedNetwork.cpp.

Referenced by Runtime::RegisterDebugCallback().

798 {
799  for (auto&& workloadPtr: m_WorkloadQueue)
800  {
801  workloadPtr.get()->RegisterDebugCallback(func);
802  }
803 }

◆ SendNetworkStructure()

void SendNetworkStructure ( )

Definition at line 273 of file LoadedNetwork.cpp.

References TimelineUtilityMethods::GetTimelineUtils(), armnn::Input, LabelsAndEventClasses::NETWORK_GUID, armnn::Output, and Graph::TopologicalSort().

274 {
275  Graph& order = m_OptimizedNetwork->GetGraph().TopologicalSort();
276  ProfilingGuid networkGuid = m_OptimizedNetwork->GetGuid();
277 
278  std::unique_ptr<TimelineUtilityMethods> timelineUtils =
279  TimelineUtilityMethods::GetTimelineUtils(m_ProfilingService);
280 
281  timelineUtils->CreateTypedEntity(networkGuid, LabelsAndEventClasses::NETWORK_GUID);
282 
283  for (auto&& layer : order)
284  {
285  // Add layer to the post-optimisation network structure
286  AddLayerStructure(timelineUtils, *layer, networkGuid);
287  switch (layer->GetType())
288  {
289  case LayerType::Input:
290  case LayerType::Output:
291  {
292  // Inputs and outputs are treated in a special way - see EnqueueInput() and EnqueueOutput().
293  break;
294  }
295  default:
296  {
297  for (auto& workload : m_WorkloadQueue)
298  {
299  // Add workload to the post-optimisation network structure
300  AddWorkloadStructure(timelineUtils, workload, *layer);
301  }
302  break;
303  }
304  }
305  }
306  // Commit to send the post-optimisation network structure
307  timelineUtils->Commit();
308 }
static std::unique_ptr< TimelineUtilityMethods > GetTimelineUtils(ProfilingService &profilingService)
static ARMNN_DLLEXPORT ProfilingStaticGuid NETWORK_GUID

The documentation for this class was generated from the following files: