ArmNN
 20.02
LoadedNetwork Class Reference

#include <LoadedNetwork.hpp>

Public Types

using WorkloadQueue = std::vector< std::unique_ptr< IWorkload > >
 

Public Member Functions

 ~LoadedNetwork ()
 
TensorInfo GetInputTensorInfo (LayerBindingId layerId) const
 
TensorInfo GetOutputTensorInfo (LayerBindingId layerId) const
 
Status EnqueueWorkload (const InputTensors &inputTensors, const OutputTensors &outputTensors)
 
const std::shared_ptr< Profiler > & GetProfiler () const
 
void FreeWorkingMemory ()
 
void RegisterDebugCallback (const DebugCallbackFunction &func)
 

Static Public Member Functions

static std::unique_ptr< LoadedNetwork > MakeLoadedNetwork (std::unique_ptr< OptimizedNetwork > net, std::string &errorMessage, const INetworkProperties &networkProperties)
 

Detailed Description

Definition at line 33 of file LoadedNetwork.hpp.

Member Typedef Documentation

◆ WorkloadQueue

using WorkloadQueue = std::vector< std::unique_ptr<IWorkload> >

Definition at line 36 of file LoadedNetwork.hpp.

Constructor & Destructor Documentation

◆ ~LoadedNetwork()

~LoadedNetwork ( )
inline

Definition at line 37 of file LoadedNetwork.hpp.

References armnn::GetInputTensorInfo().

Member Function Documentation

◆ EnqueueWorkload()

Status EnqueueWorkload ( const InputTensors &  inputTensors,
const OutputTensors &  outputTensors 
)

Definition at line 412 of file LoadedNetwork.cpp.

References ARMNN_LOG, LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS, LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS, ARMNN_SCOPED_HEAP_PROFILING, ARMNN_SCOPED_PROFILING_EVENT, armnn::CheckFlag(), armnn::Failure, ITensorHandle::GetImportFlags(), Graph::GetInputLayers(), Layer::GetInputSlots(), Graph::GetNumInputs(), Layer::GetNumInputSlots(), Graph::GetNumLayers(), Graph::GetNumOutputs(), Layer::GetNumOutputSlots(), Layer::GetOutputHandler(), Graph::GetOutputLayers(), TimelineUtilityMethods::GetTimelineUtils(), Layer::GetType(), ITensorHandle::Import(), ProfilingService::IncrementCounterValue(), LabelsAndEventClasses::INFERENCE_GUID, armnn::info, armnn::Input, ProfilingService::Instance(), QueueDescriptor::m_Inputs, WorkloadInfo::m_InputTensorInfos, QueueDescriptor::m_Outputs, WorkloadInfo::m_OutputTensorInfos, armnn::Malloc, ITensorHandle::Map(), ProfilingService::NextGuid(), armnn::Output, armnn::profiling::RetentionLink, armnn::Success, armnn::Undefined, ITensorHandle::Unmap(), and armnn::warning.

Referenced by Runtime::EnqueueWorkload().

414 {
415  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "EnqueueWorkload");
416 
417  const Graph& graph = m_OptimizedNetwork->GetGraph();
418 
419  // Walk graph to determine the order of execution.
420  if (graph.GetNumLayers() < 2)
421  {
422  ARMNN_LOG(warning) << "IRuntime::EnqueueWorkload()::Less than two nodes in graph";
423  return Status::Failure;
424  }
425 
426  // Data that must be kept alive for the entire execution of the workload.
427  WorkloadData workloadData(inputTensors, outputTensors);
428 
429  if (graph.GetNumInputs() != inputTensors.size())
430  {
431  throw InvalidArgumentException("Number of inputs provided does not match network.");
432  }
433 
434  // For each input to the network, call EnqueueInput with the data passed by the user.
435  m_InputQueue.clear();
436  m_InputQueue.reserve(graph.GetNumInputs());
437  for (const BindableLayer* inputLayer : graph.GetInputLayers())
438  {
439  const TensorPin& pin = workloadData.GetInputTensorPin(inputLayer->GetBindingId());
440  EnqueueInput(*inputLayer, pin.GetTensorHandle(), pin.GetTensorInfo());
441  }
442 
443  // For each output to the network, call EnqueueOutput with the data passed by the user.
444  m_OutputQueue.clear();
445  m_OutputQueue.reserve(graph.GetNumOutputs());
446  for (const BindableLayer* outputLayer : graph.GetOutputLayers())
447  {
448  const TensorPin& pin = workloadData.GetOutputTensorPin(outputLayer->GetBindingId());
449  EnqueueOutput(*outputLayer, pin.GetTensorHandle(), pin.GetTensorInfo());
450  }
451 
452  std::unique_ptr<TimelineUtilityMethods> timelineUtils = TimelineUtilityMethods::GetTimelineUtils();
453  ProfilingDynamicGuid inferenceGuid = profiling::ProfilingService::Instance().NextGuid();
454  if (timelineUtils)
455  {
456  // Add inference timeline trace if profiling is enabled.
457  ProfilingGuid networkGuid = m_OptimizedNetwork->GetGuid();
458  timelineUtils->CreateTypedEntity(inferenceGuid, LabelsAndEventClasses::INFERENCE_GUID);
459  timelineUtils->CreateRelationship(ProfilingRelationshipType::RetentionLink, networkGuid, inferenceGuid);
460  timelineUtils->RecordEvent(inferenceGuid, LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS);
461  }
462 
463  bool executionSucceeded = true;
464 
465  {
466  if (profiling::ProfilingService::Instance().IsProfilingEnabled())
467  {
468  profiling::ProfilingService::Instance().IncrementCounterValue(armnn::profiling::INFERENCES_RUN);
469  }
471  ARMNN_SCOPED_HEAP_PROFILING("Executing");
472  executionSucceeded = Execute(timelineUtils, inferenceGuid);
473  }
474 
475  if (timelineUtils)
476  {
477  // Add end of life of the inference timeline if profiling is enabled.
478  timelineUtils->RecordEvent(inferenceGuid, LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS);
479  timelineUtils->Commit();
480  }
481  return executionSucceeded ? Status::Success : Status::Failure;
482 }
static ARMNN_DLLEXPORT ProfilingStaticGuid INFERENCE_GUID
static ProfilingService & Instance()
static std::unique_ptr< TimelineUtilityMethods > GetTimelineUtils()
#define ARMNN_LOG(severity)
Definition: Logging.hpp:163
ProfilingDynamicGuid NextGuid() override
Return the next random Guid in the sequence.
uint32_t IncrementCounterValue(uint16_t counterUid) override
static ARMNN_DLLEXPORT ProfilingStaticGuid ARMNN_PROFILING_EOL_EVENT_CLASS
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
Definition: Profiling.hpp:169
static ARMNN_DLLEXPORT ProfilingStaticGuid ARMNN_PROFILING_SOL_EVENT_CLASS
#define ARMNN_SCOPED_HEAP_PROFILING(TAG)

◆ FreeWorkingMemory()

void FreeWorkingMemory ( )

Definition at line 660 of file LoadedNetwork.cpp.

References ARMNN_LOG, and armnn::error.

Referenced by Runtime::EnqueueWorkload().

661 {
662  std::lock_guard<std::mutex> lockGuard(m_WorkingMemMutex);
663  if (!m_IsWorkingMemAllocated)
664  {
665  return;
666  }
667  // Informs the memory managers to release memory in its respective memory group
668  for (auto&& workloadFactory : m_WorkloadFactories)
669  {
670  IBackendInternal::IMemoryManagerSharedPtr memoryManager = workloadFactory.second.second;
671  if (memoryManager)
672  {
673  memoryManager->Release();
674  }
675  }
676  m_TensorHandleFactoryRegistry.ReleaseMemory();
677  m_IsWorkingMemAllocated = false;
678 }
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
void ReleaseMemory()
Release memory required for inference.

◆ GetInputTensorInfo()

TensorInfo GetInputTensorInfo ( LayerBindingId  layerId) const

Definition at line 263 of file LoadedNetwork.cpp.

Referenced by Runtime::GetInputTensorInfo().

264 {
265  for (auto&& inputLayer : m_OptimizedNetwork->GetGraph().GetInputLayers())
266  {
267  BOOST_ASSERT_MSG(inputLayer->GetNumOutputSlots() == 1, "Input layer should have exactly 1 output slot");
268  if (inputLayer->GetBindingId() == layerId)
269  {
270  return inputLayer->GetOutputSlot(0).GetTensorInfo();
271  }
272  }
273 
274  throw InvalidArgumentException(boost::str(boost::format("No input layer is associated with id %1%") % layerId));
275 }

◆ GetOutputTensorInfo()

TensorInfo GetOutputTensorInfo ( LayerBindingId  layerId) const

Definition at line 277 of file LoadedNetwork.cpp.

References CHECK_LOCATION, BackendId::Get(), Layer::GetBackendId(), Layer::GetNameStr(), armnn::IgnoreUnused(), armnn::info, and IWorkloadFactory::IsLayerSupported().

Referenced by Runtime::GetOutputTensorInfo().

278 {
279  for (auto&& outputLayer : m_OptimizedNetwork->GetGraph().GetOutputLayers())
280  {
281  BOOST_ASSERT_MSG(outputLayer->GetNumInputSlots() == 1, "Output layer should have exactly 1 input slot");
282  BOOST_ASSERT_MSG(outputLayer->GetInputSlot(0).GetConnection(), "Input slot on Output layer must be connected");
283  if (outputLayer->GetBindingId() == layerId)
284  {
285  return outputLayer->GetInputSlot(0).GetConnection()->GetTensorInfo();
286  }
287  }
288 
289  throw InvalidArgumentException(boost::str(boost::format("No output layer is associated with id %1%") % layerId));
290 }

◆ GetProfiler()

const std::shared_ptr<Profiler>& GetProfiler ( ) const
inline

Definition at line 51 of file LoadedNetwork.hpp.

51 { return m_Profiler; }

◆ MakeLoadedNetwork()

std::unique_ptr< LoadedNetwork > MakeLoadedNetwork ( std::unique_ptr< OptimizedNetwork >  net,
std::string &  errorMessage,
const INetworkProperties &  networkProperties 
)
static

Definition at line 85 of file LoadedNetwork.cpp.

References ARMNN_LOG, armnn::BackendRegistryInstance(), IBackendInternal::CreateMemoryManager(), IBackendInternal::CreateWorkloadFactory(), armnn::error, BackendRegistry::GetFactory(), ProfilerManager::GetInstance(), TimelineUtilityMethods::GetTimelineUtils(), armnn::Input, INetworkProperties::m_ExportEnabled, INetworkProperties::m_ImportEnabled, LabelsAndEventClasses::NETWORK_GUID, armnn::Output, ProfilerManager::RegisterProfiler(), IBackendInternal::RegisterTensorHandleFactories(), IBackendInternal::SupportsTensorAllocatorAPI(), and Graph::TopologicalSort().

Referenced by Runtime::LoadNetwork().

88 {
89  std::unique_ptr<LoadedNetwork> loadedNetwork;
90 
91  auto Fail = [&](const std::exception& error) -> std::unique_ptr<LoadedNetwork>
92  {
93  errorMessage = ToErrorMessage("An error occurred when preparing the network workloads: ", error);
94  ARMNN_LOG(error) << errorMessage;
95 
96  return std::unique_ptr<LoadedNetwork>();
97  };
98 
99  try
100  {
101  loadedNetwork.reset(new LoadedNetwork(std::move(net), networkProperties));
102  }
103  catch (const armnn::RuntimeException& error)
104  {
105  return Fail(error);
106  }
107  catch (const armnn::Exception& error)
108  {
109  return Fail(error);
110  }
111  catch (const std::runtime_error& error)
112  {
113  return Fail(error);
114  }
115 
116  return loadedNetwork;
117 }
#define ARMNN_LOG(severity)
Definition: Logging.hpp:163
Base class for all ArmNN exceptions so that users can filter to just those.
Definition: Exceptions.hpp:46

◆ RegisterDebugCallback()

void RegisterDebugCallback ( const DebugCallbackFunction &  func)

Definition at line 750 of file LoadedNetwork.cpp.

Referenced by Runtime::RegisterDebugCallback().

751 {
752  for (auto&& workloadPtr: m_WorkloadQueue)
753  {
754  workloadPtr.get()->RegisterDebugCallback(func);
755  }
756 }

The documentation for this class was generated from the following files: