ArmNN
 23.11
LoadedNetwork Class Reference

#include <LoadedNetwork.hpp>

Public Types

using WorkloadQueue = std::vector< std::unique_ptr< IWorkload > >
 

Public Member Functions

 ~LoadedNetwork ()
 
std::unique_ptr< IWorkingMemHandle > CreateWorkingMemHandle (NetworkId networkId)
 Create a new unique WorkingMemHandle object. More...
 
TensorInfo GetInputTensorInfo (LayerBindingId layerId) const
 
TensorInfo GetOutputTensorInfo (LayerBindingId layerId) const
 
std::vector< ImportedInputId > ImportInputs (const InputTensors &inputTensors, MemorySource forceImportMemorySource=MemorySource::Undefined)
 
std::vector< ImportedOutputId > ImportOutputs (const OutputTensors &outputTensors, MemorySource forceImportMemorySource=MemorySource::Undefined)
 
void ClearImportedInputs (const std::vector< ImportedInputId > inputIds)
 
void ClearImportedOutputs (const std::vector< ImportedOutputId > outputIds)
 
Status EnqueueWorkload (const InputTensors &inputTensors, const OutputTensors &outputTensors, std::vector< ImportedInputId > preImportedInputIds={}, std::vector< ImportedOutputId > preImportedOutputIds={})
 Single thread execution of the loaded network. More...
 
Status Execute (const InputTensors &inputTensors, const OutputTensors &outputTensors, IWorkingMemHandle &workingMemHandle, std::vector< ImportedInputId > preImportedInputs={}, std::vector< ImportedOutputId > preImportedOutputs={})
 Thread safe execution of the loaded network. More...
 
const std::shared_ptr< IProfiler > & GetProfiler () const
 
void FreeWorkingMemory ()
 
void RegisterDebugCallback (const DebugCallbackFunction &func)
 
void SendNetworkStructure (arm::pipe::IProfilingService &profilingService)
 
bool IsAsyncEnabled ()
 
arm::pipe::ProfilingGuid GetNetworkGuid ()
 

Static Public Member Functions

static std::unique_ptr< LoadedNetwork > MakeLoadedNetwork (std::unique_ptr< IOptimizedNetwork > net, std::string &errorMessage, const INetworkProperties &networkProperties, arm::pipe::IProfilingService *profilingService)
 

Detailed Description

Definition at line 42 of file LoadedNetwork.hpp.

Member Typedef Documentation

◆ WorkloadQueue

using WorkloadQueue = std::vector<std::unique_ptr<IWorkload> >

Definition at line 45 of file LoadedNetwork.hpp.

Constructor & Destructor Documentation

◆ ~LoadedNetwork()

~LoadedNetwork ( )
inline

Definition at line 47 of file LoadedNetwork.hpp.

48  {
49  FreeWorkingMemory();
50  }

References LoadedNetwork::FreeWorkingMemory().

Member Function Documentation

◆ ClearImportedInputs()

void ClearImportedInputs ( const std::vector< ImportedInputId > inputIds)

Definition at line 1699 of file LoadedNetwork.cpp.

1700 {
1701  for (auto id : inputIds)
1702  {
1703  if (id > m_PreImportedInputHandles.size())
1704  {
1705  throw InvalidArgumentException(fmt::format("ClearImportedInputs::Unknown ImportedInputId: {}", id));
1706  }
1707 
1708  auto& importedTensorHandle = m_PreImportedInputHandles[id].m_TensorHandle;
1709  if (!importedTensorHandle)
1710  {
1711  throw InvalidArgumentException(
1712  fmt::format("ClearImportedInputs::ImportedInput with id: {} has already been deleted", id));
1713  }
1714  // Call Unimport then destroy the tensorHandle
1715  importedTensorHandle->Unimport();
1716  importedTensorHandle = {};
1717  }
1718 }

Referenced by RuntimeImpl::ClearImportedInputs().
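
A short usage sketch. The loadedNetwork pointer, the input tensors and the chosen MemorySource are illustrative assumptions; the ids must come from a previous ImportInputs() call:

    // Import user-owned buffers, run inference, then release the imported handles.
    std::vector<armnn::ImportedInputId> importedIds =
        loadedNetwork->ImportInputs(inputTensors, armnn::MemorySource::Malloc);
    loadedNetwork->ClearImportedInputs(importedIds);
    // Passing the same ids a second time throws InvalidArgumentException,
    // because each tensor handle has already been unimported and destroyed.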

◆ ClearImportedOutputs()

void ClearImportedOutputs ( const std::vector< ImportedOutputId > outputIds)

Definition at line 1720 of file LoadedNetwork.cpp.

1721 {
1722  for (auto id : outputIds)
1723  {
1724  if (id > m_PreImportedOutputHandles.size())
1725  {
1726  throw InvalidArgumentException(fmt::format("ClearImportedOutputs::Unknown ImportedOutputId: {}", id));
1727  }
1728 
1729  auto& importedTensorHandle = m_PreImportedOutputHandles[id].m_TensorHandle;
1730  if (!importedTensorHandle)
1731  {
1732  throw InvalidArgumentException(
1733  fmt::format("ClearImportedOutputs::ImportedOutput with id: {} has already been deleted", id));
1734  }
1735  // Call Unimport then destroy the tensorHandle
1736  importedTensorHandle->Unimport();
1737  importedTensorHandle = {};
1738  }
1739 }

Referenced by RuntimeImpl::ClearImportedOutputs().

◆ CreateWorkingMemHandle()

std::unique_ptr< IWorkingMemHandle > CreateWorkingMemHandle ( NetworkId  networkId)

Create a new unique WorkingMemHandle object.

Create multiple handles if you wish to have overlapped Execution by calling this function from different threads.

Definition at line 1963 of file LoadedNetwork.cpp.

1964 {
1965  Graph& order = m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph();
1966 
1967  // Tensors that will need to be allocated internally within armnn
1968  std::vector<std::unique_ptr<ITensorHandle>> managedTensorHandles;
1969  // Tensors that will be allocated externally by the user
1970  std::vector<std::unique_ptr<ITensorHandle>> unmanagedTensorHandles;
1971 
1972  std::vector<WorkingMemDescriptor> workingMemDescriptors;
1973  std::vector<std::pair<BackendId, ExecutionData>> executionDataVec;
1974 
1975  auto GetTensorHandle = [&](Layer* layer, const OutputSlot& outputSlot)
1976  {
1977  ITensorHandleFactory::FactoryId factoryId = outputSlot.GetTensorHandleFactoryId();
1978  const TensorInfo& tensorInfo = outputSlot.GetTensorInfo();
1979 
1980  if (factoryId == ITensorHandleFactory::LegacyFactoryId)
1981  {
1982  BackendId id = layer->GetBackendId();
1983  ARMNN_NO_DEPRECATE_WARN_BEGIN
1984  return m_WorkloadFactories.at(id)->CreateTensorHandle(tensorInfo, false);
1985  ARMNN_NO_DEPRECATE_WARN_END
1986  }
1987  else
1988  {
1989  ITensorHandleFactory* handleFactory = m_TensorHandleFactoryRegistry.GetFactory(factoryId);
1990  ARMNN_ASSERT(handleFactory);
1991  return handleFactory->CreateTensorHandle(tensorInfo, false);
1992  }
1993  };
1994 
1995  struct HandleInfo
1996  {
1997  ITensorHandle* m_TensorHandle;
1998 
1999  bool m_IsInputLayerHandle = false;
2000  bool m_IsOutputLayerHandle = false;
2001 
2002  WorkingMemHandle::InputMemDescriptorCoords m_InputMemDescriptorCoords;
2003  WorkingMemHandle::OutputMemDescriptorCoords m_OutputMemDescriptorCoords;
2004  };
2005 
2006  std::unordered_map<const OutputSlot*, HandleInfo> outputToHandleInfoMap;
2007 
2008  unsigned int layerIndex = 0;
2009  for (auto&& layer : order)
2010  {
2011  // Constant layers execution and management is handled during loaded network construction
2012  if (layer->GetType() == LayerType::Constant)
2013  {
2014  continue;
2015  }
2016 
2017  WorkingMemDescriptor workingMemDescriptor;
2018 
2019  bool isMemoryManaged = true;
2020  bool isInputLayer = false;
2021  bool isOutputLayer = false;
2022  bool isConnectedToOutputLayer = false;
2023 
2024  if (layer->GetType() == LayerType::Input || layer->GetType() == LayerType::MemImport)
2025  {
2026  // Input layers/workloads will not be executed so the descriptor is not added to workingMemDescriptors
2027  // However we will still need to manage the tensorHandle
2028  isInputLayer = true;
2029  isMemoryManaged = m_NetworkProperties.m_InputSource == MemorySource::Undefined;
2030  }
2031  else if (layer->GetType() == LayerType::Output)
2032  {
2033  isOutputLayer = true;
2034  }
2035 
2036  unsigned int slotIndex = 0;
2037  // Create a tensor handle for each output slot of a layer
2038  // Once we create it, we start managing its lifetime
2039  for (auto& slot : layer->GetOutputSlots())
2040  {
2041  for (unsigned int i = 0; i < slot.GetNumConnections(); ++i)
2042  {
2043  if ((slot.GetConnection(i)->GetOwningLayer().GetType() == LayerType::Output))
2044  {
2045  if (!isConnectedToOutputLayer)
2046  {
2047  isConnectedToOutputLayer = true;
2048  // If Export is enabled disable memory management, so we can export, otherwise we do a copy
2049  isMemoryManaged = m_NetworkProperties.m_OutputSource == MemorySource::Undefined;
2050  }
2051  else
2052  {
2053  // Importing in this case would likely cause unexpected behaviour, so we disallow it.
2054  ARMNN_LOG(warning) <<
2055  fmt::format("Layer name: '{0}' guid: '{1}' has two or more OutputLayers connected to it. "
2056  "This will prevent importing on the connected OutputLayers.",
2057  layer->GetName(), layer->GetGuid());
2058  isMemoryManaged = true;
2059  }
2060  }
2061  }
2062 
2063  ITensorHandle* tensorHandle;
2064  if (isMemoryManaged)
2065  {
2066  managedTensorHandles.emplace_back(GetTensorHandle(layer, slot));
2067  tensorHandle = managedTensorHandles.back().get();
2068  }
2069  else
2070  {
2071  unmanagedTensorHandles.emplace_back(GetTensorHandle(layer, slot));
2072  tensorHandle = unmanagedTensorHandles.back().get();
2073  }
2074 
2075  workingMemDescriptor.m_Outputs.push_back(tensorHandle);
2076 
2077  HandleInfo& handleInfo = outputToHandleInfoMap[&slot];
2078  handleInfo.m_TensorHandle = tensorHandle;
2079 
2080  // Store the coordinates of the current layer's OutputSlot that is connected to the OutputLayer
2081  if (isConnectedToOutputLayer)
2082  {
2083  handleInfo.m_IsOutputLayerHandle = true;
2084  handleInfo.m_OutputMemDescriptorCoords.m_OutputSlotCoords = {layerIndex, slotIndex};
2085  }
2086  // Store the LayerBindingId of the InputLayer
2087  if (isInputLayer)
2088  {
2089  handleInfo.m_IsInputLayerHandle = true;
2090  LayerBindingId bindingId = static_cast<BindableLayer*>(layer)->GetBindingId();
2091  handleInfo.m_InputMemDescriptorCoords.m_LayerBindingId = bindingId;
2092  }
2093  slotIndex++;
2094  }
2095  // Loop through the input slots in the same layer and decrement the reference counter associated
2096  // to each tensor handle we encounter.
2097  // Once it reaches zero, the lifetime of the tensor handle has ended, and we mark its memory as available
2098  // so that the next tensor handle with a non overlapping lifetime can share its memory.
2099  for (auto& slot : layer->GetInputSlots())
2100  {
2101  ARMNN_ASSERT(slot.GetConnection());
2102  auto outputSlot = slot.GetConnectedOutputSlot();
2103  auto key = outputSlot->GetOwningLayer().GetGuid();
2104 
2105  // Constant layers execution and management is handled during loaded network construction
2106  auto found = m_ConstantTensorHandles.find(key);
2107  if (found != m_ConstantTensorHandles.end())
2108  {
2109  ITensorHandle* tensorHandle = found->second;
2110  if (slot.IsTensorInfoOverridden())
2111  {
2112  ITensorHandle* decorated = tensorHandle->DecorateTensorHandle(slot.GetTensorInfo()).get();
2113  if (decorated)
2114  {
2115  tensorHandle = decorated;
2116  }
2117  }
2118  workingMemDescriptor.m_Inputs.push_back(tensorHandle);
2119 
2120  // Odd case where a constant layer is connected to an output layer
2121  // We will need to create a HandleInfo to track it
2122  if (isOutputLayer)
2123  {
2124  LayerBindingId bindingId = static_cast<BindableLayer*>(layer)->GetBindingId();
2125 
2126  HandleInfo& handleInfo = outputToHandleInfoMap[outputSlot];
2127  handleInfo.m_TensorHandle = tensorHandle;
2128  handleInfo.m_IsOutputLayerHandle = true;
2129  handleInfo.m_OutputMemDescriptorCoords.m_LayerBindingIds.push_back(bindingId);
2130  handleInfo.m_OutputMemDescriptorCoords.m_InputSlotCoords.push_back({layerIndex, 0});
2131  }
2132  continue;
2133  }
2134 
2135  HandleInfo& handleInfo = outputToHandleInfoMap.at(outputSlot);
2136 
2137  ITensorHandle* inputTensorHandle = handleInfo.m_TensorHandle;
2138  if (slot.IsTensorInfoOverridden())
2139  {
2140  ITensorHandle* decorated = inputTensorHandle->DecorateTensorHandle(slot.GetTensorInfo()).get();
2141  if (decorated)
2142  {
2143  inputTensorHandle = decorated;
2144  }
2145  }
2146  workingMemDescriptor.m_Inputs.push_back(inputTensorHandle);
2147 
2148  // Store the LayerBindingId of the OutputLayer
2149  if (isOutputLayer)
2150  {
2151  LayerBindingId bindingId = static_cast<BindableLayer*>(layer)->GetBindingId();
2152  handleInfo.m_OutputMemDescriptorCoords.m_LayerBindingIds.push_back(bindingId);
2153  handleInfo.m_OutputMemDescriptorCoords.m_InputSlotCoords.push_back({layerIndex, 0});
2154  }
2155  // In this case the layer is not an Output Layer but shares its input tensorhandle with an OutputLayer
2156  // It will need to be updated as well, if we swap out the tensorhandle
2157  else if (handleInfo.m_IsOutputLayerHandle)
2158  {
2159  handleInfo.m_OutputMemDescriptorCoords.m_InputSlotCoords.push_back({layerIndex, slot.GetSlotIndex()});
2160  }
2161 
2162  // Store the coordinates of the InputSlots connected to the InputLayer
2163  // There can be more than one InputSlot connected to an InputLayer, so we use a vector
2164  if (handleInfo.m_IsInputLayerHandle)
2165  {
2166  std::pair<LayerGuid, unsigned int> connectionLocation{layerIndex, slot.GetSlotIndex()};
2167  handleInfo.m_InputMemDescriptorCoords.m_InputSlotCoords.emplace_back(connectionLocation);
2168  }
2169  }
2170 
2171  // Input/Output layers/workloads will not be executed, so the descriptor is not added to workingMemDescriptors
2172  // However we will still need to manage the tensorHandle
2173  if (!isInputLayer)
2174  {
2175  // Simply auto initialise ExecutionData here, so it's added only for the layers that require execution.
2176  // The memory and data will be allocated/assigned for the void* in WorkingMemHandle::Allocate.
2177  std::pair<BackendId, ExecutionData> dataPair;
2178  dataPair.first = layer->GetBackendId();
2179 
2180  executionDataVec.push_back(dataPair);
2181  workingMemDescriptors.push_back(workingMemDescriptor);
2182 
2183  layerIndex++;
2184  }
2185  }
2186 
2187  std::vector<std::pair<std::shared_ptr<TensorMemory>, MemorySource>> tensorMemory;
2188 
2189  auto externalMemoryManager = CreateExternalMemoryManger(tensorMemory);
2190 
2191  // Sort m_TensorMemory, so its order matches the outputSlot order
2192  std::sort(tensorMemory.begin(), tensorMemory.end(),
2193  [](const std::pair<std::shared_ptr<TensorMemory>, MemorySource>& lhs,
2194  const std::pair<std::shared_ptr<TensorMemory>, MemorySource>& rhs)
2195  {
2196  return lhs.first->m_OutputSlotId < rhs.first->m_OutputSlotId;
2197  });
2198 
2199  std::vector<WorkingMemHandle::InputMemDescriptorCoords> inputConnectionsInfo;
2200  std::vector<WorkingMemHandle::OutputMemDescriptorCoords> outputConnectionsInfo;
2201 
2202  for (const auto& handleInfo: outputToHandleInfoMap)
2203  {
2204  if (handleInfo.second.m_IsOutputLayerHandle)
2205  {
2206  outputConnectionsInfo.emplace_back(handleInfo.second.m_OutputMemDescriptorCoords);
2207  }
2208 
2209  if (handleInfo.second.m_IsInputLayerHandle)
2210  {
2211  inputConnectionsInfo.emplace_back(handleInfo.second.m_InputMemDescriptorCoords);
2212  }
2213  }
2214 
2215  return std::make_unique<WorkingMemHandle>(networkId,
2216  inputConnectionsInfo,
2217  outputConnectionsInfo,
2218  workingMemDescriptors,
2219  std::move(externalMemoryManager),
2220  std::move(tensorMemory),
2221  std::move(managedTensorHandles),
2222  std::move(unmanagedTensorHandles),
2223  executionDataVec,
2224  &m_Backends);
2225 }

References ARMNN_ASSERT, ARMNN_NO_DEPRECATE_WARN_BEGIN, ARMNN_NO_DEPRECATE_WARN_END, ITensorHandleFactory::CreateTensorHandle(), Layer::GetBackendId(), TensorHandleFactoryRegistry::GetFactory(), and ITensorHandleFactory::LegacyFactoryId.

Referenced by RuntimeImpl::CreateWorkingMemHandle().
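
Example of overlapped execution with one handle per thread. This is a sketch only: it assumes the network was loaded with asynchronous execution enabled and that loadedNetwork, networkId and the per-thread tensors already exist:

    #include <thread>

    auto memHandleA = loadedNetwork->CreateWorkingMemHandle(networkId);
    auto memHandleB = loadedNetwork->CreateWorkingMemHandle(networkId);

    // Each thread uses its own working memory, so the two runs can overlap.
    std::thread workerA([&] { loadedNetwork->Execute(inputsA, outputsA, *memHandleA); });
    std::thread workerB([&] { loadedNetwork->Execute(inputsB, outputsB, *memHandleB); });
    workerA.join();
    workerB.join();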

◆ EnqueueWorkload()

Status EnqueueWorkload ( const InputTensors & inputTensors,
const OutputTensors & outputTensors,
std::vector< ImportedInputId > preImportedInputIds = {},
std::vector< ImportedOutputId > preImportedOutputIds = {} 
)

Single thread execution of the loaded network.

Definition at line 851 of file LoadedNetwork.cpp.

855 {
856  const Graph& graph = m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph();
857 
858  // Walk graph to determine the order of execution.
859  if (graph.GetNumLayers() < 2)
860  {
861  ARMNN_LOG(warning) << "IRuntime::EnqueueWorkload()::Less than two nodes in graph";
862  return Status::Failure;
863  }
864 
865  // Data that must be kept alive for the entire execution of the workload.
866  WorkloadData workloadData(inputTensors, outputTensors);
867 
868  // Input tensors can be provided as parameters or pre imported. Either way the number of
869  // tensors should match the number of inputs.
870  if (graph.GetNumInputs() != (inputTensors.size() + preImportedInputIds.size()))
871  {
872  throw InvalidArgumentException("Number of inputs provided does not match network.");
873  }
874 
875  // For each input to the network, call EnqueueInput with the data passed by the user.
 876  {
 877  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "PrepareInputs");
 878  m_InputQueue.clear();
879  m_InputQueue.reserve(graph.GetNumInputs());
880 
881  unsigned int inputIndex = 0;
882  unsigned int importedInputIdIndex = 0;
883  std::sort(preImportedInputIds.begin(), preImportedInputIds.end());
884  for (const BindableLayer* inputLayer : graph.GetInputLayers())
885  {
886  if (importedInputIdIndex < preImportedInputIds.size() &&
887  inputIndex == preImportedInputIds[importedInputIdIndex])
888  {
889  // Only replace tensorhandles if they have not already been replaced
890  if (!m_IsInputImported[inputIndex])
891  {
892  auto outputTensorHandle = m_PreImportedInputHandles[inputIndex].m_TensorHandle.get();
893 
894  for (const auto& workloadInfo: m_InputWorkloadSlotPairs[inputLayer->GetBindingId()])
895  {
896  auto workload = m_WorkloadQueue[workloadInfo.m_WorkloadIndex].get();
897  workload->ReplaceInputTensorHandle(outputTensorHandle, workloadInfo.m_SlotIndex);
898  }
899  m_IsInputImported[inputIndex] = true;
900  }
901  importedInputIdIndex++;
902  }
903  else
904  {
905  if (m_IsInputImported[inputIndex])
906  {
907  OutputHandler& handler = const_cast<OutputHandler&>(inputLayer->GetOutputHandler(0));
908 
909  for (const auto& workloadInfo: m_InputWorkloadSlotPairs[inputLayer->GetBindingId()])
910  {
911  auto workload = m_WorkloadQueue[workloadInfo.m_WorkloadIndex].get();
912  workload->ReplaceInputTensorHandle(handler.GetData(), workloadInfo.m_SlotIndex);
913  }
914 
915  m_IsInputImported[inputIndex] = false;
916  }
917 
918  // InputTensorHandle is not imported yet, process to enqueue input
919  const TensorPin& pin = workloadData.GetInputTensorPin(inputLayer->GetBindingId());
920  EnqueueInput(*inputLayer, pin.GetTensorHandle(), pin.GetTensorInfo());
921  }
922  inputIndex++;
923  }
924  }
925  // For each output to the network, call EnqueueOutput with the data passed by the user.
 926  {
 927  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "PrepareOutputs");
 928  m_OutputQueue.clear();
929  m_OutputQueue.reserve(graph.GetNumOutputs());
930 
931  if (preImportedOutputIds.size() > graph.GetNumOutputs())
932  {
933  throw InvalidArgumentException("Invalid number of preImportedOutputIds");
934  }
935 
936  unsigned int outputIndex = 0;
937  unsigned int importedOutputIdIndex = 0;
938  std::sort(preImportedOutputIds.begin(), preImportedOutputIds.end());
939  for (const BindableLayer* outputLayer : graph.GetOutputLayers())
940  {
941  if (importedOutputIdIndex < preImportedOutputIds.size() &&
942  outputIndex == preImportedOutputIds[importedOutputIdIndex])
943  {
944  // Only replace tensorhandles if they have not already been replaced
945  ITensorHandle* inputTensorHandle = m_PreImportedOutputHandles[outputIndex].m_TensorHandle.get();
946 
947  if (!m_IsOutputImported[outputIndex])
948  {
949  const auto bindingId = outputLayer->GetBindingId();
950  const auto& indices = m_OutputWorkloadSlotPairs[bindingId];
951 
952  auto outputWorkload = m_WorkloadQueue[indices.m_OutputSlotIndices.m_WorkloadIndex].get();
953 
954  outputWorkload->ReplaceOutputTensorHandle(inputTensorHandle,
955  indices.m_OutputSlotIndices.m_SlotIndex);
956 
957  for (const auto& workloadInfo: indices.m_InputSlotIndices)
958  {
959  auto inputWorkload = m_WorkloadQueue[workloadInfo.m_WorkloadIndex].get();
960  inputWorkload->ReplaceInputTensorHandle(inputTensorHandle, workloadInfo.m_SlotIndex);
961  }
962  m_IsOutputImported[outputIndex] = true;
963  }
964 
965  ARMNN_ASSERT_MSG(inputTensorHandle != nullptr, "Data should have been allocated.");
966  MemSyncQueueDescriptor syncDesc;
967  syncDesc.m_Inputs.push_back(inputTensorHandle);
968  WorkloadInfo info;
969  info.m_InputTensorInfos.push_back(
970  outputLayer->GetInputSlot(0).GetTensorInfo());
971  auto syncWorkload = std::make_unique<SyncMemGenericWorkload>(syncDesc, info);
972  ARMNN_ASSERT_MSG(syncWorkload, "No sync workload created");
973  m_OutputQueue.push_back(std::move(syncWorkload));
974  importedOutputIdIndex++;
975  }
976  else
977  {
978  if (m_IsOutputImported[outputIndex])
979  {
980  const auto bindingId = outputLayer->GetBindingId();
981  const auto& indices = m_OutputWorkloadSlotPairs[bindingId];
982 
983  auto outputWorkload = m_WorkloadQueue[indices.m_OutputSlotIndices.m_WorkloadIndex].get();
984  const OutputHandler& outputHandler =
985  outputLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetOutputHandler();
986 
987  outputWorkload->ReplaceOutputTensorHandle(
988  outputHandler.GetData(), indices.m_OutputSlotIndices.m_SlotIndex);
989 
990  for (const auto& workloadInfo: indices.m_InputSlotIndices)
991  {
992  auto inputWorkload = m_WorkloadQueue[workloadInfo.m_WorkloadIndex].get();
993  inputWorkload->ReplaceInputTensorHandle(outputHandler.GetData(), workloadInfo.m_SlotIndex);
994  }
995  m_IsOutputImported[outputIndex] = false;
996  }
997 
998  const TensorPin& pin = workloadData.GetOutputTensorPin(outputLayer->GetBindingId());
999  // OutputTensorHandle is not imported yet, process to enqueue Output
1000  EnqueueOutput(*outputLayer, pin.GetTensorHandle(), pin.GetTensorInfo());
1001  }
1002  outputIndex++;
1003  }
1004  }
1005 
1006  std::unique_ptr<TimelineUtilityMethods> timelineUtils =
1007  TimelineUtilityMethods::GetTimelineUtils(*m_ProfilingService);
1008  ProfilingGuid inferenceGuid = m_ProfilingService->GetNextGuid();
1009  if (timelineUtils)
1010  {
1011  // Add inference timeline trace if profiling is enabled.
1012  ProfilingGuid networkGuid = m_OptimizedNetwork->GetGuid();
1013  timelineUtils->CreateTypedEntity(inferenceGuid, LabelsAndEventClasses::INFERENCE_GUID);
1014  timelineUtils->CreateRelationship(ProfilingRelationshipType::RetentionLink,
1015  networkGuid,
1016  inferenceGuid,
1017  LabelsAndEventClasses::EXECUTION_OF_GUID);
1018  timelineUtils->RecordEvent(inferenceGuid, LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS);
1019  }
1020 
1021  bool executionSucceeded = true;
1022 
1023  {
1024  if (m_ProfilingService->IsProfilingEnabled())
1025  {
1026  m_ProfilingService->IncrementCounterValue(INFERENCES_RUN);
1027  }
1028  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Execute");
1029  ARMNN_SCOPED_HEAP_PROFILING("Executing");
1030  executionSucceeded = Execute(timelineUtils, inferenceGuid);
1031  }
1032 
1033  if (timelineUtils)
1034  {
1035  // Add end of life of the inference timeline if profiling is enabled.
1036  timelineUtils->RecordEvent(inferenceGuid, LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS);
1037  timelineUtils->Commit();
1038  }
1039 
1040  return executionSucceeded ? Status::Success : Status::Failure;
1041 }

References ARMNN_ASSERT_MSG, ARMNN_LOG, ARMNN_SCOPED_PROFILING_EVENT, armnn::Failure, OutputHandler::GetData(), Graph::GetInputLayers(), Graph::GetNumInputs(), Graph::GetNumLayers(), Graph::GetNumOutputs(), Graph::GetOutputLayers(), armnn::info, QueueDescriptor::m_Inputs, armnn::Undefined, and armnn::warning.

Referenced by RuntimeImpl::EnqueueWorkload().
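
A minimal single-threaded inference sketch. The binding ids are assumptions for illustration (they are assigned when the original INetwork is created), and recent ArmNN versions require an input TensorInfo to be marked constant before wrapping it in a ConstTensor:

    armnn::TensorInfo inputInfo  = loadedNetwork->GetInputTensorInfo(inputBindingId);
    armnn::TensorInfo outputInfo = loadedNetwork->GetOutputTensorInfo(outputBindingId);
    inputInfo.SetConstant(true); // ConstTensor requires a constant TensorInfo

    std::vector<float> inputData(inputInfo.GetNumElements());
    std::vector<float> outputData(outputInfo.GetNumElements());

    armnn::InputTensors inputTensors
        { { inputBindingId, armnn::ConstTensor(inputInfo, inputData.data()) } };
    armnn::OutputTensors outputTensors
        { { outputBindingId, armnn::Tensor(outputInfo, outputData.data()) } };

    if (loadedNetwork->EnqueueWorkload(inputTensors, outputTensors) != armnn::Status::Success)
    {
        // Failure: e.g. the graph has fewer than two layers (see the warning above).
    }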

◆ Execute()

Status Execute ( const InputTensors & inputTensors,
const OutputTensors & outputTensors,
IWorkingMemHandle & workingMemHandle,
std::vector< ImportedInputId > preImportedInputs = {},
std::vector< ImportedOutputId > preImportedOutputs = {} 
)

Thread safe execution of the loaded network.

Definition at line 1741 of file LoadedNetwork.cpp.

1746 {
1747  const Graph& graph = m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph();
1748 
1749  if (inputTensors.size() + preImportedInputs.size() != graph.GetNumInputs())
1750  {
1751  if (preImportedInputs.empty())
1752  {
1753  throw InvalidArgumentException("LoadedNetwork::Execute: Number of inputs provided does not match network.");
1754  }
1755  else
1756  {
1757  throw InvalidArgumentException("LoadedNetwork::Execute: "
1758  "Number of inputs + preImportedInputs provided does not match network.");
1759  }
1760  }
1761 
1762  if (outputTensors.size() + preImportedOutputs.size() != graph.GetNumOutputs())
1763  {
1764  if (preImportedOutputs.empty())
1765  {
1766  throw InvalidArgumentException("LoadedNetwork::Execute: "
1767  "Number of outputs provided does not match network.");
1768  }
1769  else
1770  {
1771  throw InvalidArgumentException("LoadedNetwork::Execute: "
1772  "Number of outputs + preImportedOutputs provided does not match network.");
1773  }
1774  }
1775 
1776  WorkingMemHandle& workingMemHandle = dynamic_cast<WorkingMemHandle&>(iWorkingMemHandle);
1777  // Collect all the given LayerBindingIds and check them for duplicates and unknowns.
1778  std::vector<LayerBindingId>& bindingIds = workingMemHandle.GetBindingIdVector();
1779  unsigned int index = 0;
1780  for (auto pair : inputTensors)
1781  {
1782  bindingIds[index++] = pair.first;
1783  }
1784  for (ImportedInputId id : preImportedInputs)
1785  {
1786  bindingIds[index++] = ValidateImportedInputID(id);
1787  }
1788  for (auto pair : outputTensors)
1789  {
1790  bindingIds[index++] = pair.first;
1791  }
1792  for (ImportedOutputId id : preImportedOutputs)
1793  {
1794  bindingIds[index++] = ValidateImportedOutputID(id);
1795  }
1796 
1797  workingMemHandle.ValidateBindingIds();
1798 
1799  auto resetMemHandle = [&]()
1800  {
1801  for (ImportedInputId id: preImportedInputs)
1802  {
1803  const LayerBindingId layerBindingId = m_PreImportedInputHandles[id].m_LayerBindingId;
1804 
1805  auto inputHandle = workingMemHandle.GetInputHandle(layerBindingId);
1806  auto inputConnections = workingMemHandle.GetInputConnections(layerBindingId);
1807  for (auto it : inputConnections)
1808  {
1809  *it = inputHandle;
1810  }
1811  }
1812 
1813  for (ImportedOutputId id: preImportedOutputs)
1814  {
1815  const LayerBindingId layerBindingId = m_PreImportedOutputHandles[id].m_LayerBindingId;
1816 
1817  auto outputHandle = workingMemHandle.GetOutputHandle(layerBindingId);
1818  auto outputConnections = workingMemHandle.GetOutputConnection(layerBindingId);
1819 
1820  for (auto it : outputConnections)
1821  {
1822  *it = outputHandle;
1823  }
1824  }
1825  };
1826 
1827  std::unique_ptr<TimelineUtilityMethods> timelineUtils =
1828  TimelineUtilityMethods::GetTimelineUtils(*m_ProfilingService);
1829  ProfilingGuid inferenceGuid = m_ProfilingService->GetNextGuid();
1830  if (timelineUtils)
1831  {
1832  // Add inference timeline trace if profiling is enabled.
1833  ProfilingGuid networkGuid = m_OptimizedNetwork->GetGuid();
1834  timelineUtils->CreateTypedEntity(inferenceGuid,LabelsAndEventClasses::INFERENCE_GUID);
1835  timelineUtils->CreateRelationship(ProfilingRelationshipType::RetentionLink,
1836  networkGuid,
1837  inferenceGuid,
1838  LabelsAndEventClasses::EXECUTION_OF_GUID);
1839  timelineUtils->RecordEvent(inferenceGuid,LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS);
1840  }
1841 
1842  bool executionSucceeded = true;
1843 
1844  if (timelineUtils)
1845  {
1846  // Add end of life of the inference timeline if profiling is enabled.
1847  timelineUtils->RecordEvent(inferenceGuid,LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS);
1848  timelineUtils->Commit();
1849  }
1850 
1851  if (!workingMemHandle.IsAllocated())
1852  {
1853  workingMemHandle.Allocate();
1854  }
1855 
1856  {
1857  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "PrepareInputs");
1858  for (auto pair : inputTensors)
1859  {
1860  EnqueueInput(pair.second, workingMemHandle.GetInputHandle(pair.first));
1861  }
1862 
1863  // Swap in the pre-imported inputs if any
1864  for (ImportedInputId id : preImportedInputs)
1865  {
1866  const ImportedTensorHandlePin& importedInputPin = m_PreImportedInputHandles[id];
1867  const LayerBindingId layerBindingId = m_PreImportedInputHandles[id].m_LayerBindingId;
1868  const auto& preimportedHandle = importedInputPin.m_TensorHandle;
1869 
1870  auto inputConnections = workingMemHandle.GetInputConnections(layerBindingId);
1871  for (auto it : inputConnections)
1872  {
1873  *it = preimportedHandle.get();
1874  }
1875  }
1876  }
1877  {
1878  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "PrepareOutputs");
1879  if (m_NetworkProperties.m_OutputSource != MemorySource::Undefined)
1880  {
1881  for (auto pair: outputTensors)
1882  {
1883  ImportOutputTensor(pair.second, workingMemHandle.GetOutputHandle(pair.first));
1884  }
1885  }
1886 
1887  for (ImportedOutputId id : preImportedOutputs)
1888  {
1889  const ImportedTensorHandlePin& importedOutputPin = m_PreImportedOutputHandles[id];
1890  const LayerBindingId layerBindingId = m_PreImportedOutputHandles[id].m_LayerBindingId;
1891  const auto& preimportedHandle = importedOutputPin.m_TensorHandle;
1892 
1893  auto outputConnections = workingMemHandle.GetOutputConnection(layerBindingId);
1894  for (auto it : outputConnections)
1895  {
1896  *it = preimportedHandle.get();
1897  }
1898  }
1899  }
1900 
1901  auto Fail = [&](const std::exception& error)
1902  {
1903  ARMNN_LOG(error) << "An error occurred attempting to execute a workload: " << error.what();
1904  executionSucceeded = false;
1905  };
1906  ProfilingDynamicGuid workloadInferenceID(0);
1907 
1908  try
1909  {
1910  for (unsigned int i = 0; i < m_WorkloadQueue.size(); ++i)
1911  {
1912  auto& workload = m_WorkloadQueue[i];
1913  if (timelineUtils)
1914  {
1915  workloadInferenceID = timelineUtils->RecordWorkloadInferenceAndStartOfLifeEvent(workload->GetGuid(),
1916  inferenceGuid);
1917  }
1918 
1919  workload->ExecuteAsync(workingMemHandle.GetExecutionDataAt(i).second);
1920 
1921  if (timelineUtils)
1922  {
1923  timelineUtils->RecordEndOfLifeEvent(workloadInferenceID);
1924  }
1925  }
1926  }
1927  catch (const RuntimeException& error)
1928  {
1929  resetMemHandle();
1930  Fail(error);
1931  }
1932  catch (const std::runtime_error& error)
1933  {
1934  resetMemHandle();
1935  Fail(error);
1936  }
1937  catch (...)
1938  {
1939  resetMemHandle();
1940  throw;
1941  }
1942 
1943  if (m_NetworkProperties.m_OutputSource == MemorySource::Undefined)
1944  {
1945  for (auto pair: outputTensors)
1946  {
1947  CopyToOutputTensor(pair.second, workingMemHandle.GetOutputHandle(pair.first));
1948  }
1949  }
1950  else
1951  {
1952  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "SyncMemGeneric_Execute");
1953  workingMemHandle.MemSyncOutputs();
1954  }
1955 
1956  resetMemHandle();
1957 
1958  return executionSucceeded ? Status::Success : Status::Failure;
1959 }

References WorkingMemHandle::Allocate(), ARMNN_LOG, ARMNN_SCOPED_PROFILING_EVENT, armnn::CopyToOutputTensor(), armnn::error, armnn::Failure, WorkingMemHandle::GetBindingIdVector(), WorkingMemHandle::GetExecutionDataAt(), WorkingMemHandle::GetInputConnections(), WorkingMemHandle::GetInputHandle(), Graph::GetNumInputs(), Graph::GetNumOutputs(), WorkingMemHandle::GetOutputConnection(), WorkingMemHandle::GetOutputHandle(), WorkingMemHandle::IsAllocated(), INetworkProperties::m_OutputSource, WorkingMemHandle::MemSyncOutputs(), armnn::Success, armnn::Undefined, and WorkingMemHandle::ValidateBindingIds().

Referenced by RuntimeImpl::Execute().
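
A sketch of a thread-safe call. It assumes the network was loaded with m_AsyncEnabled set and that inputTensors/outputTensors are built as in the EnqueueWorkload() example above:

    std::unique_ptr<armnn::IWorkingMemHandle> memHandle =
        loadedNetwork->CreateWorkingMemHandle(networkId);

    // Safe to call concurrently as long as every caller has its own handle.
    armnn::Status status = loadedNetwork->Execute(inputTensors, outputTensors, *memHandle);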

◆ FreeWorkingMemory()

void FreeWorkingMemory ( )

Definition at line 1234 of file LoadedNetwork.cpp.

1235 {
1236 #if !defined(ARMNN_DISABLE_THREADS)
1237  std::lock_guard<std::mutex> lockGuard(m_WorkingMemMutex);
1238 #endif
1239 
1240  if (!m_IsWorkingMemAllocated)
1241  {
1242  return;
1243  }
1244 
1245  if (m_ExternalMemoryManager)
1246  {
1247  m_ExternalMemoryManager->Deallocate();
1248  }
1249 
1250  // Informs the memory managers to release memory in its respective memory group
1251  for (auto&& memoryManager : m_BackendMemoryMangers)
1252  {
1253  if (memoryManager)
1254  {
1255  memoryManager->Release();
1256  }
1257  }
1258  m_TensorHandleFactoryRegistry.ReleaseMemory();
1259  m_IsWorkingMemAllocated = false;
1260 }

References TensorHandleFactoryRegistry::ReleaseMemory().

Referenced by RuntimeImpl::CreateWorkingMemHandle(), RuntimeImpl::EnqueueWorkload(), and LoadedNetwork::~LoadedNetwork().

◆ GetInputTensorInfo()

TensorInfo GetInputTensorInfo ( LayerBindingId  layerId) const

Definition at line 709 of file LoadedNetwork.cpp.

710 {
711  for (auto&& inputLayer : m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().GetInputLayers())
712  {
713  ARMNN_ASSERT_MSG(inputLayer->GetNumOutputSlots() == 1, "Input layer should have exactly 1 output slot");
714  if (inputLayer->GetBindingId() == layerId)
715  {
716  return inputLayer->GetOutputSlot(0).GetTensorInfo();
717  }
718  }
719 
720  throw InvalidArgumentException(fmt::format("No input layer is associated with id {}", layerId));
721 }

References ARMNN_ASSERT_MSG.

Referenced by RuntimeImpl::GetInputTensorInfo().
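
Example (the binding id is illustrative):

    try
    {
        armnn::TensorInfo inputInfo = loadedNetwork->GetInputTensorInfo(inputBindingId);
    }
    catch (const armnn::InvalidArgumentException&)
    {
        // No input layer is associated with the given id.
    }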

◆ GetNetworkGuid()

ProfilingGuid GetNetworkGuid ( )

Definition at line 704 of file LoadedNetwork.cpp.

705 {
706  return m_OptimizedNetwork->GetGuid();
707 }

◆ GetOutputTensorInfo()

TensorInfo GetOutputTensorInfo ( LayerBindingId  layerId) const

Definition at line 723 of file LoadedNetwork.cpp.

724 {
725  for (auto&& outputLayer : m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().GetOutputLayers())
726  {
727  ARMNN_ASSERT_MSG(outputLayer->GetNumInputSlots() == 1, "Output layer should have exactly 1 input slot");
728  ARMNN_ASSERT_MSG(outputLayer->GetInputSlot(0).GetConnection(), "Input slot on Output layer must be connected");
729  if (outputLayer->GetBindingId() == layerId)
730  {
731  return outputLayer->GetInputSlot(0).GetTensorInfo();
732  }
733  }
734 
735  throw InvalidArgumentException(fmt::format("No output layer is associated with id {}", layerId));
736 }

References ARMNN_ASSERT_MSG.

Referenced by RuntimeImpl::GetOutputTensorInfo().

◆ GetProfiler()

const std::shared_ptr<IProfiler>& GetProfiler ( ) const
inline

Definition at line 87 of file LoadedNetwork.hpp.

87 { return m_OptimizedNetwork->GetProfiler(); }

Referenced by RuntimeImpl::CreateWorkingMemHandle(), RuntimeImpl::EnqueueWorkload(), and RuntimeImpl::Execute().

◆ ImportInputs()

std::vector< ImportedInputId > ImportInputs ( const InputTensors & inputTensors,
MemorySource  forceImportMemorySource = MemorySource::Undefined 
)

Definition at line 1430 of file LoadedNetwork.cpp.

1432 {
1433  if (!m_NetworkProperties.m_AsyncEnabled)
1434  {
1435  // Cannot import if import is not enabled and forceImportMemorySource is undefined
1436  if (forceImportMemorySource == MemorySource::Undefined)
1437  {
1438  throw MemoryImportException("ImportInputs: Memory Import failed, NetworkProperties.m_ImportEnabled");
1439  }
1440  // The number of pre imported tensors should not exceed the number of inputs.
1441  if (inputTensors.size() > m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().GetNumInputs())
1442  {
1443  throw MemoryImportException("ImportInputs: The number of tensors provided exceeds the number of inputs.");
1444  }
1445 
1446  std::vector<ImportedInputId> importedInputs;
1447  Graph& graph = m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().TopologicalSort();
1448  unsigned int inputIndex = 0;
1449  for (const BindableLayer* inputLayer : graph.GetInputLayers())
1450  {
1451  auto outputTensorHandle = m_PreImportedInputHandles[inputIndex].m_TensorHandle.get();
1452 
1453  if (!outputTensorHandle)
1454  {
1455  inputIndex++;
1456  continue;
1457  }
1458 
1459  auto layerBindingId = inputLayer->GetBindingId();
1460  auto it = std::find_if(inputTensors.begin(), inputTensors.end(), [=](const auto& inputTensor)
1461  {
1462  return inputTensor.first == layerBindingId;
1463  });
1464 
1465  if (it == inputTensors.end())
1466  {
1467  inputIndex++;
1468  continue;
1469  }
1470 
1471  const auto& inputTensor = *it;
1472  std::unique_ptr<ITensorHandle> passThroughTensorHandle =
1473  std::make_unique<ConstPassthroughTensorHandle>(inputTensor.second.GetInfo(),
1474  inputTensor.second.GetMemoryArea());
1475 
1476  try
1477  {
1478  if (outputTensorHandle->CanBeImported(passThroughTensorHandle->Map(), forceImportMemorySource)
1479  && (outputTensorHandle->Import(passThroughTensorHandle->Map(), forceImportMemorySource)))
1480  {
1481  importedInputs.push_back(inputIndex);
1482  }
1483  passThroughTensorHandle->Unmap();
1484  }
1485  catch(const MemoryImportException& exception)
1486  {
1487  ARMNN_LOG(error) << "An error occurred attempting to import input_"
1488  << inputIndex << " : " << exception.what();
1489  passThroughTensorHandle->Unmap();
1490  }
1491  inputIndex++;
1492  }
1493 
1494  return importedInputs;
1495  }
1496  else
1497  {
1498  // Import when the import of network properties is enabled
1499  std::vector<ImportedInputId> importedInputs;
1500  Graph& graph = m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().TopologicalSort();
1501 
1502  for (auto inputTensor : inputTensors)
1503  {
1504  auto layerBindingId = inputTensor.first;
1505  auto it = std::find_if(graph.GetInputLayers().begin(), graph.GetInputLayers().end(), [=](auto* layer)
1506  {
1507  return layer->GetBindingId() == layerBindingId;
1508  });
1509 
1510  if (it == graph.GetInputLayers().end())
1511  {
1512  throw MemoryImportException(fmt::format(
1513  "ImportInputs: Memory Import failed, unknown LayerBindingId: {}", layerBindingId));
1514  }
1515 
1516  const Layer* layer = *it;
1517  if (layer->GetType() != LayerType::Input)
1518  {
1519  throw InvalidArgumentException("ImportInputs: given layer not an InputLayer");
1520  }
1521 
1522  auto& backend = m_Backends.at(layer->GetBackendId());
1523  if (!HasMatchingCapability(BackendOptions::BackendOption{"PreImportIOTensors", true},
1524  backend->GetCapabilities()))
1525  {
1526  std::string er = backend->GetId();
1527  er += " does not have PreImportIOTensors capability";
1528  throw BackendCapabilityException(er);
1529  }
1530 
1531  const OutputSlot& outputSlot = layer->GetOutputSlots()[0];
1532 
1533  ITensorHandleFactory::FactoryId factoryId = outputSlot.GetTensorHandleFactoryId();
1534  const TensorInfo& tensorInfo = outputSlot.GetTensorInfo();
1535 
1536  ITensorHandleFactory* handleFactory = m_TensorHandleFactoryRegistry.GetFactory(factoryId);
1537  ARMNN_ASSERT(handleFactory);
1538 
1539  ImportedTensorHandlePin importedTensorHandlePin{layerBindingId,
1540  handleFactory->CreateTensorHandle(tensorInfo, false)};
1541 
1542  ITensorHandle* tensorHandle = importedTensorHandlePin.m_TensorHandle.get();
1543 
1544  if (!CheckFlag(tensorHandle->GetImportFlags(), forceImportMemorySource))
1545  {
1546  throw MemoryImportException(
1547  fmt::format("ImportInputs: Memory Import failed, backend: "
1548  "{} does not support importing from source {}"
1549  , factoryId, m_NetworkProperties.m_InputSource));
1550  }
1551 
1552  std::unique_ptr<ITensorHandle> passThroughTensorHandle =
1553  std::make_unique<ConstPassthroughTensorHandle>(inputTensor.second.GetInfo(),
1554  inputTensor.second.GetMemoryArea());
1555 
1556  if (tensorHandle->Import(passThroughTensorHandle->Map(), forceImportMemorySource))
1557  {
1558  importedInputs.push_back(m_CurImportedInputId++);
1559  passThroughTensorHandle->Unmap();
1560  }
1561  else
1562  {
1563  passThroughTensorHandle->Unmap();
1564  throw MemoryImportException("ImportInputs: Memory Import failed");
1565  }
1566 
1567  m_PreImportedInputHandles.push_back(std::move(importedTensorHandlePin));
1568  }
1569  return importedInputs;
1570  }
1571 }

References ARMNN_ASSERT, ARMNN_LOG, Graph::InputLayersAccessor::begin(), armnn::CheckFlag(), ITensorHandleFactory::CreateTensorHandle(), Graph::InputLayersAccessor::end(), armnn::error, Layer::GetBackendId(), TensorHandleFactoryRegistry::GetFactory(), ITensorHandle::GetImportFlags(), Graph::GetInputLayers(), Layer::GetOutputSlots(), OutputSlot::GetTensorHandleFactoryId(), OutputSlot::GetTensorInfo(), Layer::GetType(), armnn::HasMatchingCapability(), ITensorHandle::Import(), armnn::Input, INetworkProperties::m_AsyncEnabled, INetworkProperties::m_InputSource, armnn::Undefined, and Exception::what().

Referenced by RuntimeImpl::ImportInputs().
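
A force-import sketch for a network loaded without import enabled. Buffer names and the MemorySource are illustrative assumptions; the user memory must satisfy the backend's alignment requirements or the import is rejected:

    // Ask the backend to wrap the user buffers instead of copying them.
    std::vector<armnn::ImportedInputId> importedInputIds =
        loadedNetwork->ImportInputs(inputTensors, armnn::MemorySource::Malloc);

    // Hand the ids to EnqueueWorkload in place of the corresponding input tensors.
    loadedNetwork->EnqueueWorkload({}, outputTensors, importedInputIds);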

◆ ImportOutputs()

std::vector< ImportedOutputId > ImportOutputs ( const OutputTensors & outputTensors,
MemorySource  forceImportMemorySource = MemorySource::Undefined 
)

Definition at line 1573 of file LoadedNetwork.cpp.

1575 {
1576  if (!m_NetworkProperties.m_AsyncEnabled)
1577  {
1578  // Cannot import if import is not enabled and forceImportMemorySource is undefined
1579  if (forceImportMemorySource == MemorySource::Undefined)
1580  {
1581  throw MemoryImportException("ImportOutputs: Memory Import failed, NetworkProperties.m_ImportEnabled");
1582  }
1583  // If forceImportMemorySource is defined, try import if memory is aligned
1584  if (outputTensors.size() != m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().GetNumOutputs())
1585  {
1586  throw MemoryImportException("ImportOutputs: Force Import failed, incorrect number of tensors");
1587  }
1588  std::vector<ImportedOutputId> importedOutputs;
1589  Graph& graph = m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().TopologicalSort();
1590 
1591  unsigned int outputIndex = 0;
1592  for (const BindableLayer* const outputLayer : graph.GetOutputLayers())
1593  {
1594  auto inputTensorHandle = m_PreImportedOutputHandles[outputIndex].m_TensorHandle.get();
1595  if (!inputTensorHandle)
1596  {
1597  outputIndex++;
1598  continue;
1599  }
1600 
1601  auto layerBindingId = outputLayer->GetBindingId();
1602  auto it = std::find_if(outputTensors.begin(), outputTensors.end(), [=] (const auto& outputTensor)
1603  {
1604  return outputTensor.first == layerBindingId;
1605  });
1606 
1607  if (it == outputTensors.end())
1608  {
1609  outputIndex++;
1610  continue;
1611  }
1612 
1613  const auto outputTensor = *it;
1614  try
1615  {
1616  // Check if the output memory can be imported
1617  if (inputTensorHandle->CanBeImported(outputTensor.second.GetMemoryArea(), forceImportMemorySource)
1618  && inputTensorHandle->Import(outputTensor.second.GetMemoryArea(), forceImportMemorySource))
1619  {
1620  importedOutputs.push_back(outputIndex);
1621  }
1622  }
1623  catch(const MemoryImportException& exception)
1624  {
1625  ARMNN_LOG(error) << "An error occurred attempting to import output_"
1626  << outputIndex << " : " << exception.what();
1627  }
1628  outputIndex++;
1629  }
1630  return importedOutputs;
1631  }
1632 
1633  std::vector<ImportedOutputId> importedOutputs;
1634  Graph& graph = m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().TopologicalSort();
1635 
1636  for (const auto& outputTensor : outputTensors)
1637  {
1638  auto layerBindingId = outputTensor.first;
1639  auto it = std::find_if(graph.GetOutputLayers().begin(), graph.GetOutputLayers().end(), [=](auto* layer)
1640  {
1641  return layer->GetBindingId() == layerBindingId;
1642  });
1643 
1644  if (it == graph.GetOutputLayers().end())
1645  {
1646  throw MemoryImportException(fmt::format("ImportOutputs: Memory Import failed, unknown LayerBindingId: {}",
1647  layerBindingId));
1648  }
1649 
1650  const Layer* layer = *it;
1651  if (layer->GetType() != LayerType::Output)
1652  {
1653  throw InvalidArgumentException("ImportOutputs: given layer not an OutputLayer");
1654  }
1655 
1656  auto& backend = m_Backends.at(layer->GetBackendId());
1657  if (!HasMatchingCapability(BackendOptions::BackendOption{"PreImportIOTensors", true},
1658  backend->GetCapabilities()))
1659  {
1660  std::string er = backend->GetId();
1661  er += " does not have PreImportIOTensors capability";
1662  throw BackendCapabilityException(er);
1663  }
1664 
1665  const InputSlot& inputSlot = layer->GetInputSlots()[0];
1666  ITensorHandleFactory::FactoryId factoryId = inputSlot.GetConnectedOutputSlot()->GetTensorHandleFactoryId();
1667  const TensorInfo& tensorInfo = inputSlot.GetTensorInfo();
1668 
1669  ITensorHandleFactory* handleFactory = m_TensorHandleFactoryRegistry.GetFactory(factoryId);
1670  ARMNN_ASSERT(handleFactory);
1671 
1672  ImportedTensorHandlePin importedTensorHandlePin{layerBindingId,
1673  handleFactory->CreateTensorHandle(tensorInfo, false)};
1674 
1675  ITensorHandle* tensorHandle = importedTensorHandlePin.m_TensorHandle.get();
1676 
1677  if (!CheckFlag(tensorHandle->GetImportFlags(), forceImportMemorySource))
1678  {
1679  throw MemoryImportException(fmt::format("ImportInputs: Memory Import failed, backend: "
1680  "{} does not support importing from source {}"
1681  , factoryId, forceImportMemorySource));
1682  }
1683 
1684  if (tensorHandle->Import(outputTensor.second.GetMemoryArea(), forceImportMemorySource))
1685  {
1686  importedOutputs.push_back(m_CurImportedOutputId++);
1687  }
1688  else
1689  {
1690  throw MemoryImportException("ImportInputs: Memory Import failed");
1691  }
1692 
1693  m_PreImportedOutputHandles.push_back(std::move(importedTensorHandlePin));
1694  }
1695 
1696  return importedOutputs;
1697 }

References ARMNN_ASSERT, ARMNN_LOG, Graph::OutputLayersAccessor::begin(), armnn::CheckFlag(), ITensorHandleFactory::CreateTensorHandle(), Graph::OutputLayersAccessor::end(), armnn::error, Layer::GetBackendId(), InputSlot::GetConnectedOutputSlot(), TensorHandleFactoryRegistry::GetFactory(), ITensorHandle::GetImportFlags(), Layer::GetInputSlots(), Graph::GetOutputLayers(), OutputSlot::GetTensorHandleFactoryId(), InputSlot::GetTensorInfo(), Layer::GetType(), armnn::HasMatchingCapability(), ITensorHandle::Import(), INetworkProperties::m_AsyncEnabled, armnn::Output, armnn::Undefined, and Exception::what().

Referenced by RuntimeImpl::ImportOutputs().
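
The output-side counterpart, with the same caveats: names are illustrative and the buffers must be suitably aligned. In the force-import path a tensor must be supplied for every network output:

    std::vector<armnn::ImportedOutputId> importedOutputIds =
        loadedNetwork->ImportOutputs(outputTensors, armnn::MemorySource::Malloc);

    loadedNetwork->EnqueueWorkload(inputTensors, {}, {}, importedOutputIds);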

◆ IsAsyncEnabled()

bool IsAsyncEnabled ( )
inline

Definition at line 95 of file LoadedNetwork.hpp.

96  {
97  return m_NetworkProperties.m_AsyncEnabled;
98  }

References INetworkProperties::m_AsyncEnabled.

Referenced by RuntimeImpl::CreateWorkingMemHandle(), RuntimeImpl::EnqueueWorkload(), and RuntimeImpl::Execute().

◆ MakeLoadedNetwork()

std::unique_ptr< LoadedNetwork > MakeLoadedNetwork ( std::unique_ptr< IOptimizedNetwork > net,
std::string &  errorMessage,
const INetworkProperties &  networkProperties,
arm::pipe::IProfilingService *  profilingService 
)
static

Definition at line 170 of file LoadedNetwork.cpp.

174 {
175  std::unique_ptr<LoadedNetwork> loadedNetwork;
176 
177  auto Fail = [&](const std::exception& error) -> std::unique_ptr<LoadedNetwork>
178  {
179  errorMessage = ToErrorMessage("An error occurred when preparing the network workloads: ", error);
180  ARMNN_LOG(error) << errorMessage;
181 
182  return std::unique_ptr<LoadedNetwork>();
183  };
184 
185  try
186  {
187  loadedNetwork.reset(new LoadedNetwork(std::move(net), networkProperties, profilingService));
188  }
189  catch (const armnn::RuntimeException& error)
190  {
191  return Fail(error);
192  }
193  catch (const armnn::Exception& error)
194  {
195  return Fail(error);
196  }
197  catch (const std::runtime_error& error)
198  {
199  return Fail(error);
200  }
201 
202  return loadedNetwork;
203 }

References ARMNN_LOG, and armnn::error.

Referenced by RuntimeImpl::LoadNetwork().
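
A creation sketch. The INetworkProperties arguments shown (async disabled, no input/output import) and the profilingService instance are assumptions for illustration:

    std::string errorMessage;
    armnn::INetworkProperties properties(/*asyncEnabled=*/false,
                                         armnn::MemorySource::Undefined,
                                         armnn::MemorySource::Undefined);
    std::unique_ptr<armnn::LoadedNetwork> loadedNetwork =
        armnn::LoadedNetwork::MakeLoadedNetwork(std::move(optimizedNet), errorMessage,
                                                properties, &profilingService);
    if (!loadedNetwork)
    {
        // Creation failed; errorMessage describes what went wrong.
    }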

◆ RegisterDebugCallback()

void RegisterDebugCallback ( const DebugCallbackFunction & func)

Definition at line 2227 of file LoadedNetwork.cpp.

2228 {
2229  for (auto&& workloadPtr: m_WorkloadQueue)
2230  {
2231  workloadPtr.get()->RegisterDebugCallback(func);
2232  }
2233 }

Referenced by RuntimeImpl::RegisterDebugCallback().
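
A sketch that registers a callback on every queued workload; DebugCallbackFunction receives the owning layer's guid, the output slot index and the slot's tensor handle:

    loadedNetwork->RegisterDebugCallback(
        [](armnn::LayerGuid guid, unsigned int slotIndex, armnn::ITensorHandle* tensorHandle)
        {
            // Inspect intermediate tensors here, e.g. map the handle and log its contents.
        });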

◆ SendNetworkStructure()

void SendNetworkStructure ( arm::pipe::IProfilingService &  profilingService)

Definition at line 666 of file LoadedNetwork.cpp.

667 {
668  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "LoadNetwork_SendNetworkStructure");
669  Graph& order = m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().TopologicalSort();
670  ProfilingGuid networkGuid = m_OptimizedNetwork->GetGuid();
671 
672  std::unique_ptr<TimelineUtilityMethods> timelineUtils =
673  TimelineUtilityMethods::GetTimelineUtils(profilingService);
674 
675  timelineUtils->CreateTypedEntity(networkGuid, LabelsAndEventClasses::NETWORK_GUID);
676 
677  for (auto&& layer : order)
678  {
679  // Add layer to the post-optimisation network structure
680  AddLayerStructure(timelineUtils, *layer, networkGuid);
681  switch (layer->GetType())
682  {
683  case LayerType::Input:
684  case LayerType::Output:
685  {
686  // Inputs and outputs are treated in a special way - see EnqueueInput() and EnqueueOutput().
687  break;
688  }
689  default:
690  {
691  for (auto& workload : m_WorkloadQueue)
692  {
693  // Add workload to the post-optimisation network structure
694  AddWorkloadStructure(timelineUtils, workload, *layer);
695  }
696  break;
697  }
698  }
699  }
700  // Commit to send the post-optimisation network structure
701  timelineUtils->Commit();
702 }

References ARMNN_SCOPED_PROFILING_EVENT, armnn::Input, armnn::Output, and armnn::Undefined.


The documentation for this class was generated from the following files:

LoadedNetwork.hpp
LoadedNetwork.cpp