// NOTE(review): this is an elided listing — the embedded numbers (25, 36, 37, 40)
// are the original file's line numbers and several intervening lines are missing.
// Code below is kept byte-identical to the listing.
25 #include <boost/format.hpp> 36 template <
typename ExceptionType>
// Formats "<prefix> <error.what()>" into an error string for any exception-like
// type exposing what(). The declaration of the stream `ss` and the return
// statement are elided from this view — presumably a std::stringstream; confirm.
37 std::string ToErrorMessage(
const char * prefix,
const ExceptionType &
error)
40 ss << prefix <<
" " << error.what();
// Registers one graph layer as a named, typed child entity in the profiling
// timeline and records its incoming connections. Lines elided in this view
// (remaining parameters, entity-type label, relationship calls).
44 void AddLayerStructure(std::unique_ptr<TimelineUtilityMethods>& timelineUtils,
// Empty layer names are reported as "<Unnamed>" so the timeline entry is never blank.
49 std::string layerName = layer.GetNameStr().empty() ?
"<Unnamed>" : layer.GetNameStr();
50 timelineUtils->CreateNamedTypedChildEntity(layer.GetGuid(),
// Walk every input slot and link this layer to the owning layer of the
// connected output slot (connection recording call is elided here).
54 for (
auto&& input : layer.GetInputSlots())
56 const IOutputSlot* source = input.GetConnectedOutputSlot();
59 source->GetOwningLayerGuid(),
// Registers a workload in the profiling timeline and labels it with the backend
// id of the layer it was created for. Entity-creation/relationship lines are
// elided in this view.
64 void AddWorkloadStructure(std::unique_ptr<TimelineUtilityMethods>& timelineUtils,
65 std::unique_ptr<IWorkload>& workload,
70 timelineUtils->MarkEntityWithLabel(workload->GetGuid(),
71 layer.GetBackendId().Get(),
// Factory for LoadedNetwork (the start of the signature is elided in this view;
// see the declaration of MakeLoadedNetwork referenced at the end of the listing).
// On failure the shared Fail lambda stores a human-readable message in
// errorMessage (out-param) and returns an empty unique_ptr; catch handlers
// other than std::runtime_error are elided here.
84 std::string& errorMessage,
88 std::unique_ptr<LoadedNetwork> loadedNetwork;
90 auto Fail = [&](
const std::exception&
error) -> std::unique_ptr<LoadedNetwork>
92 errorMessage = ToErrorMessage(
"An error occurred when preparing the network workloads: ", error);
95 return std::unique_ptr<LoadedNetwork>();
// Raw `new` is used because LoadedNetwork's constructor is presumably private
// to this factory — TODO confirm; make_unique cannot reach a private ctor.
100 loadedNetwork.reset(
new LoadedNetwork(std::move(net), networkProperties, profilingService));
110 catch (
const std::runtime_error& error)
115 return loadedNetwork;
// LoadedNetwork constructor: builds per-backend factories, creates tensor
// handles and workloads for every layer in topological order, then allocates
// dynamic buffers and runs post-allocation configuration. Many lines are
// elided in this view; comments below only describe what is visible.
118 LoadedNetwork::LoadedNetwork(std::unique_ptr<OptimizedNetwork> net,
121 m_OptimizedNetwork(std::move(net)),
124 m_ProfilingService(profilingService)
127 m_Profiler = std::make_shared<Profiler>();
// Pass 1: create each distinct backend exactly once, keyed by backend id.
135 for (
auto&& layer : order)
137 auto const& backendId = layer->GetBackendId();
138 if (m_Backends.count(backendId) == 0)
141 auto it = m_Backends.emplace(std::make_pair(backendId, createBackend()));
// Two registration paths: factory with no memory manager (nullptr) vs. with
// one — presumably chosen by backend capability (condition elided; confirm).
150 m_WorkloadFactories.emplace(
151 std::make_pair(backendId, std::make_pair(std::move(workloadFactory),
nullptr)));
158 m_WorkloadFactories.emplace(
159 std::make_pair(backendId, std::make_pair(std::move(workloadFactory), memoryManager)));
// Pass 2: create tensor handles. Input layers get unmanaged handles when
// import is enabled; a layer feeding a single Output connection gets
// unmanaged handles when export is enabled; everything else uses the default.
164 for (
auto&& layer : order)
166 auto& workloadFactory = GetWorkloadFactory(*layer);
168 switch (layer->GetType())
173 layer->CreateTensorHandles(m_TensorHandleFactoryRegistry, workloadFactory, !m_IsImportEnabled);
180 if((layer->GetNumOutputSlots() == 1) &&
181 (layer->GetOutputSlots()[0].GetNumConnections() == 1) &&
182 (layer->GetOutputSlots()[0].GetConnection(0)->GetOwningLayer().GetType() ==
LayerType::Output))
184 layer->CreateTensorHandles(m_TensorHandleFactoryRegistry, workloadFactory, !m_IsExportEnabled);
188 layer->CreateTensorHandles(m_TensorHandleFactoryRegistry, workloadFactory);
195 std::unique_ptr<TimelineUtilityMethods> timelineUtils =
// Pass 3: create one workload per (non-input/output) layer, report timeline
// structure when profiling is active, and release constant layer data early
// to reduce peak memory.
203 for (
auto&& layer : order)
208 AddLayerStructure(timelineUtils, *layer, networkGuid);
213 switch (layer->GetType())
223 auto workload = layer->CreateWorkload(workloadFactory);
// A null workload is an unrecoverable setup error; the message names the
// layer, its type, and its backend (throw statement elided in this view).
227 const char*
const layerName =
228 layer->GetNameStr().length() != 0 ? layer->GetName() :
"<Unnamed>";
230 boost::format(
"No workload created for layer (name: '%1%' type: '%2%') (compute '%3%')")
231 % layerName % static_cast<int>(layer->GetType()) % layer->GetBackendId().Get()
238 AddWorkloadStructure(timelineUtils, workload, *layer);
241 m_WorkloadQueue.push_back(move(workload));
243 layer->ReleaseConstantData();
252 timelineUtils->Commit();
256 m_OptimizedNetwork->GetGraph().AllocateDynamicBuffers();
// Workloads may finalize internal state only after buffers exist.
259 for (
auto& workload : m_WorkloadQueue)
261 workload->PostAllocationConfigure();
// Presumably the body of SendNetworkStructure() (declared at the end of this
// listing) — replays the network's layer/workload structure into the profiling
// timeline on demand. Function header and several lines are elided; confirm
// against the full source.
270 std::unique_ptr<TimelineUtilityMethods> timelineUtils =
275 for (
auto&& layer : order)
278 AddLayerStructure(timelineUtils, *layer, networkGuid);
279 switch (layer->GetType())
// For ordinary layers, every queued workload relationship is re-reported.
289 for (
auto& workload : m_WorkloadQueue)
292 AddWorkloadStructure(timelineUtils, workload, *layer);
299 timelineUtils->Commit();
// GetInputTensorInfo(layerId): linear scan over the graph's input layers for
// the one bound to layerId; returns its single output slot's TensorInfo.
// The not-found path (presumably a throw) is elided in this view.
304 for (
auto&& inputLayer : m_OptimizedNetwork->GetGraph().GetInputLayers())
306 ARMNN_ASSERT_MSG(inputLayer->GetNumOutputSlots() == 1,
"Input layer should have exactly 1 output slot");
307 if (inputLayer->GetBindingId() == layerId)
309 return inputLayer->GetOutputSlot(0).GetTensorInfo();
// GetOutputTensorInfo(layerId): linear scan over the graph's output layers for
// the one bound to layerId; returns the TensorInfo of the connection feeding
// its single input slot. The not-found path is elided in this view.
318 for (
auto&& outputLayer : m_OptimizedNetwork->GetGraph().GetOutputLayers())
320 ARMNN_ASSERT_MSG(outputLayer->GetNumInputSlots() == 1,
"Output layer should have exactly 1 input slot");
321 ARMNN_ASSERT_MSG(outputLayer->GetInputSlot(0).GetConnection(),
"Input slot on Output layer must be connected");
322 if (outputLayer->GetBindingId() == layerId)
324 return outputLayer->GetInputSlot(0).GetConnection()->GetTensorInfo();
// GetWorkloadFactory(layer): looks up the factory registered for the layer's
// backend, throws (throw statement elided) with a formatted message when the
// backend has no factory, and asserts the factory actually supports the layer.
335 auto it = m_WorkloadFactories.find(layer.
GetBackendId());
336 if (it == m_WorkloadFactories.end())
340 boost::format(
"No workload factory for %1% to be used for layer: %2%")
// it->second is pair<factory, memoryManager>; only the factory is returned.
346 workloadFactory = it->second.first.get();
350 std::string reasonIfUnsupported;
352 "Factory does not support layer");
354 return *workloadFactory;
// Interior of the TensorPin helper class (constructor head elided): pairs an
// owned passthrough tensor handle with its TensorInfo and binding id so an
// input/output tensor can be matched to a layer at enqueue time.
364 : m_TensorHandle(std::move(handle))
// Non-owning accessor; the unique_ptr member below retains ownership.
370 ITensorHandle* GetTensorHandle()
const {
return m_TensorHandle.get(); }
375 std::unique_ptr<ITensorHandle> m_TensorHandle;
// Shared lookup for input/output pins: find the pin whose binding id matches
// `id`, else throw (throw keyword elided) with a message naming the binding
// point kind ("input"/"output") and the id. Function head is elided here.
381 const std::vector<TensorPin>& pins,
382 char const* bindingPointDesc)
384 auto it = std::find_if(pins.begin(), pins.end(),
385 [id](
const TensorPin& pin)
387 return pin.GetBindingId() == id;
390 if (it != pins.end())
397 boost::format(
"No tensor supplied for %1% %2%") % bindingPointDesc %
id));
// WorkloadData constructor (head elided): wraps every user-supplied input and
// output tensor in a passthrough CPU tensor handle pinned to its binding id.
// reserve() up front — one allocation per vector, no reallocation in the loops.
407 m_InputTensorPins.reserve(inputTensors.size());
408 m_OutputTensorPins.reserve(outputTensors.size());
409 for (
auto inputTensorPair : inputTensors)
412 auto inputTensor = inputTensorPair.second;
// Const handle: input memory is read-only from the network's point of view.
414 std::unique_ptr<ITensorHandle> tensorHandle =
415 std::make_unique<ConstPassthroughCpuTensorHandle>(inputTensor.GetInfo(),inputTensor.GetMemoryArea());
418 m_InputTensorPins.emplace_back(std::move(tensorHandle), inputTensor.GetInfo(), layerId);
421 for (
auto outputTensorPair : outputTensors)
423 auto outputTensor = outputTensorPair.second;
425 std::unique_ptr<ITensorHandle> tensorHandle =
426 std::make_unique<PassthroughCpuTensorHandle>(outputTensor.GetInfo(), outputTensor.GetMemoryArea());
429 m_OutputTensorPins.emplace_back(std::move(tensorHandle), outputTensor.GetInfo(), layerId);
// Pin lookups by binding id; both delegate to the shared GetTensorPin helper,
// differing only in the pin vector searched and the description used in the
// error message. Method heads are elided in this view.
435 return GetTensorPin(
id, m_InputTensorPins,
"input");
440 return GetTensorPin(
id, m_OutputTensorPins,
"output");
445 std::vector<TensorPin> m_InputTensorPins;
446 std::vector<TensorPin> m_OutputTensorPins;
// EnqueueWorkload (head elided): validates the graph, rebuilds the input and
// output copy queues from the caller's tensors, then executes, reporting an
// inference entity and counters when profiling is enabled.
456 const Graph& graph = m_OptimizedNetwork->GetGraph();
// A graph with fewer than two nodes cannot compute anything useful — warn only.
461 ARMNN_LOG(
warning) <<
"IRuntime::EnqueueWorkload()::Less than two nodes in graph";
466 WorkloadData workloadData(inputTensors, outputTensors);
// Queues are rebuilt on every call; loop headers over the graph's
// input/output layers are elided in this view.
474 m_InputQueue.clear();
478 const TensorPin& pin = workloadData.GetInputTensorPin(inputLayer->GetBindingId());
479 EnqueueInput(*inputLayer, pin.GetTensorHandle(), pin.GetTensorInfo());
483 m_OutputQueue.clear();
487 const TensorPin& pin = workloadData.GetOutputTensorPin(outputLayer->GetBindingId());
488 EnqueueOutput(*outputLayer, pin.GetTensorHandle(), pin.GetTensorInfo());
491 std::unique_ptr<TimelineUtilityMethods> timelineUtils =
493 ProfilingGuid inferenceGuid = m_ProfilingService.GetNextGuid();
503 bool executionSucceeded =
true;
506 if (m_ProfilingService.IsProfilingEnabled())
508 m_ProfilingService.IncrementCounterValue(armnn::profiling::INFERENCES_RUN);
512 executionSucceeded = Execute(timelineUtils, inferenceGuid);
519 timelineUtils->Commit();
// EnqueueInput (head elided): stages one user input tensor for the network.
// Either imports the caller's memory directly into the input layer's handle
// (zero-copy, when m_IsImportEnabled) or queues a CopyMemGenericWorkload.
531 if (tensorHandle ==
nullptr)
539 inputQueueDescriptor.
m_Inputs.push_back(tensorHandle);
544 const TensorInfo& outputTensorInfo = handler.GetTensorInfo();
547 "Data should have been allocated.");
548 inputQueueDescriptor.
m_Outputs.push_back(outputTensorHandle);
552 if (m_IsImportEnabled)
// Map(false) = non-blocking map of the caller's buffer; the import call that
// consumes `mem` is elided. Both branches Unmap to balance the Map.
557 void* mem = tensorHandle->
Map(
false);
560 tensorHandle->
Unmap();
563 tensorHandle->
Unmap();
// Fallback path: explicit copy workload from the caller's buffer into the
// layer's own tensor handle.
574 std::unique_ptr<IWorkload> inputWorkload = std::make_unique<CopyMemGenericWorkload>(inputQueueDescriptor,
info);
578 std::unique_ptr<TimelineUtilityMethods> timelineUtils =
583 AddWorkloadStructure(timelineUtils, inputWorkload, layer);
584 timelineUtils->Commit();
587 m_InputQueue.push_back(move(inputWorkload));
// EnqueueOutput (head elided): stages one user output tensor. When export is
// enabled and the producing slot has exactly one consumer, the caller's memory
// is exported into the producing handle and only a sync workload is queued;
// otherwise a copy workload moves the result into the caller's buffer.
598 if (tensorHandle ==
nullptr)
606 outputQueueDescriptor.
m_Outputs.push_back(tensorHandle);
614 const TensorInfo& inputTensorInfo = outputHandler.GetTensorInfo();
616 ARMNN_ASSERT_MSG(inputTensorHandle !=
nullptr,
"Data should have been allocated.");
// Single-consumer check: exporting into a shared handle would let one
// consumer clobber memory another still needs.
625 if (m_IsExportEnabled && (layer.
GetInputSlots()[0].GetConnectedOutputSlot()->GetNumConnections() == 1))
632 void *mem = tensorHandle->
Map(
false);
634 tensorHandle->
Unmap();
// Export succeeded: a sync workload makes the producing backend's result
// visible in the exported memory — no copy needed.
640 syncDesc.
m_Inputs.push_back(inputTensorHandle);
642 auto syncWorkload = std::make_unique<SyncMemGenericWorkload>(syncDesc,
info);
644 m_OutputQueue.push_back(move(syncWorkload));
// Guard (condition elided): exporting directly from an Input layer's handle
// is rejected.
658 throw MemoryExportException(
"EnqueueOutput: Memory Export failed, attempting to export Input Layer");
// Fallback: plain copy from the layer's handle into the caller's buffer.
664 outputQueueDescriptor.
m_Inputs.push_back(inputTensorHandle);
667 std::unique_ptr<IWorkload> outputWorkload =
668 std::make_unique<CopyMemGenericWorkload>(outputQueueDescriptor,
info);
671 std::unique_ptr<TimelineUtilityMethods> timelineUtils =
676 AddWorkloadStructure(timelineUtils, outputWorkload, layer);
677 timelineUtils->Commit();
680 m_OutputQueue.push_back(move(outputWorkload));
// Acquires working memory from every backend's memory manager and the tensor
// handle factory registry. Idempotent via the m_IsWorkingMemAllocated flag;
// callers are expected to hold m_WorkingMemMutex (as Execute does — this
// function takes no lock itself).
684 void LoadedNetwork::AllocateWorkingMemory()
686 if (m_IsWorkingMemAllocated)
690 for (
auto&& workloadFactory : m_WorkloadFactories)
695 memoryManager->Acquire();
698 m_TensorHandleFactoryRegistry.AquireMemory();
699 m_IsWorkingMemAllocated =
true;
// FreeWorkingMemory (head elided): mirror of AllocateWorkingMemory. Takes the
// working-memory mutex itself, releases each backend memory manager and the
// registry's memory, and clears the allocated flag. No-op if nothing is held.
704 std::lock_guard<std::mutex> lockGuard(m_WorkingMemMutex);
705 if (!m_IsWorkingMemAllocated)
710 for (
auto&& workloadFactory : m_WorkloadFactories)
715 memoryManager->Release();
718 m_TensorHandleFactoryRegistry.ReleaseMemory();
719 m_IsWorkingMemAllocated =
false;
// Runs one inference: input queue, then the main workload queue, then the
// output queue, holding the working-memory lock for the duration. When
// profiling is active each workload gets a per-inference timeline record with
// start/end-of-life events. Returns success (return paths elided in this view).
722 bool LoadedNetwork::Execute(std::unique_ptr<TimelineUtilityMethods>& timelineUtils,
// Fail lambda: logs the failure; presumably also flips the success flag
// returned to the caller — TODO confirm, the body is partly elided.
727 auto Fail = [&](
const std::exception&
error)
729 ARMNN_LOG(error) <<
"An error occurred attempting to execute a workload: " << error.what();
735 std::lock_guard<std::mutex> lockGuard(m_WorkingMemMutex);
// Lazy allocation: first Execute after load (or after FreeWorkingMemory)
// acquires backend working memory under the lock.
736 AllocateWorkingMemory();
739 for (
auto& input : m_InputQueue)
743 workloadInferenceID = timelineUtils->RecordWorkloadInferenceAndStartOfLifeEvent(input->GetGuid(),
749 timelineUtils->RecordEndOfLifeEvent(workloadInferenceID);
753 for (
auto& workload : m_WorkloadQueue)
757 workloadInferenceID = timelineUtils->RecordWorkloadInferenceAndStartOfLifeEvent(workload->GetGuid(),
763 timelineUtils->RecordEndOfLifeEvent(workloadInferenceID);
766 for (
auto& output: m_OutputQueue)
770 workloadInferenceID = timelineUtils->RecordWorkloadInferenceAndStartOfLifeEvent(output->GetGuid(),
776 timelineUtils->RecordEndOfLifeEvent(workloadInferenceID);
784 catch (
const std::runtime_error& error)
// RegisterDebugCallback (head elided): forwards the debug callback to every
// queued workload so Debug layers can invoke it during execution.
794 for (
auto&& workloadPtr: m_WorkloadQueue)
796 workloadPtr.get()->RegisterDebugCallback(func);
static ARMNN_DLLEXPORT ProfilingStaticGuid INFERENCE_GUID
virtual void RegisterTensorHandleFactories(class TensorHandleFactoryRegistry &)
(Optional) Register TensorHandleFactories Either this method or CreateMemoryManager() and IWorkloadFa...
virtual bool Import(void *memory, MemorySource source)
Import externally allocated memory.
FactoryFunction GetFactory(const BackendId &id) const
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
const bool m_ImportEnabled
void RegisterProfiler(Profiler *profiler)
static std::unique_ptr< TimelineUtilityMethods > GetTimelineUtils(ProfilingService &profilingService)
static ProfilerManager & GetInstance()
virtual unsigned int GetImportFlags() const
Get flags describing supported import sources.
Strongly typed guids to distinguish between those generated at runtime, and those that are statically...
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
TensorInfo GetInputTensorInfo(LayerBindingId layerId) const
#define ARMNN_LOG(severity)
BackendRegistry & BackendRegistryInstance()
std::vector< std::pair< LayerBindingId, class ConstTensor > > InputTensors
unsigned int MemorySourceFlags
size_t GetNumOutputs() const
TensorInfo GetOutputTensorInfo(LayerBindingId layerId) const
Copyright (c) 2020 ARM Limited.
void IgnoreUnused(Ts &&...)
const std::vector< InputSlot > & GetInputSlots() const
std::function< void(LayerGuid guid, unsigned int slotIndex, ITensorHandle *tensorHandle)> DebugCallbackFunction
Define the type of callback for the Debug layer to call.
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
static ARMNN_DLLEXPORT ProfilingStaticGuid WORKLOAD_GUID
static ARMNN_DLLEXPORT ProfilingStaticGuid ARMNN_PROFILING_EOL_EVENT_CLASS
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
static ARMNN_DLLEXPORT ProfilingStaticGuid ARMNN_PROFILING_SOL_EVENT_CLASS
virtual IWorkloadFactoryPtr CreateWorkloadFactory(const IMemoryManagerSharedPtr &memoryManager=nullptr) const =0
std::vector< TensorInfo > m_InputTensorInfos
static ARMNN_DLLEXPORT ProfilingStaticGuid LAYER_GUID
#define ARMNN_ASSERT_MSG(COND, MSG)
bool SupportsTensorAllocatorAPI() const
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
#define ARMNN_SCOPED_HEAP_PROFILING(TAG)
std::vector< std::pair< LayerBindingId, class Tensor > > OutputTensors
const std::string & GetNameStr() const
const bool m_ExportEnabled
#define ARMNN_ASSERT(COND)
std::vector< TensorInfo > m_OutputTensorInfos
static bool IsLayerSupported(const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
const BackendId & GetBackendId() const
armnn::profiling::ProfilingService profilingService
OutputLayersAccessor GetOutputLayers() const
Returns a wrapper object with begin(), end() methods to iterate over the output layers in a range-bas...
static ARMNN_DLLEXPORT ProfilingStaticGuid NETWORK_GUID
Status EnqueueWorkload(const InputTensors &inputTensors, const OutputTensors &outputTensors)
virtual const void * Map(bool blocking=true) const =0
Map the tensor data for access.
virtual void Unmap() const =0
Unmap the tensor data.
std::vector< ITensorHandle * > m_Outputs
Base class for all ArmNN exceptions so that users can filter to just those.
const OutputHandler & GetOutputHandler(unsigned int i=0) const
const std::string & Get() const
void RegisterDebugCallback(const DebugCallbackFunction &func)
LayerType GetType() const
Contains information about inputs and outputs to a layer.
bool CheckFlag(MemorySourceFlags flags, MemorySource source)
Graph & TopologicalSort()
Sorts layers in topological order and return this.
InputLayersAccessor GetInputLayers() const
Returns a wrapper object with begin(), end() methods to iterate over the input layers in a range-base...
std::vector< ITensorHandle * > m_Inputs
size_t GetNumLayers() const
virtual ARMNN_NO_DEPRECATE_WARN_END IMemoryManagerUniquePtr CreateMemoryManager() const
size_t GetNumInputs() const
static ARMNN_DLLEXPORT ProfilingStaticGuid BACKENDID_GUID
static std::unique_ptr< LoadedNetwork > MakeLoadedNetwork(std::unique_ptr< OptimizedNetwork > net, std::string &errorMessage, const INetworkProperties &networkProperties, profiling::ProfilingService &profilingService)
void SendNetworkStructure()