ArmNN 20.05 — LoadedNetwork.hpp (source listing)
See the documentation page for this file for the annotated member reference.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6 
7 #include <armnn/Tensor.hpp>
8 #include <armnn/Types.hpp>
9 
10 #include "Network.hpp"
11 #include "LayerFwd.hpp"
12 #include "Profiling.hpp"
13 
18 #include <ProfilingService.hpp>
20 
21 #include <mutex>
22 #include <unordered_map>
23 
// Forward declarations of the OpenCL C++ wrapper types used by the GPU backend,
// so this header does not have to pull in the OpenCL headers themselves.
24 namespace cl
25 {
26  class Context;
27  class CommandQueue;
28  class Device;
29 }
30 
31 namespace armnn
32 {
33 
// NOTE(review): the extraction dropped original line 34 here — the class
// declaration itself (presumably `class LoadedNetwork`). The opening brace
// below belongs to that class; confirm against the original header.
35 {
36 public:
// Ordered queue of workloads owned by this network; executed in sequence.
 37  using WorkloadQueue = std::vector< std::unique_ptr<IWorkload> >;
// Destructor releases per-inference working memory before the members
// (backends, factories, memory managers) are destroyed.
 38  ~LoadedNetwork(){ FreeWorkingMemory(); }
 39 
// NOTE(review): original line 40 is missing from this listing — the
// member-index text below suggests it declared GetInputTensorInfo; verify.
 41  TensorInfo GetOutputTensorInfo(LayerBindingId layerId) const;
 42 
// Runs one inference: binds the given input/output tensors and executes the
// input, workload, and output queues. Returns a Status (see Types.hpp).
 43  Status EnqueueWorkload(const InputTensors& inputTensors, const OutputTensors& outputTensors);
 44 
// Factory: builds a LoadedNetwork from an optimized network. On failure,
// errorMessage is populated for the caller.
// NOTE(review): original line 48 is missing here — by symmetry with the
// private constructor below it presumably carried the trailing
// `profiling::ProfilingService& profilingService` parameter; confirm.
 45  static std::unique_ptr<LoadedNetwork> MakeLoadedNetwork(std::unique_ptr<OptimizedNetwork> net,
 46  std::string & errorMessage,
 47  const INetworkProperties& networkProperties,
 49 
 50  // NOTE we return by reference as the purpose of this method is only to provide
 51  // access to the private m_Profiler and in theory we should not need to increment
 52  // the shared_ptr's reference counter
 53  const std::shared_ptr<Profiler>& GetProfiler() const { return m_Profiler; }
 54 
// Releases the working memory allocated for inference (also called from the
// destructor); guarded internally, see m_WorkingMemMutex / m_IsWorkingMemAllocated.
 55  void FreeWorkingMemory();
 56 
// Registers a per-layer debug callback (see DebugCallbackFunction in Types.hpp).
 57  void RegisterDebugCallback(const DebugCallbackFunction& func);
 58 
// Reports the network structure to the external profiling service.
 59  void SendNetworkStructure();
 60 
 61 private:
 62  void AllocateWorkingMemory();
 63 
// Construction goes through MakeLoadedNetwork; the constructor is private.
 64  LoadedNetwork(std::unique_ptr<OptimizedNetwork> net,
 65  const INetworkProperties& networkProperties,
 66  profiling::ProfilingService& profilingService);
 67 
 68  void EnqueueInput(const BindableLayer& layer, ITensorHandle* tensorHandle, const TensorInfo& tensorInfo);
 69 
 70  void EnqueueOutput(const BindableLayer& layer, ITensorHandle* tensorHandle, const TensorInfo& tensorInfo);
 71 
// Executes the queued workloads, emitting timeline events for the given
// inference GUID when profiling is enabled.
 72  bool Execute(std::unique_ptr<profiling::TimelineUtilityMethods>& timelineUtils,
 73  profiling::ProfilingGuid inferenceGuid);
 74 
 75 
// Resolves which backend's workload factory serves the given layer.
 76  const IWorkloadFactory& GetWorkloadFactory(const Layer& layer) const;
 77 
 78  using BackendPtrMap = std::unordered_map<BackendId, IBackendInternalUniquePtr>;
 79 
// A factory is paired with the memory manager it shares with its backend.
 80  using WorkloadFactoryWithMemoryManager =
 81  std::pair<IBackendInternal::IWorkloadFactoryPtr, IBackendInternal::IMemoryManagerSharedPtr>;
 82 
 83  using WorkloadFactoryMap = std::unordered_map<BackendId, WorkloadFactoryWithMemoryManager>;
 84 
 85  BackendPtrMap m_Backends;
 86  WorkloadFactoryMap m_WorkloadFactories;
 87 
 88  std::unique_ptr<OptimizedNetwork> m_OptimizedNetwork;
// Three-phase execution: copy-in workloads, the network proper, copy-out workloads.
 89  WorkloadQueue m_InputQueue;
 90  WorkloadQueue m_WorkloadQueue;
 91  WorkloadQueue m_OutputQueue;
 92  std::shared_ptr<Profiler> m_Profiler;
 93 
// mutable: locked from const paths too; serializes working-memory (de)allocation.
 94  mutable std::mutex m_WorkingMemMutex;
 95 
 96  bool m_IsWorkingMemAllocated=false;
 97  bool m_IsImportEnabled=false;
 98  bool m_IsExportEnabled=false;
 99 
 100  TensorHandleFactoryRegistry m_TensorHandleFactoryRegistry;
 101 
// Non-owning reference to the runtime-wide profiling service.
 102  profiling::ProfilingService& m_ProfilingService;
 103 };
 104 
 105 }
std::vector< std::unique_ptr< IWorkload > > WorkloadQueue
std::vector< std::pair< LayerBindingId, class ConstTensor > > InputTensors
Definition: Tensor.hpp:225
const std::shared_ptr< Profiler > & GetProfiler() const
Copyright (c) 2020 ARM Limited.
std::function< void(LayerGuid guid, unsigned int slotIndex, ITensorHandle *tensorHandle)> DebugCallbackFunction
Define the type of callback for the Debug layer to call.
Definition: Types.hpp:244
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:171
std::vector< std::pair< LayerBindingId, class Tensor > > OutputTensors
Definition: Tensor.hpp:226
Status — enumeration
Definition: Types.hpp:26
armnn::profiling::ProfilingService profilingService
TensorInfo GetInputTensorInfo(const Network *network)