ArmNN
 20.02
LoadedNetwork.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6 
7 #include <armnn/Tensor.hpp>
8 #include <armnn/Types.hpp>
9 
10 #include "Network.hpp"
11 #include "LayerFwd.hpp"
12 #include "Profiling.hpp"
13 
19 
20 #include <mutex>
21 #include <unordered_map>
22 
23 namespace cl
24 {
25  class Context;
26  class CommandQueue;
27  class Device;
28 }
29 
30 namespace armnn
31 {
32 
33 class LoadedNetwork
34 {
35 public:
36  using WorkloadQueue = std::vector< std::unique_ptr<IWorkload> >;
37  ~LoadedNetwork(){ FreeWorkingMemory(); }
38 
39  TensorInfo GetInputTensorInfo(LayerBindingId layerId) const;
40  TensorInfo GetOutputTensorInfo(LayerBindingId layerId) const;
41 
42  Status EnqueueWorkload(const InputTensors& inputTensors, const OutputTensors& outputTensors);
43 
44  static std::unique_ptr<LoadedNetwork> MakeLoadedNetwork(std::unique_ptr<OptimizedNetwork> net,
45  std::string & errorMessage,
46  const INetworkProperties& networkProperties);
47 
48  // NOTE we return by reference as the purpose of this method is only to provide
49  // access to the private m_Profiler and in theory we should not need to increment
50  // the shared_ptr's reference counter
51  const std::shared_ptr<Profiler>& GetProfiler() const { return m_Profiler; }
52 
53  void FreeWorkingMemory();
54 
55  void RegisterDebugCallback(const DebugCallbackFunction& func);
56 
57 private:
58  void AllocateWorkingMemory();
59 
60  LoadedNetwork(std::unique_ptr<OptimizedNetwork> net, const INetworkProperties& networkProperties);
61 
62  void EnqueueInput(const BindableLayer& layer, ITensorHandle* tensorHandle, const TensorInfo& tensorInfo);
63 
64  void EnqueueOutput(const BindableLayer& layer, ITensorHandle* tensorHandle, const TensorInfo& tensorInfo);
65 
66  bool Execute(std::unique_ptr<profiling::TimelineUtilityMethods>& timelineUtils,
67  profiling::ProfilingGuid inferenceGuid);
68 
69 
70  const IWorkloadFactory& GetWorkloadFactory(const Layer& layer) const;
71 
72  using BackendPtrMap = std::unordered_map<BackendId, IBackendInternalUniquePtr>;
73 
74  using WorkloadFactoryWithMemoryManager =
75  std::pair<IBackendInternal::IWorkloadFactoryPtr, IBackendInternal::IMemoryManagerSharedPtr>;
76 
77  using WorkloadFactoryMap = std::unordered_map<BackendId, WorkloadFactoryWithMemoryManager>;
78 
79  BackendPtrMap m_Backends;
80  WorkloadFactoryMap m_WorkloadFactories;
81 
82  std::unique_ptr<OptimizedNetwork> m_OptimizedNetwork;
83  WorkloadQueue m_InputQueue;
84  WorkloadQueue m_WorkloadQueue;
85  WorkloadQueue m_OutputQueue;
86  std::shared_ptr<Profiler> m_Profiler;
87 
88  mutable std::mutex m_WorkingMemMutex;
89 
90  bool m_IsWorkingMemAllocated=false;
91  bool m_IsImportEnabled=false;
92  bool m_IsExportEnabled=false;
93 
94  TensorHandleFactoryRegistry m_TensorHandleFactoryRegistry;
95 };
96 
97 }
std::vector< std::unique_ptr< IWorkload > > WorkloadQueue
std::vector< std::pair< LayerBindingId, class ConstTensor > > InputTensors
Definition: Tensor.hpp:225
const std::shared_ptr< Profiler > & GetProfiler() const
Copyright (c) 2020 ARM Limited.
std::function< void(LayerGuid guid, unsigned int slotIndex, ITensorHandle *tensorHandle)> DebugCallbackFunction
Define the type of callback for the Debug layer to call.
Definition: Types.hpp:244
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:171
std::vector< std::pair< LayerBindingId, class Tensor > > OutputTensors
Definition: Tensor.hpp:226
Status
enumeration
Definition: Types.hpp:26
TensorInfo GetInputTensorInfo(const Network *network)