ArmNN
 22.08
RefBackend.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "RefBackend.hpp"
7 #include "RefBackendId.hpp"
8 #include "RefWorkloadFactory.hpp"
9 #include "RefLayerSupport.hpp"
11 
18 
19 #include <Optimizer.hpp>
20 
21 namespace armnn
22 {
23 
// RefBackend::GetIdStatic (signature line missing from this extract; per the
// index below: static const BackendId& GetIdStatic()).
// Returns the reference backend's identifier, built once from RefBackendId()
// as a function-local static and shared by all callers.
25 {
26  static const BackendId s_Id{RefBackendId()};
27  return s_Id;
28 }
29 
// RefBackend::CreateWorkloadFactory (the first signature line is missing from
// this extract; per the index below it returns IBackendInternal::IWorkloadFactoryPtr
// and takes an optional shared memory manager, defaulting to nullptr).
// Builds a RefWorkloadFactory around the supplied memory manager, downcast to
// the concrete RefMemoryManager type this backend owns.
31  const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) const
32 {
33  return std::make_unique<RefWorkloadFactory>(PolymorphicPointerDowncast<RefMemoryManager>(memoryManager));
34 }
35 
// RefBackend::CreateWorkloadFactory overload (first signature line missing from
// this extract) taking a TensorHandleFactoryRegistry.
// Creates a fresh RefMemoryManager, registers it and a RefTensorHandleFactory
// (plus its copy/import factory pairing) with the registry, then returns a
// workload factory bound to the same memory manager.
37  class TensorHandleFactoryRegistry& tensorHandleFactoryRegistry) const
38 {
39  auto memoryManager = std::make_shared<RefMemoryManager>();
40 
// The registry shares ownership of the memory manager with the factory below.
41  tensorHandleFactoryRegistry.RegisterMemoryManager(memoryManager);
42 
43  std::unique_ptr<RefTensorHandleFactory> factory = std::make_unique<RefTensorHandleFactory>(memoryManager);
// Register copy and import factory pair: the same factory id serves both roles
// for the reference backend.
44  // Register copy and import factory pair
45  tensorHandleFactoryRegistry.RegisterCopyAndImportFactoryPair(factory->GetId(), factory->GetId());
46  // Register the factory
47  tensorHandleFactoryRegistry.RegisterFactory(std::move(factory));
48 
49  return std::make_unique<RefWorkloadFactory>(PolymorphicPointerDowncast<RefMemoryManager>(memoryManager));
50 }
51 
// RefBackend::CreateBackendContext (signature line missing from this extract;
// per the index below it takes const IRuntime::CreationOptions&).
// The reference backend needs no runtime context, so an empty pointer is returned.
53 {
54  return IBackendContextPtr{};
55 }
56 
// RefBackend::CreateBackendProfilingContext (signature and body line missing
// from this extract; per the index below it returns IBackendProfilingContextPtr).
// NOTE(review): the return statement (original line 60) is absent here —
// presumably it returns an empty IBackendProfilingContextPtr; confirm against
// the upstream source.
59 {
61 }
62 
// RefBackend::CreateMemoryManager (signature line missing from this extract;
// per the index below it returns IBackendInternal::IMemoryManagerUniquePtr).
// Hands back a freshly constructed RefMemoryManager; the caller takes ownership.
64 {
65  return std::make_unique<RefMemoryManager>();
66 }
67 
// RefBackend::GetLayerSupport (signature line missing from this extract; per
// the index below it returns IBackendInternal::ILayerSupportSharedPtr).
// A single RefLayerSupport instance is created on first call and shared
// thereafter via the function-local static.
69 {
70  static ILayerSupportSharedPtr layerSupport{new RefLayerSupport};
71  return layerSupport;
72 }
73 
// RefBackend::OptimizeSubgraphView (first signature line missing from this
// extract; per the index below it takes (const SubgraphView& subgraph,
// const ModelOptions& modelOptions)).
// Walks the subgraph and applies one backend-specific optimization: folding a
// Pad layer into a following average-Pooling2d layer. Layers not touched by
// any substitution are reported back so the optimizer can keep them as-is.
75  const ModelOptions& modelOptions) const
76 {
77  OptimizationViews optimizationViews(modelOptions);
78 
// First pass (reverse iteration): record every layer keyed by GUID so the
// untouched set can be reported after substitutions are made.
79  auto it = subgraph.endIConnectable();
80  std::map<LayerGuid, Layer*> untouched;
81 
82  while (it != subgraph.beginIConnectable())
83  {
84  --it;
85  Layer& base = *(PolymorphicDowncast<Layer*>(*it));
86  untouched.insert({base.GetGuid(), &base});
87  }
88 
// Second pass: look for Pooling2d layers that can absorb a preceding Pad.
89  it = subgraph.endIConnectable();
90  while (it != subgraph.beginIConnectable())
91  {
92  --it;
93  Layer& base = *(PolymorphicDowncast<Layer*>(*it));
94 
95  // Special case to fuse padding into average pooling 2d for quantized datatype.
96  // Required to be done as a backend specific optimization as Neon does not support this special case.
97  if (base.GetType() == LayerType::Pooling2d)
98  {
99  Pooling2dLayer* baseLayer = PolymorphicDowncast<Pooling2dLayer*>(&base);
100  Pooling2dDescriptor poolingDescriptor = baseLayer->GetParameters();
101 
// NOTE(review): the guarding condition (original line 102) is missing from
// this extract — presumably it checks that the pooling layer's input comes
// from a Pad layer; confirm against the upstream source.
103  {
104  PadLayer* padLayer = PolymorphicDowncast<PadLayer*>(
// NOTE(review): the downcast argument (original line 105) is missing —
// presumably the owning layer of the pooling layer's connected input slot.
// Fold only when the Pad's output feeds exactly one consumer and
// TryFoldPadIntoLayer2d accepts the combination (its pad-descriptor argument
// sits on the missing original line 107).
106  if (padLayer->GetOutputSlot(0).GetNumConnections() == 1 &&
108  poolingDescriptor,
109  padLayer->GetOutputSlot().GetTensorInfo(),
110  true))
111  {
112  FoldPadIntoAveragePool2d<Pooling2dLayer>(optimizationViews, baseLayer,
113  poolingDescriptor, padLayer);
// Both layers are replaced by the fused pooling layer, so neither is "untouched".
114  untouched.erase(baseLayer->GetGuid());
115  untouched.erase(padLayer->GetGuid());
116  }
117  }
118  }
119  }
120 
// No substitutions: return the whole subgraph unchanged. Otherwise report the
// layers that the substitutions left alone.
121  if (optimizationViews.GetSubstitutions().empty())
122  {
123  optimizationViews.AddUntouchedSubgraph(SubgraphView(subgraph));
124  }
125  else
126  {
127  ReportUntouchedLayers(optimizationViews, untouched);
128  }
129 
130  return optimizationViews;
131 }
132 
133 std::vector<ITensorHandleFactory::FactoryId> RefBackend::GetHandleFactoryPreferences() const
134 {
135  return std::vector<ITensorHandleFactory::FactoryId> { RefTensorHandleFactory::GetIdStatic() };
136 }
137 
// RefBackend::RegisterTensorHandleFactories (signature line missing from this
// extract; per the index below it takes class TensorHandleFactoryRegistry& registry).
// Creates a RefMemoryManager and a RefTensorHandleFactory bound to it, then
// registers both with the registry — mirroring the registration performed in
// the TensorHandleFactoryRegistry overload of CreateWorkloadFactory.
139 {
140  auto memoryManager = std::make_shared<RefMemoryManager>();
141 
142  registry.RegisterMemoryManager(memoryManager);
143 
144  std::unique_ptr<RefTensorHandleFactory> factory = std::make_unique<RefTensorHandleFactory>(memoryManager);
145 
// The same factory id serves as both the copy and the import factory.
146  // Register copy and import factory pair
147  registry.RegisterCopyAndImportFactoryPair(factory->GetId(), factory->GetId());
148  // Register the factory
149  registry.RegisterFactory(std::move(factory));
150 }
151 
152 std::unique_ptr<ICustomAllocator> RefBackend::GetDefaultAllocator() const
153 {
154  return std::make_unique<DefaultAllocator>();
155 }
156 
// RefBackend::CreateExecutionData (signature line missing from this extract;
// per the index below: ExecutionData CreateExecutionData(WorkingMemDescriptor&) const).
// Wraps the given working-memory descriptor in an ExecutionData value; only a
// pointer is stored, so the descriptor must outlive the returned object.
158 {
159  ExecutionData executionData;
160  executionData.m_Data = &workingMemDescriptor;
161  return executionData;
162 }
163 
164 void RefBackend::UpdateExecutionData(ExecutionData& executionData, WorkingMemDescriptor& workingMemDescriptor) const
165 {
166  executionData.m_Data = &workingMemDescriptor;
167 }
168 
169 } // namespace armnn
void RegisterMemoryManager(std::shared_ptr< IMemoryManager > memoryManager)
Register a memory manager with shared ownership.
IConnectableLayerIterator endIConnectable()
static const FactoryId & GetIdStatic()
std::unique_ptr< IWorkloadFactory > IWorkloadFactoryPtr
IBackendInternal::IMemoryManagerUniquePtr CreateMemoryManager() const override
Definition: RefBackend.cpp:63
constexpr const char * RefBackendId()
std::vector< BackendOptions > ModelOptions
void RegisterFactory(std::unique_ptr< ITensorHandleFactory > allocator)
Register a TensorHandleFactory and transfer ownership.
void ReportUntouchedLayers(OptimizationViews &optimizationViews, std::map< LayerGuid, Layer *> untouched)
Layer & GetOwningLayer() const
Definition: Layer.hpp:119
void UpdateExecutionData(ExecutionData &executionData, WorkingMemDescriptor &workingMemDescriptor) const override
Update the ExecutionData for a layer.
Definition: RefBackend.cpp:164
IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override
Definition: RefBackend.cpp:68
IConnectableLayerIterator beginIConnectable()
static const BackendId & GetIdStatic()
Definition: RefBackend.cpp:24
void RegisterTensorHandleFactories(class TensorHandleFactoryRegistry &registry) override
(Optional) Register TensorHandleFactories Either this method or CreateMemoryManager() and IWorkloadFa...
Definition: RefBackend.cpp:138
Copyright (c) 2021 ARM Limited and Contributors.
This layer represents a pad operation.
Definition: PadLayer.hpp:14
const Parameters & GetParameters() const override
If the layer has a descriptor return it.
std::unique_ptr< IMemoryManager > IMemoryManagerUniquePtr
IBackendInternal::IBackendContextPtr CreateBackendContext(const IRuntime::CreationOptions &) const override
Create the runtime context of the backend.
Definition: RefBackend.cpp:52
IBackendInternal::IBackendProfilingContextPtr CreateBackendProfilingContext(const IRuntime::CreationOptions &creationOptions, IBackendProfilingPtr &backendProfiling) override
Create context specifically used for profiling interaction from backends.
Definition: RefBackend.cpp:57
The SubgraphView class represents a subgraph of a Graph.
unsigned int GetNumConnections() const override
Definition: Layer.hpp:145
void RegisterCopyAndImportFactoryPair(ITensorHandleFactory::FactoryId copyFactoryId, ITensorHandleFactory::FactoryId importFactoryId)
Register a pair of TensorHandleFactory Id for Memory Copy and TensorHandleFactory Id for Memory Impor...
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:324
bool TryFoldPadIntoLayer2d(const PadDescriptor &padDescriptor, Descriptor &layerDescriptor, const TensorInfo &tensorInfo)
ExecutionData CreateExecutionData(WorkingMemDescriptor &workingMemDescriptor) const override
Returns ExecutionData for the backend.
Definition: RefBackend.cpp:157
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
OptimizationViews OptimizeSubgraphView(const SubgraphView &subgraph, const ModelOptions &modelOptions) const override
Definition: RefBackend.cpp:74
IBackendInternal::IWorkloadFactoryPtr CreateWorkloadFactory(const IBackendInternal::IMemoryManagerSharedPtr &memoryManager=nullptr) const override
Definition: RefBackend.cpp:30
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
Definition: Layer.hpp:273
const OutputSlot * GetConnectedOutputSlot() const
Definition: Layer.hpp:56
void AddUntouchedSubgraph(SubgraphView &&subgraph)
std::shared_ptr< arm::pipe::IBackendProfilingContext > IBackendProfilingContextPtr
This is the bridge between backend and backend profiling; we'll keep it in the backend namespace...
std::unique_ptr< ICustomAllocator > GetDefaultAllocator() const override
Returns the default memory allocator for the backend.
Definition: RefBackend.cpp:152
This layer represents a pooling 2d operation.
std::shared_ptr< ILayerSupport > ILayerSupportSharedPtr
const Substitutions & GetSubstitutions() const
std::unique_ptr< arm::pipe::IBackendProfiling > IBackendProfilingPtr
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:326
A Pooling2dDescriptor for the Pooling2dLayer.
std::vector< ITensorHandleFactory::FactoryId > GetHandleFactoryPreferences() const override
(Optional) Returns a vector of supported TensorHandleFactory ids in preference order.
Definition: RefBackend.cpp:133
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:92
LayerGuid GetGuid() const final
Returns the unique id of the layer.
Definition: Layer.hpp:330
std::unique_ptr< IBackendContext > IBackendContextPtr