ArmNN 24.02
RefBackend Class Reference

#include <RefBackend.hpp>

Inheritance diagram for RefBackend:
Collaboration diagram for RefBackend:

Public Member Functions

 RefBackend ()=default
 
 ~RefBackend ()=default
 
const BackendId & GetId () const override
 
IBackendInternal::IMemoryManagerUniquePtr CreateMemoryManager () const override
 
IBackendInternal::IWorkloadFactoryPtr CreateWorkloadFactory (const IBackendInternal::IMemoryManagerSharedPtr &memoryManager=nullptr) const override
 
IBackendInternal::IWorkloadFactoryPtr CreateWorkloadFactory (class TensorHandleFactoryRegistry &tensorHandleFactoryRegistry) const override
 
IBackendInternal::IBackendContextPtr CreateBackendContext (const IRuntime::CreationOptions &) const override
 Create the runtime context of the backend. More...
 
IBackendInternal::IBackendProfilingContextPtr CreateBackendProfilingContext (const IRuntime::CreationOptions &creationOptions, IBackendProfilingPtr &backendProfiling) override
 Create a context specifically used for profiling interaction from backends. More...
 
IBackendInternal::ILayerSupportSharedPtr GetLayerSupport () const override
 
OptimizationViews OptimizeSubgraphView (const SubgraphView &subgraph, const ModelOptions &modelOptions) const override
 
std::vector< ITensorHandleFactory::FactoryId > GetHandleFactoryPreferences () const override
 (Optional) Returns a vector of supported TensorHandleFactory ids in preference order. More...
 
void RegisterTensorHandleFactories (class TensorHandleFactoryRegistry &registry) override
 (Optional) Register TensorHandleFactories. Either this method or CreateMemoryManager() together with IWorkloadFactory::CreateTensor() and IWorkloadFactory::CreateSubtensor() must be implemented. More...
 
BackendCapabilities GetCapabilities () const override
 Returns a BackendCapability if the backend lists the capability. The BackendCapability must then be inspected to check whether or not that BackendCapability is supported. Otherwise returns an EmptyOptional if the BackendCapability is unlisted. More...
 
std::unique_ptr< ICustomAllocator > GetDefaultAllocator () const override
 Returns the default memory allocator for the backend. More...
 
ExecutionData CreateExecutionData (WorkingMemDescriptor &workingMemDescriptor) const override
 Returns ExecutionData for the backend. More...
 
void UpdateExecutionData (ExecutionData &executionData, WorkingMemDescriptor &workingMemDescriptor) const override
 Update the ExecutionData for a layer. More...
 
- Public Member Functions inherited from IBackendInternal
 ~IBackendInternal () override=default
 Allow backends created by the factory function to be destroyed through IBackendInternal. More...
 
virtual IWorkloadFactoryPtr CreateWorkloadFactory (const IMemoryManagerSharedPtr &memoryManager, const ModelOptions &modelOptions) const
 
virtual IWorkloadFactoryPtr CreateWorkloadFactory (class TensorHandleFactoryRegistry &tensorHandleFactoryRegistry, const ModelOptions &modelOptions) const
 
virtual IWorkloadFactoryPtr CreateWorkloadFactory (class TensorHandleFactoryRegistry &tensorHandleFactoryRegistry, const ModelOptions &modelOptions, MemorySourceFlags inputFlags, MemorySourceFlags outputFlags) const
 
virtual IBackendSpecificModelContextPtr CreateBackendSpecificModelContext (const ModelOptions &modelOptions) const
 
virtual ILayerSupportSharedPtr GetLayerSupport (const ModelOptions &modelOptions) const
 
virtual OptimizationViews OptimizeSubgraphView (const SubgraphView &subgraph) const
 
bool SupportsTensorAllocatorAPI () const
 
ITensorHandleFactory::FactoryId GetBackwardCompatibleFavoriteHandleFactory ()
 
virtual void RegisterTensorHandleFactories (class TensorHandleFactoryRegistry &registry, MemorySourceFlags inputFlags, MemorySourceFlags outputFlags)
 (Optional) Register TensorHandleFactories. Either this method or CreateMemoryManager() together with IWorkloadFactory::CreateTensor() and IWorkloadFactory::CreateSubtensor() must be implemented. More...
 
virtual bool UseCustomMemoryAllocator (std::shared_ptr< ICustomAllocator > allocator, armnn::Optional< std::string & > errMsg)
 Signals the backend to use a custom memory allocator provided by the user. More...
 
virtual unsigned int GetNumberOfCacheFiles () const
 Returns the number of files cached if backend supports caching. More...
 

Static Public Member Functions

static const BackendId & GetIdStatic ()
 
- Static Public Member Functions inherited from IBackendInternal
static constexpr BackendVersion GetApiVersion ()
 Returns the version of the Backend API. More...
 

Additional Inherited Members

- Public Types inherited from IBackendInternal
using IWorkloadFactoryPtr = std::unique_ptr< IWorkloadFactory >
 
using IBackendContextPtr = std::unique_ptr< IBackendContext >
 
using IBackendProfilingContextPtr = std::shared_ptr< arm::pipe::IBackendProfilingContext >
 This is the bridge between the backend and backend profiling; we'll keep it in the backend namespace. More...
 
using IBackendProfilingPtr = std::unique_ptr< arm::pipe::IBackendProfiling >
 
using ILayerSupportSharedPtr = std::shared_ptr< ILayerSupport >
 
using IBackendSpecificModelContextPtr = std::shared_ptr< IBackendModelContext >
 
using IMemoryManagerUniquePtr = std::unique_ptr< IMemoryManager >
 
using IMemoryManagerSharedPtr = std::shared_ptr< IMemoryManager >
 
- Protected Member Functions inherited from IBackendInternal
 IBackendInternal ()=default
 Creation must be done through a specific backend interface. More...
 
- Protected Member Functions inherited from IBackend
 IBackend ()
 
virtual ~IBackend ()
 

Detailed Description

Definition at line 30 of file RefBackend.hpp.

Constructor & Destructor Documentation

◆ RefBackend()

RefBackend ( )
default

◆ ~RefBackend()

~RefBackend ( )
default

Member Function Documentation

◆ CreateBackendContext()

IBackendInternal::IBackendContextPtr CreateBackendContext ( const IRuntime::CreationOptions & ) const
override virtual

Create the runtime context of the backend.

Implementations may return a default-constructed IBackendContextPtr if no context is needed at runtime. Implementations must throw BackendUnavailableException if the backend cannot be used (for example, necessary accelerator hardware is not present). The default implementation always returns a default-constructed pointer.

Reimplemented from IBackendInternal.

Definition at line 50 of file RefBackend.cpp.

51 {
52  return IBackendContextPtr{};
53 }

◆ CreateBackendProfilingContext()

IBackendInternal::IBackendProfilingContextPtr CreateBackendProfilingContext ( const IRuntime::CreationOptions & creationOptions,
IBackendProfilingPtr & backendProfiling 
)
override virtual

Create a context specifically used for profiling interaction from backends.

Reimplemented from IBackendInternal.

Definition at line 55 of file RefBackend.cpp.

 57 {
 58  return IBackendProfilingContextPtr{};
 59 }

◆ CreateExecutionData()

ExecutionData CreateExecutionData ( WorkingMemDescriptor & workingMemDescriptor) const
override virtual

Returns ExecutionData for the backend.

Parameters
workingMemDescriptor - Vectors of input and output TensorHandles for a layer
Returns
- Backend-specific ExecutionData generated for a layer

Reimplemented from IBackendInternal.

Definition at line 162 of file RefBackend.cpp.

163 {
164  ExecutionData executionData;
165  executionData.m_Data = &workingMemDescriptor;
166  return executionData;
167 }

References ExecutionData::m_Data.

◆ CreateMemoryManager()

IBackendInternal::IMemoryManagerUniquePtr CreateMemoryManager ( ) const
override virtual

Reimplemented from IBackendInternal.

Definition at line 61 of file RefBackend.cpp.

62 {
63  return std::make_unique<RefMemoryManager>();
64 }

◆ CreateWorkloadFactory() [1/2]

IBackendInternal::IWorkloadFactoryPtr CreateWorkloadFactory ( class TensorHandleFactoryRegistry & tensorHandleFactoryRegistry) const
override virtual

Reimplemented from IBackendInternal.

Definition at line 34 of file RefBackend.cpp.

36 {
37  auto memoryManager = std::make_shared<RefMemoryManager>();
38 
39  tensorHandleFactoryRegistry.RegisterMemoryManager(memoryManager);
40 
41  std::unique_ptr<RefTensorHandleFactory> factory = std::make_unique<RefTensorHandleFactory>(memoryManager);
42  // Register copy and import factory pair
43  tensorHandleFactoryRegistry.RegisterCopyAndImportFactoryPair(factory->GetId(), factory->GetId());
44  // Register the factory
45  tensorHandleFactoryRegistry.RegisterFactory(std::move(factory));
46 
47  return std::make_unique<RefWorkloadFactory>(PolymorphicPointerDowncast<RefMemoryManager>(memoryManager));
48 }

References TensorHandleFactoryRegistry::RegisterCopyAndImportFactoryPair(), TensorHandleFactoryRegistry::RegisterFactory(), and TensorHandleFactoryRegistry::RegisterMemoryManager().
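As a usage sketch (the caller function below is hypothetical, not part of Arm NN, and the TensorHandleFactoryRegistry include path is an assumption), this overload can be used to obtain a workload factory while registering the reference memory manager and tensor handle factory as a side effect:

 #include <RefBackend.hpp>
 #include <armnn/backends/TensorHandleFactoryRegistry.hpp> // assumed header location

 armnn::IBackendInternal::IWorkloadFactoryPtr MakeRefWorkloadFactory(
     armnn::TensorHandleFactoryRegistry& registry)
 {
     armnn::RefBackend backend;
     // Registers a RefMemoryManager and RefTensorHandleFactory with the registry,
     // then returns a RefWorkloadFactory bound to that memory manager.
     return backend.CreateWorkloadFactory(registry);
 }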

◆ CreateWorkloadFactory() [2/2]

IBackendInternal::IWorkloadFactoryPtr CreateWorkloadFactory ( const IBackendInternal::IMemoryManagerSharedPtr & memoryManager = nullptr) const
override virtual

Implements IBackendInternal.

Definition at line 28 of file RefBackend.cpp.

30 {
31  return std::make_unique<RefWorkloadFactory>(PolymorphicPointerDowncast<RefMemoryManager>(memoryManager));
32 }

◆ GetCapabilities()

BackendCapabilities GetCapabilities ( ) const
inline override virtual

Returns a BackendCapability if the backend lists the capability. The BackendCapability must then be inspected to check whether or not that BackendCapability is supported. Otherwise returns an EmptyOptional if the BackendCapability is unlisted.

Reimplemented from IBackendInternal.

Definition at line 61 of file RefBackend.hpp.

62  {
63  return cpuRefCapabilities;
64  };

References armnn::cpuRefCapabilities.
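The returned BackendCapabilities object is armnn::cpuRefCapabilities, which lists boolean flags such as "NonConstWeights", "AsyncExecution" and "ConstantTensorsAsInputs". A minimal sketch of inspecting it follows, assuming the BackendOptions interface (GetOptionCount(), GetOption(), BackendOption::GetName()/GetValue(), Var::IsBool()/AsBool()) declared in armnn/BackendOptions.hpp; the helper function is hypothetical:

 #include <RefBackend.hpp>
 #include <armnn/BackendOptions.hpp>
 #include <cstddef>
 #include <string>

 // Hypothetical helper: true if CpuRef lists the named capability with value true.
 bool RefHasBoolCapability(const std::string& name)
 {
     armnn::RefBackend backend;
     armnn::BackendCapabilities caps = backend.GetCapabilities();
     for (std::size_t i = 0; i < caps.GetOptionCount(); ++i)
     {
         const auto& option = caps.GetOption(i);
         if (option.GetName() == name && option.GetValue().IsBool())
         {
             return option.GetValue().AsBool();
         }
     }
     return false; // capability not listed
 }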

◆ GetDefaultAllocator()

std::unique_ptr< ICustomAllocator > GetDefaultAllocator ( ) const
override virtual

Returns the default memory allocator for the backend.

Returns
- A unique pointer to the default allocator of the backend

Reimplemented from IBackendInternal.

Definition at line 157 of file RefBackend.cpp.

158 {
159  return std::make_unique<DefaultAllocator>();
160 }

◆ GetHandleFactoryPreferences()

std::vector< ITensorHandleFactory::FactoryId > GetHandleFactoryPreferences ( ) const
override virtual

(Optional) Returns a vector of supported TensorHandleFactory ids in preference order.

Reimplemented from IBackendInternal.

Definition at line 138 of file RefBackend.cpp.

139 {
140  return std::vector<ITensorHandleFactory::FactoryId> { RefTensorHandleFactory::GetIdStatic() };
141 }

References RefTensorHandleFactory::GetIdStatic().

◆ GetId()

const BackendId& GetId ( ) const
inline override virtual

Implements IBackend.

Definition at line 37 of file RefBackend.hpp.

37 { return GetIdStatic(); }

References RefBackend::GetIdStatic().

◆ GetIdStatic()

const BackendId & GetIdStatic ( )
static

Definition at line 22 of file RefBackend.cpp.

23 {
24  static const BackendId s_Id{RefBackendId()};
25  return s_Id;
26 }

References armnn::RefBackendId().

Referenced by GetBackendId(), and RefBackend::GetId().
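Since armnn::RefBackendId() resolves to "CpuRef", the static id can be used to recognise the reference backend; a small sketch with a hypothetical helper:

 #include <RefBackend.hpp>
 #include <armnn/BackendId.hpp>

 // Hypothetical helper: true if the given id refers to the reference (CpuRef) backend.
 bool IsRefBackend(const armnn::BackendId& id)
 {
     return id == armnn::RefBackend::GetIdStatic(); // equivalent to comparing against "CpuRef"
 }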

◆ GetLayerSupport()

IBackendInternal::ILayerSupportSharedPtr GetLayerSupport ( ) const
override virtual

Implements IBackendInternal.

Definition at line 66 of file RefBackend.cpp.

67 {
68  static ILayerSupportSharedPtr layerSupport{new RefLayerSupport};
69  return layerSupport;
70 }

◆ OptimizeSubgraphView()

OptimizationViews OptimizeSubgraphView ( const SubgraphView & subgraph,
const ModelOptions & modelOptions 
) const
override virtual

Reimplemented from IBackendInternal.

Definition at line 72 of file RefBackend.cpp.

74 {
75  OptimizationViews optimizationViews(modelOptions);
76 
77  auto it = subgraph.end();
78  std::map<LayerGuid, Layer*> untouched;
79 
80  while (it != subgraph.begin())
81  {
82  --it;
83  Layer& base = *(PolymorphicDowncast<Layer*>(*it));
84  untouched.insert({base.GetGuid(), &base});
85  }
86 
87  it = subgraph.end();
88  while (it != subgraph.begin())
89  {
90  --it;
91  Layer& base = *(PolymorphicDowncast<Layer*>(*it));
92 
93  // Special case to fuse padding into average pooling 2d for quantized datatype.
94  // Required to be done as a backend specific optimization as Neon does not support this special case.
95  if (base.GetType() == LayerType::Pooling2d)
96  {
97  Pooling2dLayer* baseLayer = PolymorphicDowncast<Pooling2dLayer*>(&base);
98  Pooling2dDescriptor poolingDescriptor = baseLayer->GetParameters();
99 
100  if (baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer().GetType() == LayerType::Pad)
101  {
102  PadLayer* padLayer = PolymorphicDowncast<PadLayer*>(
103  &baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer());
104  if (padLayer->GetOutputSlot(0).GetNumConnections() == 1 &&
105  optimizations::pad_fold::TryFoldPadIntoLayer2d(padLayer->GetParameters(),
106  poolingDescriptor,
107  padLayer->GetOutputSlot().GetTensorInfo(),
108  true))
109  {
110  FoldPadIntoAveragePool2d<Pooling2dLayer>(optimizationViews, baseLayer,
111  poolingDescriptor, padLayer);
112  untouched.erase(baseLayer->GetGuid());
113  untouched.erase(padLayer->GetGuid());
114  }
115  }
116  }
117 
118  // Remove Reshape where possible
119  if (base.GetType() == LayerType::Reshape)
120  {
121  ReshapeLayer* baseLayer = PolymorphicDowncast<ReshapeLayer*>(&base);
122  RemoveReshapeLayer(baseLayer, untouched, optimizationViews);
123  }
124  }
125 
126  if (optimizationViews.GetSubstitutions().empty() && optimizationViews.GetDeletedSubgraphs().empty())
127  {
128  optimizationViews.AddUntouchedSubgraph(SubgraphView(subgraph));
129  }
130  else
131  {
132  ReportUntouchedLayers(optimizationViews, untouched);
133  }
134 
135  return optimizationViews;
136 }

References OptimizationViews::AddUntouchedSubgraph(), SubgraphView::begin(), SubgraphView::end(), InputSlot::GetConnectedOutputSlot(), OptimizationViews::GetDeletedSubgraphs(), Layer::GetGuid(), Layer::GetInputSlot(), OutputSlot::GetNumConnections(), Layer::GetOutputSlot(), OutputSlot::GetOwningLayer(), LayerWithParameters< Parameters >::GetParameters(), OptimizationViews::GetSubstitutions(), OutputSlot::GetTensorInfo(), Layer::GetType(), armnn::Pad, armnn::Pooling2d, armnn::RemoveReshapeLayer(), armnn::ReportUntouchedLayers(), armnn::Reshape, and armnn::optimizations::pad_fold::TryFoldPadIntoLayer2d().

◆ RegisterTensorHandleFactories()

void RegisterTensorHandleFactories ( class TensorHandleFactoryRegistry & registry )
override virtual

(Optional) Register TensorHandleFactories. Either this method or CreateMemoryManager() together with IWorkloadFactory::CreateTensor() and IWorkloadFactory::CreateSubtensor() must be implemented.

Reimplemented from IBackendInternal.

Definition at line 143 of file RefBackend.cpp.

144 {
145  auto memoryManager = std::make_shared<RefMemoryManager>();
146 
147  registry.RegisterMemoryManager(memoryManager);
148 
149  std::unique_ptr<RefTensorHandleFactory> factory = std::make_unique<RefTensorHandleFactory>(memoryManager);
150 
151  // Register copy and import factory pair
152  registry.RegisterCopyAndImportFactoryPair(factory->GetId(), factory->GetId());
153  // Register the factory
154  registry.RegisterFactory(std::move(factory));
155 }

References TensorHandleFactoryRegistry::RegisterCopyAndImportFactoryPair(), TensorHandleFactoryRegistry::RegisterFactory(), and TensorHandleFactoryRegistry::RegisterMemoryManager().

◆ UpdateExecutionData()

void UpdateExecutionData ( ExecutionData & executionData,
WorkingMemDescriptor & workingMemDescriptor 
) const
override virtual

Update the ExecutionData for a layer.

It is used to swap in pre-imported tensor handles.

Parameters
executionData - Backend-specific ExecutionData generated for a layer
workingMemDescriptor - Vectors of input and output TensorHandles for a layer

Reimplemented from IBackendInternal.

Definition at line 169 of file RefBackend.cpp.

170 {
171  executionData.m_Data = &workingMemDescriptor;
172 }

References ExecutionData::m_Data.
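Taken together with CreateExecutionData(), the CpuRef flow simply points ExecutionData::m_Data at the layer's WorkingMemDescriptor and repoints it on update. The sketch below assumes both types live in the armnn::experimental namespace and are declared in armnn/backends/WorkingMemDescriptor.hpp; the helper function is hypothetical:

 #include <RefBackend.hpp>
 #include <armnn/backends/WorkingMemDescriptor.hpp> // assumed location of WorkingMemDescriptor/ExecutionData

 // Hypothetical helper: create ExecutionData for a layer, then refresh it once
 // pre-imported tensor handles have been swapped into the descriptor.
 armnn::experimental::ExecutionData BindWorkingMemory(
     const armnn::RefBackend& backend,
     armnn::experimental::WorkingMemDescriptor& descriptor)
 {
     armnn::experimental::ExecutionData data = backend.CreateExecutionData(descriptor);
     backend.UpdateExecutionData(data, descriptor); // m_Data now points at descriptor
     return data;
 }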


The documentation for this class was generated from the following files:
RefBackend.hpp
RefBackend.cpp