ArmNN
 24.02
GpuFsaWorkloadFactory Class Reference

#include <GpuFsaWorkloadFactory.hpp>

Inheritance diagram for GpuFsaWorkloadFactory:
[legend]
Collaboration diagram for GpuFsaWorkloadFactory:
[legend]

Public Member Functions

 GpuFsaWorkloadFactory (const std::shared_ptr< GpuFsaMemoryManager > &memoryManager)
 
 GpuFsaWorkloadFactory ()
 
 ~GpuFsaWorkloadFactory ()
 
const BackendId & GetBackendId () const override
 
bool SupportsSubTensors () const override
 
std::unique_ptr< ITensorHandle > CreateSubTensorHandle (ITensorHandle &, TensorShape const &, unsigned int const *) const override
 
std::unique_ptr< ITensorHandle > CreateTensorHandle (const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const override
 
std::unique_ptr< ITensorHandle > CreateTensorHandle (const TensorInfo &tensorInfo, DataLayout dataLayout, const bool IsMemoryManaged=true) const override
 
void InitializeCLCompileContext ()
 
std::unique_ptr< IWorkload > CreateWorkload (LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const override
 Backends should implement their own CreateWorkload function with a switch statement. More...
 
- Public Member Functions inherited from IWorkloadFactory
virtual ~IWorkloadFactory ()
 
virtual void AfterWorkloadsCreated ()
 

Static Public Member Functions

static bool IsLayerSupported (const Layer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
- Static Public Member Functions inherited from IWorkloadFactory
static bool IsLayerSupported (const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported, const ModelOptions &modelOptions)
 
static bool IsLayerSupported (const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported, const ModelOptions &modelOptions)
 

Detailed Description

Definition at line 17 of file GpuFsaWorkloadFactory.hpp.

Constructor & Destructor Documentation

◆ GpuFsaWorkloadFactory() [1/2]

GpuFsaWorkloadFactory ( const std::shared_ptr< GpuFsaMemoryManager > &  memoryManager)
explicit

Definition at line 48 of file GpuFsaWorkloadFactory.cpp.

49  : m_MemoryManager(memoryManager)
50 {
51  InitializeCLCompileContext();
52 }

References GpuFsaWorkloadFactory::InitializeCLCompileContext().

◆ GpuFsaWorkloadFactory() [2/2]

Definition at line 54 of file GpuFsaWorkloadFactory.cpp.

55  : m_MemoryManager(new GpuFsaMemoryManager())
56 {
57  InitializeCLCompileContext();
58 }

References GpuFsaWorkloadFactory::InitializeCLCompileContext().

◆ ~GpuFsaWorkloadFactory()

~GpuFsaWorkloadFactory ( )
inline

Definition at line 23 of file GpuFsaWorkloadFactory.hpp.

23 {}

Member Function Documentation

◆ CreateSubTensorHandle()

std::unique_ptr<ITensorHandle> CreateSubTensorHandle ( ITensorHandle & ,
TensorShape const &  ,
unsigned int const *   
) const
inlineoverridevirtual

Implements IWorkloadFactory.

Definition at line 34 of file GpuFsaWorkloadFactory.hpp.

37  {
38  return nullptr;
39  }

◆ CreateTensorHandle() [1/2]

std::unique_ptr< ITensorHandle > CreateTensorHandle ( const TensorInfo &  tensorInfo,
const bool  IsMemoryManaged = true 
) const
overridevirtual

Implements IWorkloadFactory.

Definition at line 72 of file GpuFsaWorkloadFactory.cpp.

74 {
75  std::unique_ptr<GpuFsaTensorHandle> tensorHandle = std::make_unique<GpuFsaTensorHandle>(tensorInfo);
76  tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
77 
78  return tensorHandle;
79 }

◆ CreateTensorHandle() [2/2]

std::unique_ptr< ITensorHandle > CreateTensorHandle ( const TensorInfo &  tensorInfo,
DataLayout  dataLayout,
const bool  IsMemoryManaged = true 
) const
overridevirtual

Implements IWorkloadFactory.

Definition at line 81 of file GpuFsaWorkloadFactory.cpp.

84 {
85  std::unique_ptr<GpuFsaTensorHandle> tensorHandle = std::make_unique<GpuFsaTensorHandle>(tensorInfo, dataLayout);
86  tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
87 
88  return tensorHandle;
89 }

◆ CreateWorkload()

std::unique_ptr< IWorkload > CreateWorkload ( LayerType  type,
const QueueDescriptor &  descriptor,
const WorkloadInfo &  info 
) const
overridevirtual

Backends should implement their own CreateWorkload function with a switch statement.

The case for the switch should be the LayerType and based on that they will call their specific workload creation functionality.

Implements IWorkloadFactory.

Definition at line 99 of file GpuFsaWorkloadFactory.cpp.

102 {
103  switch(type)
104  {
105  case LayerType::Constant :
106  {
107  auto constQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
108  return std::make_unique<GpuFsaConstantWorkload>(*constQueueDescriptor, info, m_CLCompileContext);
109  }
110  case LayerType::Input :
111  {
112  auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
113  return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
114  }
115  case LayerType::Output :
116  {
117  auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
118  return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
119  }
120  case LayerType::MemCopy :
121  {
122  auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
123  if (memCopyQueueDescriptor->m_Inputs.empty() || !memCopyQueueDescriptor->m_Inputs[0])
124  {
125  throw InvalidArgumentException("GpuFsaWorkloadFactory: Invalid null input for MemCopy workload");
126  }
127  return std::make_unique<CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
128  }
129  case LayerType::PreCompiled :
130  {
131  auto precompiledQueueDescriptor = PolymorphicDowncast<const PreCompiledQueueDescriptor*>(&descriptor);
132  return std::make_unique<GpuFsaPreCompiledWorkload>(*precompiledQueueDescriptor, info);
133  }
134  default :
135  return nullptr;
136  }
137 }

References armnn::Constant, armnn::info, armnn::Input, armnn::MemCopy, armnn::Output, and armnn::PreCompiled.

◆ GetBackendId()

const BackendId & GetBackendId ( ) const
overridevirtual

Implements IWorkloadFactory.

Definition at line 60 of file GpuFsaWorkloadFactory.cpp.

61 {
62  return s_Id;
63 }

◆ InitializeCLCompileContext()

void InitializeCLCompileContext ( )

Definition at line 92 of file GpuFsaWorkloadFactory.cpp.

92  {
93  // Initialize our m_CLCompileContext using default device and context
94  auto context = arm_compute::CLKernelLibrary::get().context();
95  auto device = arm_compute::CLKernelLibrary::get().get_device();
96  m_CLCompileContext = arm_compute::CLCompileContext(context, device);
97 }

Referenced by GpuFsaWorkloadFactory::GpuFsaWorkloadFactory().

◆ IsLayerSupported()

bool IsLayerSupported ( const Layer &  layer,
Optional< DataType >  dataType,
std::string &  outReasonIfUnsupported 
)
static

Definition at line 65 of file GpuFsaWorkloadFactory.cpp.

68 {
69  return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
70 }

References IWorkloadFactory::IsLayerSupported().

◆ SupportsSubTensors()

bool SupportsSubTensors ( ) const
inlineoverridevirtual

Implements IWorkloadFactory.

Definition at line 31 of file GpuFsaWorkloadFactory.hpp.

31 { return false; }

The documentation for this class was generated from the following files:
GpuFsaWorkloadFactory.hpp
GpuFsaWorkloadFactory.cpp
armnn::GpuFsaWorkloadFactory::InitializeCLCompileContext
void InitializeCLCompileContext()
Definition: GpuFsaWorkloadFactory.cpp:92
armnn::IWorkloadFactory::IsLayerSupported
static bool IsLayerSupported(const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
Definition: WorkloadFactory.cpp:1614
armnn::LayerType::PreCompiled
@ PreCompiled
armnn::LayerType::MemCopy
@ MemCopy
armnn::LayerType::Input
@ Input
armnn::LayerType::Output
@ Output
armnn::LayerType::Constant
@ Constant