ArmNN 24.05 — GpuFsaWorkloadFactory.cpp (source listing exported from the generated documentation)
1 //
2 // Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include <Layer.hpp>

#include "GpuFsaWorkloadFactory.hpp"
#include "GpuFsaBackendId.hpp"
#include "GpuFsaTensorHandle.hpp"
#include "GpuFsaConstantWorkload.hpp"
#include "GpuFsaPreCompiledWorkload.hpp"

#include <armnn/backends/MemCopyWorkload.hpp>
17 namespace armnn
18 {
19 
20 namespace
21 {
22 static const BackendId s_Id{GpuFsaBackendId()};
23 }
24 template <typename QueueDescriptorType>
25 std::unique_ptr<IWorkload> GpuFsaWorkloadFactory::MakeWorkload(const QueueDescriptorType& /*descriptor*/,
26  const WorkloadInfo& /*info*/) const
27 {
28  return nullptr;
29 }
30 
31 template <DataType ArmnnType>
32 bool IsDataType(const WorkloadInfo& info)
33 {
34  auto checkType = [](const TensorInfo& tensorInfo) {return tensorInfo.GetDataType() == ArmnnType;};
35  auto it = std::find_if(std::begin(info.m_InputTensorInfos), std::end(info.m_InputTensorInfos), checkType);
36  if (it != std::end(info.m_InputTensorInfos))
37  {
38  return true;
39  }
40  it = std::find_if(std::begin(info.m_OutputTensorInfos), std::end(info.m_OutputTensorInfos), checkType);
41  if (it != std::end(info.m_OutputTensorInfos))
42  {
43  return true;
44  }
45  return false;
46 }
47 
48 GpuFsaWorkloadFactory::GpuFsaWorkloadFactory(const std::shared_ptr<GpuFsaMemoryManager>& memoryManager)
49  : m_MemoryManager(memoryManager)
50 {
52 }
53 
55  : m_MemoryManager(new GpuFsaMemoryManager())
56 {
58 }
59 
61 {
62  return s_Id;
63 }
64 
66  Optional<DataType> dataType,
67  std::string& outReasonIfUnsupported)
68 {
69  return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
70 }
71 
72 std::unique_ptr<ITensorHandle> GpuFsaWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
73  const bool /*isMemoryManaged*/) const
74 {
75  std::unique_ptr<GpuFsaTensorHandle> tensorHandle = std::make_unique<GpuFsaTensorHandle>(tensorInfo);
76  tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
77 
78  return tensorHandle;
79 }
80 
81 std::unique_ptr<ITensorHandle> GpuFsaWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
82  DataLayout dataLayout,
83  const bool /*isMemoryManaged*/) const
84 {
85  std::unique_ptr<GpuFsaTensorHandle> tensorHandle = std::make_unique<GpuFsaTensorHandle>(tensorInfo, dataLayout);
86  tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
87 
88  return tensorHandle;
89 }
90 
91 
93  // Initialize our m_CLCompileContext using default device and context
94  auto context = arm_compute::CLKernelLibrary::get().context();
95  auto device = arm_compute::CLKernelLibrary::get().get_device();
96  m_CLCompileContext = arm_compute::CLCompileContext(context, device);
97 }
98 
99 std::unique_ptr<IWorkload> GpuFsaWorkloadFactory::CreateWorkload(LayerType type,
100  const QueueDescriptor& descriptor,
101  const WorkloadInfo& info) const
102 {
103  switch(type)
104  {
105  case LayerType::Constant :
106  {
107  auto constQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
108  return std::make_unique<GpuFsaConstantWorkload>(*constQueueDescriptor, info, m_CLCompileContext);
109  }
110  case LayerType::Input :
111  {
112  auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
113  return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
114  }
115  case LayerType::Output :
116  {
117  auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
118  return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
119  }
120  case LayerType::MemCopy :
121  {
122  auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
123  if (memCopyQueueDescriptor->m_Inputs.empty() || !memCopyQueueDescriptor->m_Inputs[0])
124  {
125  throw InvalidArgumentException("GpuFsaWorkloadFactory: Invalid null input for MemCopy workload");
126  }
127  return std::make_unique<CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
128  }
130  {
131  auto precompiledQueueDescriptor = PolymorphicDowncast<const PreCompiledQueueDescriptor*>(&descriptor);
132  return std::make_unique<GpuFsaPreCompiledWorkload>(*precompiledQueueDescriptor, info);
133  }
134  default :
135  return nullptr;
136  }
137 }
138 
139 } // namespace armnn
armnn::Optional
Definition: Optional.hpp:270
armnn::IsDataType
bool IsDataType(const WorkloadInfo &info)
Definition: GpuFsaWorkloadFactory.cpp:32
armnn::DataLayout
DataLayout
Definition: Types.hpp:62
armnn::GpuFsaWorkloadFactory::IsLayerSupported
static bool IsLayerSupported(const Layer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
Definition: GpuFsaWorkloadFactory.cpp:65
armnn::GpuFsaWorkloadFactory::GpuFsaWorkloadFactory
GpuFsaWorkloadFactory()
Definition: GpuFsaWorkloadFactory.cpp:54
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::GpuFsaWorkloadFactory::CreateTensorHandle
std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const override
Definition: GpuFsaWorkloadFactory.cpp:72
armnn::GpuFsaBackendId
constexpr const char * GpuFsaBackendId()
Definition: GpuFsaBackendId.hpp:10
armnn::GpuFsaWorkloadFactory::InitializeCLCompileContext
void InitializeCLCompileContext()
Definition: GpuFsaWorkloadFactory.cpp:92
GpuFsaTensorHandle.hpp
GpuFsaConstantWorkload.hpp
GpuFsaWorkloadFactory.hpp
armnn::Layer
Definition: Layer.hpp:230
armnn::WorkloadInfo
Contains information about TensorInfos of a layer.
Definition: WorkloadInfo.hpp:16
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn::QueueDescriptor
Definition: WorkloadData.hpp:24
armnn::BoostLogSeverityMapping::info
@ info
armnn::IWorkloadFactory::IsLayerSupported
static bool IsLayerSupported(const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
Definition: WorkloadFactory.cpp:1629
MemCopyWorkload.hpp
armnn::LayerType::PreCompiled
@ PreCompiled
armnn::BackendId
Definition: BackendId.hpp:75
armnn::LayerType::MemCopy
@ MemCopy
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
Layer.hpp
armnn::GpuFsaMemoryManager
Definition: GpuFsaMemoryManager.hpp:16
armnn::GpuFsaWorkloadFactory::GetBackendId
const BackendId & GetBackendId() const override
Definition: GpuFsaWorkloadFactory.cpp:60
armnn::LayerType::Input
@ Input
GpuFsaPreCompiledWorkload.hpp
GpuFsaBackendId.hpp
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:491
armnn::GpuFsaWorkloadFactory::CreateWorkload
std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const override
Backends should implement their own CreateWorkload function with a switch statement.
Definition: GpuFsaWorkloadFactory.cpp:99
armnn::LayerType::Output
@ Output
armnn::LayerType::Constant
@ Constant