ArmNN
 23.02
ClWorkloadFactory Class Reference

#include <ClWorkloadFactory.hpp>

Inheritance diagram for ClWorkloadFactory:
WorkloadFactoryBase IWorkloadFactory

Public Member Functions

 ClWorkloadFactory (const std::shared_ptr< ClMemoryManager > &memoryManager)
 
 ClWorkloadFactory (const std::shared_ptr< ClMemoryManager > &memoryManager, const IBackendInternal::IBackendSpecificModelContextPtr &modelContextPtr)
 
void AfterWorkloadsCreated () override
 
const BackendId & GetBackendId () const override
 
bool SupportsSubTensors () const override
 
std::unique_ptr< ITensorHandle > CreateSubTensorHandle (ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const override
 
std::unique_ptr< ITensorHandle > CreateTensorHandle (const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const override
 
std::unique_ptr< ITensorHandle > CreateTensorHandle (const TensorInfo &tensorInfo, DataLayout dataLayout, const bool IsMemoryManaged=true) const override
 
std::unique_ptr< IWorkload > CreateWorkload (LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const override
 
- Public Member Functions inherited from WorkloadFactoryBase
std::unique_ptr< IWorkload > CreateInput (const InputQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateActivation (const ActivationQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateAddition (const AdditionQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateArgMinMax (const ArgMinMaxQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateBatchNormalization (const BatchNormalizationQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateBatchToSpaceNd (const BatchToSpaceNdQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateComparison (const ComparisonQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateConcat (const ConcatQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateConstant (const ConstantQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateConvertFp16ToFp32 (const ConvertFp16ToFp32QueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateConvertFp32ToFp16 (const ConvertFp32ToFp16QueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateConvolution2d (const Convolution2dQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateDebug (const DebugQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateDepthToSpace (const DepthToSpaceQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateDepthwiseConvolution2d (const DepthwiseConvolution2dQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateDequantize (const DequantizeQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateDetectionPostProcess (const DetectionPostProcessQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateDivision (const DivisionQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateElementwiseUnary (const ElementwiseUnaryQueueDescriptor &descriptor, const WorkloadInfo &info) const override
 
std::unique_ptr< IWorkload > CreateFakeQuantization (const FakeQuantizationQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateFloor (const FloorQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateFullyConnected (const FullyConnectedQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateGather (const GatherQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateInstanceNormalization (const InstanceNormalizationQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateL2Normalization (const L2NormalizationQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateLogSoftmax (const LogSoftmaxQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateLstm (const LstmQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateMaximum (const MaximumQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateMean (const MeanQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateMemCopy (const MemCopyQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateMemImport (const MemImportQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateMerge (const MergeQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateMinimum (const MinimumQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateMultiplication (const MultiplicationQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateNormalization (const NormalizationQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateOutput (const OutputQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreatePad (const PadQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreatePermute (const PermuteQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreatePooling2d (const Pooling2dQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreatePooling3d (const Pooling3dQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreatePreCompiled (const PreCompiledQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreatePrelu (const PreluQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateQuantize (const QuantizeQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateQuantizedLstm (const QuantizedLstmQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateRank (const RankQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateReshape (const ReshapeQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateResize (const ResizeQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateSlice (const SliceQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateSoftmax (const SoftmaxQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateSpaceToBatchNd (const SpaceToBatchNdQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateSpaceToDepth (const SpaceToDepthQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateSubtraction (const SubtractionQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateSplitter (const SplitterQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateStack (const StackQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateStridedSlice (const StridedSliceQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateSwitch (const SwitchQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateTranspose (const TransposeQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateTransposeConvolution2d (const TransposeConvolution2dQueueDescriptor &, const WorkloadInfo &) const override
 
- Public Member Functions inherited from IWorkloadFactory
virtual ~IWorkloadFactory ()
 

Static Public Member Functions

static bool IsLayerSupported (const Layer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported, const ModelOptions &modelOptions)
 
- Static Public Member Functions inherited from IWorkloadFactory
static bool IsLayerSupported (const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported, const ModelOptions &modelOptions)
 
static bool IsLayerSupported (const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported, const ModelOptions &modelOptions)
 

Detailed Description

Definition at line 21 of file ClWorkloadFactory.hpp.

Constructor & Destructor Documentation

◆ ClWorkloadFactory() [1/2]

ClWorkloadFactory ( const std::shared_ptr< ClMemoryManager > &  memoryManager)

Definition at line 188 of file ClWorkloadFactory.cpp.

189  : m_MemoryManager(memoryManager), m_ModelContextPtr(IBackendInternal::IBackendSpecificModelContextPtr{})
190 {
191  InitializeCLCompileContext();
192 }

◆ ClWorkloadFactory() [2/2]

ClWorkloadFactory ( const std::shared_ptr< ClMemoryManager > &  memoryManager,
const IBackendInternal::IBackendSpecificModelContextPtr &  modelContextPtr 
)

Definition at line 194 of file ClWorkloadFactory.cpp.

196  : m_MemoryManager(memoryManager), m_ModelContextPtr(modelContextPtr)
197 {
198  InitializeCLCompileContext();
199 }

Member Function Documentation

◆ AfterWorkloadsCreated()

void AfterWorkloadsCreated ( )
overridevirtual

Reimplemented from IWorkloadFactory.

Definition at line 66 of file ClWorkloadFactory.cpp.

67 {
68  if(m_ModelContextPtr)
69  {
70  auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
71  if (modelOptions->SaveCachedNetwork())
72  {
73  ClContextSerializer serializer;
74  serializer.Serialize(m_CLCompileContext);
75  auto cachedFd = modelOptions->GetCachedFileDescriptor();
76  if (cachedFd != -1)
77  {
78  std::vector<uint8_t> compiledContextData;
79  std::stringstream stream;
80  bool serialized = serializer.SaveSerializedToStream(stream);
81  if (serialized)
82  {
83  std::string const serializedString{stream.str()};
84  std::copy(serializedString.begin(),
85  serializedString.end(),
86  std::back_inserter(compiledContextData));
87  auto success = write(cachedFd, compiledContextData.data(), compiledContextData.size());
88  if (success == -1)
89  {
90  ARMNN_LOG(info) << "ClWorkloadFactory:: Could not cache the compiled context!";
91  }
92  }
93  }
94 
95  // Save map to a filepath provided in ModelOptions
96  auto filePath = modelOptions->GetCachedNetworkFilePath();
97  if (filePath != "" && fs::exists(filePath) && fs::is_regular_file(filePath))
98  {
99  // Serialize ClContext to the file specified
100  std::ofstream file(filePath, std::ios::out | std::ios::binary);
101  serializer.SaveSerializedToStream(file);
102  }
103  }
104  }
105 }

References ARMNN_LOG, and armnn::info.

◆ CreateSubTensorHandle()

std::unique_ptr< ITensorHandle > CreateSubTensorHandle ( ITensorHandle &  parent,
TensorShape const &  subTensorShape,
unsigned int const *  subTensorOrigin 
) const
overridevirtual

Reimplemented from WorkloadFactoryBase.

Definition at line 222 of file ClWorkloadFactory.cpp.

225 {
226  arm_compute::Coordinates coords;
227  arm_compute::TensorShape shape = armcomputetensorutils::BuildArmComputeTensorShape(subTensorShape);
228 
229  coords.set_num_dimensions(subTensorShape.GetNumDimensions());
230  for (unsigned int i = 0; i < subTensorShape.GetNumDimensions(); i++)
231  {
232  // Arm compute indexes tensor coords in reverse order.
233  unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
234  coords.set(i, armnn::numeric_cast<int>(subTensorOrigin[revertedIndex]));
235  }
236 
237  const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
238  if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
239  {
240  return nullptr;
241  }
242 
243  return std::make_unique<ClSubTensorHandle>(
244  PolymorphicDowncast<IClTensorHandle*>(&parent), shape, coords);
245 }

◆ CreateTensorHandle() [1/2]

std::unique_ptr< ITensorHandle > CreateTensorHandle ( const TensorInfo &  tensorInfo,
const bool  IsMemoryManaged = true 
) const
overridevirtual

Reimplemented from WorkloadFactoryBase.

Definition at line 201 of file ClWorkloadFactory.cpp.

203 {
204  IgnoreUnused(IsMemoryManaged);
205  std::unique_ptr<ClTensorHandle> tensorHandle = std::make_unique<ClTensorHandle>(tensorInfo);
206  tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
207 
208  return tensorHandle;
209 }

References armnn::IgnoreUnused().

◆ CreateTensorHandle() [2/2]

std::unique_ptr< ITensorHandle > CreateTensorHandle ( const TensorInfo &  tensorInfo,
DataLayout  dataLayout,
const bool  IsMemoryManaged = true 
) const
overridevirtual

Reimplemented from WorkloadFactoryBase.

Definition at line 211 of file ClWorkloadFactory.cpp.

214 {
215  IgnoreUnused(IsMemoryManaged);
216  std::unique_ptr<ClTensorHandle> tensorHandle = std::make_unique<ClTensorHandle>(tensorInfo, dataLayout);
217  tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
218 
219  return tensorHandle;
220 }

References armnn::IgnoreUnused().

◆ CreateWorkload()

std::unique_ptr< IWorkload > CreateWorkload ( LayerType  type,
const QueueDescriptor &  descriptor,
const WorkloadInfo &  info 
) const
overridevirtual

Reimplemented from WorkloadFactoryBase.

Definition at line 247 of file ClWorkloadFactory.cpp.

250 {
251  switch(type)
252  {
253  case LayerType::Activation :
254  {
255  auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
256  return MakeWorkload<ClActivationWorkload>(*activationQueueDescriptor, info, m_CLCompileContext);
257  }
258  case LayerType::Addition :
259  {
260  auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
261  return MakeWorkload<ClAdditionWorkload>(*additionQueueDescriptor, info, m_CLCompileContext);
262  }
263  case LayerType::ArgMinMax :
264  {
265  auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
266  return MakeWorkload<ClArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info, m_CLCompileContext);
267  }
268  case LayerType::BatchMatMul :
269  {
270  auto batchMatMulQueueDescriptor = PolymorphicDowncast<const BatchMatMulQueueDescriptor*>(&descriptor);
271  return std::make_unique<ClBatchMatMulWorkload>(*batchMatMulQueueDescriptor, info, m_CLCompileContext);
272  }
273  case LayerType::BatchNormalization :
274  {
275  auto batchNormalizationQueueDescriptor
276  = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
277  return MakeWorkload<ClBatchNormalizationFloatWorkload, NullWorkload>
278  (*batchNormalizationQueueDescriptor, info, m_CLCompileContext);
279  }
280  case LayerType::BatchToSpaceNd :
281  {
282  auto batchToSpaceNdQueueDescriptor
283  = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
284  return MakeWorkload<ClBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info, m_CLCompileContext);
285  }
286  case LayerType::Cast :
287  {
288  auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
289  return MakeWorkload<ClCastWorkload>(*castQueueDescriptor, info, m_CLCompileContext);
290  }
291  case LayerType::ChannelShuffle :
292  {
293  auto channelShuffleQueueDescriptor
294  = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
295  return MakeWorkload<ClChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info, m_CLCompileContext);
296  }
297  case LayerType::Comparison :
298  {
299  auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
300  return MakeWorkload<ClComparisonWorkload>(*comparisonQueueDescriptor, info, m_CLCompileContext);
301  }
302  case LayerType::Concat :
303  {
304  auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
305  return MakeWorkload<ClConcatWorkload>(*concatQueueDescriptor, info, m_CLCompileContext);
306  }
307  case LayerType::Constant :
308  {
309  auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
310  return MakeWorkload<ClConstantWorkload>(*constantQueueDescriptor, info, m_CLCompileContext);
311  }
312  case LayerType::ConvertFp16ToFp32 :
313  {
314  auto convertFp16ToFp32QueueDescriptor
315  = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
316  return MakeWorkload<ClConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor,
317  info,
318  m_CLCompileContext);
319  }
320  case LayerType::ConvertFp32ToFp16 :
321  {
322  auto convertFp32ToFp16QueueDescriptor
323  = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
324  return MakeWorkload<ClConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor,
325  info,
326  m_CLCompileContext);
327  }
328  case LayerType::Convolution2d :
329  {
330  auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
331 
332  bool isFastMathEnabled = false;
333  if (m_ModelContextPtr)
334  {
335  if (m_ModelContextPtr.get() != nullptr)
336  {
337  auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
338  if (modelOptions)
339  {
340  isFastMathEnabled = modelOptions->IsFastMathEnabled();
341  }
342  }
343  }
344  return MakeWorkload<ClConvolution2dWorkload>(*convolution2dQueueDescriptor,
345  info,
346  m_MemoryManager->GetIntraLayerManager(),
347  m_CLCompileContext,
348  isFastMathEnabled);
349  }
350  case LayerType::Convolution3d :
351  {
352  auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
353 
354  bool isFastMathEnabled = false;
355  if (m_ModelContextPtr)
356  {
357  if (m_ModelContextPtr.get() != nullptr)
358  {
359  auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
360  if (modelOptions)
361  {
362  isFastMathEnabled = modelOptions->IsFastMathEnabled();
363  }
364  }
365  }
366  return MakeWorkload<ClConvolution3dWorkload>(*convolution3dQueueDescriptor,
367  info,
368  m_MemoryManager->GetIntraLayerManager(),
369  m_CLCompileContext,
370  isFastMathEnabled);
371  }
372  case LayerType::Debug :
373  {
374  auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
375  return MakeWorkload<NullWorkload, NullWorkload>(*debugQueueDescriptor, info, m_CLCompileContext);
376  }
377  case LayerType::DepthToSpace :
378  {
379  auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
380  return MakeWorkload<ClDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info, m_CLCompileContext);
381  }
382  case LayerType::DepthwiseConvolution2d :
383  {
384  auto depthwiseConvolution2dQueueDescriptor
385  = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
386  return MakeWorkload<ClDepthwiseConvolutionWorkload>(*depthwiseConvolution2dQueueDescriptor,
387  info,
388  m_CLCompileContext);
389  }
390  case LayerType::Dequantize :
391  {
392  auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
393  return MakeWorkload<ClDequantizeWorkload>(*dequantizeQueueDescriptor, info, m_CLCompileContext);
394  }
395  case LayerType::DetectionPostProcess :
396  {
397  auto detectionPostProcessQueueDescriptor
398  = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
399  return MakeWorkload<NullWorkload, NullWorkload>(*detectionPostProcessQueueDescriptor,
400  info,
401  m_CLCompileContext);
402  }
403  case LayerType::Division :
404  {
405  auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
406  return std::make_unique<ClDivisionWorkload>(*divisionQueueDescriptor, info, m_CLCompileContext);
407  }
408  case LayerType::ElementwiseUnary :
409  {
410  auto elementwiseUnaryQueueDescriptor
411  = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
412 
413  switch(elementwiseUnaryQueueDescriptor->m_Parameters.m_Operation)
414  {
415  case UnaryOperation::Abs:
416  {
417  AbsQueueDescriptor absQueueDescriptor;
418  absQueueDescriptor.m_Inputs = elementwiseUnaryQueueDescriptor->m_Inputs;
419  absQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
420 
421  return std::make_unique<ClAbsWorkload>(absQueueDescriptor, info, m_CLCompileContext);
422  }
423  case UnaryOperation::Exp:
424  return std::make_unique<ClExpWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
425  case UnaryOperation::Log:
426  return std::make_unique<ClLogWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
427  case UnaryOperation::LogicalNot:
428  return std::make_unique<ClLogicalNotWorkload>(*elementwiseUnaryQueueDescriptor,
429  info,
430  m_CLCompileContext);
431  case UnaryOperation::Neg:
432  return std::make_unique<ClNegWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
433  case UnaryOperation::Rsqrt:
434  {
435  RsqrtQueueDescriptor rsqrtQueueDescriptor;
436  rsqrtQueueDescriptor.m_Inputs = elementwiseUnaryQueueDescriptor->m_Inputs;
437  rsqrtQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
438 
439  return std::make_unique<ClRsqrtWorkload>(rsqrtQueueDescriptor, info, m_CLCompileContext);
440  }
441  case UnaryOperation::Sin:
442  return std::make_unique<ClSinWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
443  case UnaryOperation::Sqrt:
444  return std::make_unique<ClSqrtWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
445  default:
446  return nullptr;
447  }
448  }
449  case LayerType::Fill :
450  {
451  auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
452  return std::make_unique<ClFillWorkload>(*fillQueueDescriptor, info, m_CLCompileContext);
453  }
454  case LayerType::Floor :
455  {
456  auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
457  return MakeWorkload<ClFloorFloatWorkload, NullWorkload>(*floorQueueDescriptor, info, m_CLCompileContext);
458  }
459  case LayerType::FullyConnected :
460  {
461  auto fullyConnectedQueueDescriptor
462  = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
463  return MakeWorkload<ClFullyConnectedWorkload>(*fullyConnectedQueueDescriptor,
464  info,
465  m_MemoryManager->GetIntraLayerManager(),
466  m_CLCompileContext);
467  }
468  case LayerType::Gather :
469  {
470  auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
471  return MakeWorkload<ClGatherWorkload>(*gatherQueueDescriptor, info, m_CLCompileContext);
472  }
473  case LayerType::GatherNd :
474  {
475  auto gatherNdQueueDescriptor = PolymorphicDowncast<const GatherNdQueueDescriptor*>(&descriptor);
476  return MakeWorkload<ClGatherNdWorkload>(*gatherNdQueueDescriptor, info, m_CLCompileContext);
477  }
478  case LayerType::Input :
479  {
480  auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
481  return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
482  }
483  case LayerType::InstanceNormalization :
484  {
485  auto instanceNormalizationQueueDescriptor
486  = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
487  return MakeWorkload<ClInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor,
488  info,
489  m_CLCompileContext);
490  }
491  case LayerType::L2Normalization :
492  {
493  auto l2NormalizationQueueDescriptor
494  = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
495  return MakeWorkload<ClL2NormalizationFloatWorkload, NullWorkload>(*l2NormalizationQueueDescriptor,
496  info,
497  m_CLCompileContext);
498  }
499  case LayerType::LogicalBinary :
500  {
501  auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
502 
503  switch(logicalBinaryQueueDescriptor->m_Parameters.m_Operation)
504  {
505  case LogicalBinaryOperation::LogicalAnd:
506  return std::make_unique<ClLogicalAndWorkload>(*logicalBinaryQueueDescriptor,
507  info,
508  m_CLCompileContext);
509  case LogicalBinaryOperation::LogicalOr:
510  return std::make_unique<ClLogicalOrWorkload>(*logicalBinaryQueueDescriptor,
511  info,
512  m_CLCompileContext);
513  default:
514  return nullptr;
515  }
516  }
517  case LayerType::LogSoftmax :
518  {
519  auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
520 
521  return MakeWorkload<ClLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor,
522  info,
523  m_MemoryManager->GetIntraLayerManager(),
524  m_CLCompileContext);
525  }
526  case LayerType::Lstm :
527  {
528  auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
529  return MakeWorkload<ClLstmFloatWorkload, NullWorkload>(*lstmQueueDescriptor, info, m_CLCompileContext);
530  }
531  case LayerType::Maximum :
532  {
533  auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
534  return MakeWorkload<ClMaximumWorkload>(*maximumQueueDescriptor, info, m_CLCompileContext);
535  }
536  case LayerType::Mean :
537  {
538  auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
539  return MakeWorkload<ClMeanWorkload>(*meanQueueDescriptor, info, m_CLCompileContext);
540  }
541  case LayerType::MemCopy :
542  {
543  auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
544  if (memCopyQueueDescriptor->m_Inputs.empty() || !memCopyQueueDescriptor->m_Inputs[0])
545  {
546  throw InvalidArgumentException("ClWorkloadFactory: Invalid null input for MemCopy workload");
547  }
548  return MakeWorkload<CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
549  }
550  case LayerType::MemImport :
551  {
552  auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
553  if (memImportQueueDescriptor->m_Inputs.empty() || !memImportQueueDescriptor->m_Inputs[0])
554  {
555  throw InvalidArgumentException("ClWorkloadFactory: Invalid null input for MemImport workload");
556  }
557  return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
558  }
559  case LayerType::Minimum :
560  {
561  auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
562  return MakeWorkload<ClMinimumWorkload>(*minimumQueueDescriptor, info, m_CLCompileContext);
563  }
564  case LayerType::Multiplication :
565  {
566  auto multiplicationQueueDescriptor = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
567  return MakeWorkload<ClMultiplicationWorkload>(*multiplicationQueueDescriptor, info, m_CLCompileContext);
568  }
569  case LayerType::Normalization :
570  {
571  auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
572  return MakeWorkload<ClNormalizationFloatWorkload, NullWorkload>(*normalizationQueueDescriptor,
573  info,
574  m_CLCompileContext);
575  }
576  case LayerType::Output :
577  {
578  auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
579  return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
580  }
581  case LayerType::Pad :
582  {
583  auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
584  return MakeWorkload<ClPadWorkload>(*padQueueDescriptor, info, m_CLCompileContext);
585  }
586  case LayerType::Permute :
587  {
588  auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
589  return MakeWorkload<ClPermuteWorkload>(*permuteQueueDescriptor, info, m_CLCompileContext);
590  }
591  case LayerType::Pooling2d :
592  {
593  auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
594  return MakeWorkload<ClPooling2dWorkload>(*pooling2dQueueDescriptor, info, m_CLCompileContext);
595  }
596  case LayerType::Pooling3d :
597  {
598  auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
599  return MakeWorkload<ClPooling3dWorkload>(*pooling3dQueueDescriptor, info, m_CLCompileContext);
600  }
601  case LayerType::PreCompiled :
602  {
603  auto preCompiledQueueDescriptor = PolymorphicDowncast<const PreCompiledQueueDescriptor*>(&descriptor);
604  return MakeWorkload<NullWorkload, NullWorkload>(*preCompiledQueueDescriptor, info, m_CLCompileContext);
605  }
606  case LayerType::Prelu :
607  {
608  auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
609  return MakeWorkload<ClPreluWorkload>(*preluQueueDescriptor, info, m_CLCompileContext);
610  }
611  case LayerType::QLstm :
612  {
613  auto qLstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
614  return std::make_unique<ClQLstmWorkload>(*qLstmQueueDescriptor, info, m_CLCompileContext);
615  }
616  case LayerType::Quantize :
617  {
618  auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
619  return MakeWorkload<ClQuantizeWorkload>(*quantizeQueueDescriptor, info, m_CLCompileContext);
620  }
621  case LayerType::QuantizedLstm :
622  {
623  auto quantizedLstmQueueDescriptor = PolymorphicDowncast<const QuantizedLstmQueueDescriptor*>(&descriptor);
624  return MakeWorkload<ClQuantizedLstmWorkload>(*quantizedLstmQueueDescriptor, info, m_CLCompileContext);
625  }
626  case LayerType::Rank :
627  {
628  auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
629  return std::make_unique<ClRankWorkload>(*rankQueueDescriptor, info);
630  }
631  case LayerType::Reduce :
632  {
633  auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
634  return std::make_unique<ClReduceWorkload>(*reduceQueueDescriptor, info);
635  }
636  case LayerType::Reshape :
637  {
638  auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
639  return MakeWorkload<ClReshapeWorkload>(*reshapeQueueDescriptor, info, m_CLCompileContext);
640  }
641  case LayerType::Resize :
642  {
643  auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
644  return MakeWorkload<ClResizeWorkload>(*resizeQueueDescriptor, info, m_CLCompileContext);
645  }
646  case LayerType::Slice :
647  {
648  auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
649  return MakeWorkload<ClSliceWorkload>(*sliceQueueDescriptor, info, m_CLCompileContext);
650  }
651  case LayerType::Softmax :
652  {
653  auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
654  return std::make_unique<ClSoftmaxWorkload>(*softmaxQueueDescriptor,
655  info,
656  m_MemoryManager->GetIntraLayerManager(),
657  m_CLCompileContext);
658  }
659  case LayerType::SpaceToBatchNd :
660  {
661  auto spaceToBatchNdQueueDescriptor
662  = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
663  return MakeWorkload<ClSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info, m_CLCompileContext);
664  }
665  case LayerType::SpaceToDepth :
666  {
667  auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
668  return MakeWorkload<ClSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info, m_CLCompileContext);
669  }
670  case LayerType::Splitter :
671  {
672  auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
673  return MakeWorkload<ClSplitterWorkload>(*splitterQueueDescriptor, info, m_CLCompileContext);
674  }
675  case LayerType::Stack :
676  {
677  auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
678  return MakeWorkload<ClStackWorkload>(*stackQueueDescriptor, info, m_CLCompileContext);
679  }
680  case LayerType::StridedSlice :
681  {
682  auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
683  return MakeWorkload<ClStridedSliceWorkload>(*stridedSliceQueueDescriptor, info, m_CLCompileContext);
684  }
685  case LayerType::Subtraction :
686  {
687  auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
688  return MakeWorkload<ClSubtractionWorkload>(*subtractionQueueDescriptor, info, m_CLCompileContext);
689  }
690  case LayerType::Transpose :
691  {
692  auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
693  return MakeWorkload<ClTransposeWorkload>(*transposeQueueDescriptor, info, m_CLCompileContext);
694  }
695  case LayerType::TransposeConvolution2d :
696  {
697  auto transposeConvolution2dQueueDescriptor
698  = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
699  return MakeWorkload<ClTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor,
700  info,
701  m_MemoryManager->GetIntraLayerManager(),
702  m_CLCompileContext);
703  }
704  case LayerType::UnidirectionalSequenceLstm :
705  {
706  auto desc = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
707  return MakeWorkloadHelper<ClUnidirectionalSequenceLstmFloatWorkload, NullWorkload>(*desc,
708  info,
709  m_CLCompileContext);
710  }
711  default:
712  return nullptr;
713  }
714 }

References armnn::Abs, armnn::Activation, armnn::Addition, armnn::ArgMinMax, armnn::BatchMatMul, armnn::BatchNormalization, armnn::BatchToSpaceNd, armnn::Cast, armnn::ChannelShuffle, armnn::Comparison, armnn::Concat, armnn::Constant, armnn::ConvertFp16ToFp32, armnn::ConvertFp32ToFp16, armnn::Convolution2d, armnn::Convolution3d, armnn::Debug, armnn::DepthToSpace, armnn::DepthwiseConvolution2d, armnn::Dequantize, armnn::DetectionPostProcess, armnn::Division, armnn::ElementwiseUnary, armnn::Exp, armnn::Fill, armnn::Floor, armnn::FullyConnected, armnn::Gather, armnn::GatherNd, armnn::info, armnn::Input, armnn::InstanceNormalization, ClBackendModelContext::IsFastMathEnabled(), armnn::L2Normalization, armnn::Log, armnn::LogicalAnd, armnn::LogicalBinary, armnn::LogicalNot, armnn::LogicalOr, armnn::LogSoftmax, armnn::Lstm, QueueDescriptor::m_Inputs, QueueDescriptor::m_Outputs, armnn::Maximum, armnn::Mean, armnn::MemCopy, armnn::MemImport, armnn::Minimum, armnn::Multiplication, armnn::Neg, armnn::Normalization, armnn::Output, armnn::Pad, armnn::Permute, armnn::Pooling2d, armnn::Pooling3d, armnn::PreCompiled, armnn::Prelu, armnn::QLstm, armnn::Quantize, armnn::QuantizedLstm, armnn::Rank, armnn::Reduce, armnn::Reshape, armnn::Resize, armnn::Rsqrt, armnn::Sin, armnn::Slice, armnn::Softmax, armnn::SpaceToBatchNd, armnn::SpaceToDepth, armnn::Splitter, armnn::Sqrt, armnn::Stack, armnn::StridedSlice, armnn::Subtraction, armnn::Transpose, armnn::TransposeConvolution2d, and armnn::UnidirectionalSequenceLstm.

◆ GetBackendId()

const BackendId & GetBackendId ( ) const
overridevirtual

Implements IWorkloadFactory.

Definition at line 61 of file ClWorkloadFactory.cpp.

62 {
63  return s_Id;
64 }

◆ IsLayerSupported() [1/2]

bool IsLayerSupported ( const IConnectableLayer &  layer,
Optional< DataType >  dataType,
std::string &  outReasonIfUnsupported,
const ModelOptions &  modelOptions 
)
static

Definition at line 53 of file ClWorkloadFactory.cpp.

57 {
58  return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported, modelOptions);
59 }

References IWorkloadFactory::IsLayerSupported().

◆ IsLayerSupported() [2/2]

bool IsLayerSupported ( const Layer &  layer,
Optional< DataType >  dataType,
std::string &  outReasonIfUnsupported 
)
static

Definition at line 46 of file ClWorkloadFactory.cpp.

49 {
50  return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
51 }

References IWorkloadFactory::IsLayerSupported().

◆ SupportsSubTensors()

bool SupportsSubTensors ( ) const
inlineoverridevirtual

Reimplemented from WorkloadFactoryBase.

Definition at line 42 of file ClWorkloadFactory.hpp.

42 { return true; }

The documentation for this class was generated from the following files:
armnn::LayerType::Floor
@ Floor
armnn::LayerType::MemCopy
@ MemCopy
armnn::LayerType::Softmax
@ Softmax
armnn::LayerType::Pooling3d
@ Pooling3d
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::LayerType::Transpose
@ Transpose
armnn::LayerType::ChannelShuffle
@ ChannelShuffle
armnnSerializer
Definition: ISerializer.hpp:11
armnn::LayerType::ConvertFp32ToFp16
@ ConvertFp32ToFp16
armnn::LayerType::L2Normalization
@ L2Normalization
armnn::LogicalBinaryOperation::LogicalAnd
@ LogicalAnd
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
armnn::LayerType::Input
@ Input
armnn::LayerType::Slice
@ Slice
armnn::LayerType::Maximum
@ Maximum
armnn::LayerType::Quantize
@ Quantize
armnn::LayerType::ArgMinMax
@ ArgMinMax
armnn::LayerType::Subtraction
@ Subtraction
armnn::LayerType::SpaceToBatchNd
@ SpaceToBatchNd
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::UnaryOperation::Exp
@ Exp
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::UnaryOperation::Sqrt
@ Sqrt
armnn::UnaryOperation::Neg
@ Neg
armnn::LayerType::Permute
@ Permute
armnn::LayerType::ConvertFp16ToFp32
@ ConvertFp16ToFp32
armnn::LayerType::QLstm
@ QLstm
armnn::LayerType::Pad
@ Pad
armnn::LayerType::Addition
@ Addition
armnn::LayerType::QuantizedLstm
@ QuantizedLstm
armnn::LayerType::BatchNormalization
@ BatchNormalization
armnn::LayerType::Reduce
@ Reduce
ARMNN_LOG
#define ARMNN_LOG(severity)
Definition: Logging.hpp:212
armnn::LayerType::Division
@ Division
armnn::LayerType::Debug
@ Debug
armnn::LayerType::InstanceNormalization
@ InstanceNormalization
armnn::LayerType::Activation
@ Activation
armnn::LayerType::Normalization
@ Normalization
armnn::LayerType::Comparison
@ Comparison
armnn::LayerType::Stack
@ Stack
armnn::LayerType::Reshape
@ Reshape
armnn::IBackendInternal::IBackendSpecificModelContextPtr
std::shared_ptr< IBackendModelContext > IBackendSpecificModelContextPtr
Definition: IBackendInternal.hpp:96
armnn::LayerType::Gather
@ Gather
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::LogicalBinaryOperation::LogicalOr
@ LogicalOr
armnn::LayerType::Fill
@ Fill
armnn::LayerType::Resize
@ Resize
armnn::LayerType::Rank
@ Rank
armnn::LayerType::LogicalBinary
@ LogicalBinary
armnn::LayerType::UnidirectionalSequenceLstm
@ UnidirectionalSequenceLstm
armnn::LayerType::Pooling2d
@ Pooling2d
armnn::IWorkloadFactory::IsLayerSupported
static bool IsLayerSupported(const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
Definition: WorkloadFactory.cpp:1518
armnn::LayerType::GatherNd
@ GatherNd
armnn::LayerType::Minimum
@ Minimum
armnn::LayerType::Constant
@ Constant
armnn::LayerType::Lstm
@ Lstm
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
armnn::LayerType::SpaceToDepth
@ SpaceToDepth
armnn::UnaryOperation::Abs
@ Abs
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::LayerType::DetectionPostProcess
@ DetectionPostProcess
armnn::LayerType::Mean
@ Mean
armnn::LayerType::BatchToSpaceNd
@ BatchToSpaceNd
armnn::LayerType::DepthToSpace
@ DepthToSpace
armnn::UnaryOperation::LogicalNot
@ LogicalNot
armnn::UnaryOperation::Sin
@ Sin
armnn::LayerType::Concat
@ Concat
armnn::UnaryOperation::Rsqrt
@ Rsqrt
armnn::LayerType::Cast
@ Cast
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::LayerType::Splitter
@ Splitter
armnn::LayerType::LogSoftmax
@ LogSoftmax
armnn::LayerType::Output
@ Output
armnn::LayerType::Multiplication
@ Multiplication
armnn::LayerType::MemImport
@ MemImport
armnn::LayerType::Prelu
@ Prelu
armnn::LayerType::Dequantize
@ Dequantize
armnn::Coordinates
std::array< unsigned int, MaxNumOfTensorDimensions > Coordinates
Definition: InternalTypes.hpp:15
armnn::UnaryOperation::Log
@ Log
armnn::LayerType::PreCompiled
@ PreCompiled