ArmNN
 23.11
RefWorkloadFactory Class Reference

#include <RefWorkloadFactory.hpp>

Inheritance diagram for RefWorkloadFactory:
Collaboration diagram for RefWorkloadFactory:

Public Member Functions

 RefWorkloadFactory (const std::shared_ptr< RefMemoryManager > &memoryManager)
 
 RefWorkloadFactory ()
 
 ~RefWorkloadFactory ()
 
const BackendId & GetBackendId () const override
 
bool SupportsSubTensors () const override
 
std::unique_ptr< ITensorHandle > CreateSubTensorHandle (ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const override
 
std::unique_ptr< ITensorHandle > CreateTensorHandle (const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const override
 
std::unique_ptr< ITensorHandle > CreateTensorHandle (const TensorInfo &tensorInfo, DataLayout dataLayout, const bool IsMemoryManaged=true) const override
 
std::unique_ptr< IWorkload > CreateWorkload (LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const override
 Backends should implement their own CreateWorkload function with a switch statement. More...
 
- Public Member Functions inherited from IWorkloadFactory
virtual ~IWorkloadFactory ()
 
virtual void AfterWorkloadsCreated ()
 

Static Public Member Functions

static bool IsLayerSupported (const Layer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported, const ModelOptions &modelOptions)
 
- Static Public Member Functions inherited from IWorkloadFactory
static bool IsLayerSupported (const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported, const ModelOptions &modelOptions)
 
static bool IsLayerSupported (const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported, const ModelOptions &modelOptions)
 

Detailed Description

Definition at line 27 of file RefWorkloadFactory.hpp.

Constructor & Destructor Documentation

◆ RefWorkloadFactory() [1/2]

RefWorkloadFactory ( const std::shared_ptr< RefMemoryManager > &  memoryManager)
explicit

Definition at line 81 of file RefWorkloadFactory.cpp.

82  : m_MemoryManager(memoryManager)
83 {
84 }

◆ RefWorkloadFactory() [2/2]

RefWorkloadFactory ( )

Definition at line 86 of file RefWorkloadFactory.cpp.

87  : m_MemoryManager(new RefMemoryManager())
88 {
89 }

◆ ~RefWorkloadFactory()

~RefWorkloadFactory ( )
inline

Definition at line 33 of file RefWorkloadFactory.hpp.

33 {}
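The two constructors differ only in where the RefMemoryManager comes from: the default constructor allocates its own, while the explicit constructor shares one supplied by the caller. A minimal construction sketch (not part of the generated documentation; the include paths are assumptions based on the reference backend's source layout):

#include <RefWorkloadFactory.hpp>   // assumed include path inside src/backends/reference
#include <RefMemoryManager.hpp>     // assumed include path inside src/backends/reference
#include <memory>

void ConstructRefFactories()
{
    // Default constructor: the factory creates and owns its own RefMemoryManager.
    armnn::RefWorkloadFactory ownMemory;

    // Explicit constructor: share a memory manager the caller already holds,
    // for example the one owned by the reference backend itself.
    auto memoryManager = std::make_shared<armnn::RefMemoryManager>();
    armnn::RefWorkloadFactory sharedMemory(memoryManager);
}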

Member Function Documentation

◆ CreateSubTensorHandle()

std::unique_ptr<ITensorHandle> CreateSubTensorHandle ( ITensorHandle &  parent,
TensorShape const &  subTensorShape,
unsigned int const *  subTensorOrigin 
) const
inline override virtual

Implements IWorkloadFactory.

Definition at line 49 of file RefWorkloadFactory.hpp.

52  {
53  IgnoreUnused(parent, subTensorShape, subTensorOrigin);
54  return nullptr;
55  }

References armnn::IgnoreUnused().

◆ CreateTensorHandle() [1/2]

std::unique_ptr< ITensorHandle > CreateTensorHandle ( const TensorInfo &  tensorInfo,
const bool  IsMemoryManaged = true 
) const
override virtual

Implements IWorkloadFactory.

Definition at line 111 of file RefWorkloadFactory.cpp.

113 {
114  if (isMemoryManaged)
115  {
116  return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
117  }
118  else
119  {
120  return std::make_unique<RefTensorHandle>(tensorInfo);
121  }
122 }

◆ CreateTensorHandle() [2/2]

std::unique_ptr< ITensorHandle > CreateTensorHandle ( const TensorInfo &  tensorInfo,
DataLayout  dataLayout,
const bool  IsMemoryManaged = true 
) const
override virtual

Implements IWorkloadFactory.

Definition at line 124 of file RefWorkloadFactory.cpp.

127 {
128  // For Ref it is okay to make the TensorHandle memory managed as it can also store a pointer
129  // to unmanaged memory. This also ensures memory alignment.
130  IgnoreUnused(isMemoryManaged, dataLayout);
131 
132  if (isMemoryManaged)
133  {
134  return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
135  }
136  else
137  {
138  return std::make_unique<RefTensorHandle>(tensorInfo);
139  }
140 }

References armnn::IgnoreUnused().
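Both overloads return a RefTensorHandle; the IsMemoryManaged flag only decides whether the handle draws its storage from the factory's RefMemoryManager, and the reference backend ignores the DataLayout argument, as the listing above shows. A short usage sketch (the wrapper function, shape, and layout are illustrative assumptions, not taken from this page):

#include <RefWorkloadFactory.hpp>   // assumed include path
#include <armnn/Tensor.hpp>
#include <memory>

void CreateHandles(const armnn::RefWorkloadFactory& factory)
{
    armnn::TensorInfo tensorInfo(armnn::TensorShape({ 1, 3, 224, 224 }),
                                 armnn::DataType::Float32);

    // Memory-managed handle: storage is provided by the factory's RefMemoryManager.
    std::unique_ptr<armnn::ITensorHandle> managed =
        factory.CreateTensorHandle(tensorInfo);

    // Unmanaged handle: it owns its own storage; the layout argument is ignored.
    std::unique_ptr<armnn::ITensorHandle> unmanaged =
        factory.CreateTensorHandle(tensorInfo, armnn::DataLayout::NHWC, false);
}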

◆ CreateWorkload()

std::unique_ptr< IWorkload > CreateWorkload ( LayerType  type,
const QueueDescriptor &  descriptor,
const WorkloadInfo &  info
) const
override virtual

Backends should implement their own CreateWorkload function with a switch statement.

The case labels of the switch are the LayerType values; based on the type, the backend calls its specific workload-creation code.

Implements IWorkloadFactory.

Definition at line 142 of file RefWorkloadFactory.cpp.

145 {
146  switch(type)
147  {
148  case LayerType::Activation :
149  {
150  auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
151  return std::make_unique<RefActivationWorkload>(*activationQueueDescriptor, info);
152  }
153  case LayerType::Addition :
154  {
155  auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
156  if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
157  {
158  return std::make_unique<RefAdditionWorkload<int32_t>>(*additionQueueDescriptor, info);
159  }
160  else
161  {
162  return std::make_unique<RefAdditionWorkload<float>>(*additionQueueDescriptor, info);
163  }
164  }
165  case LayerType::ArgMinMax :
166  {
167  auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
168  return std::make_unique<RefArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info);
169  }
170  case LayerType::BatchMatMul:
171  {
172  auto batchMatMulQueueDescriptor = PolymorphicDowncast<const BatchMatMulQueueDescriptor*>(&descriptor);
173  return std::make_unique<RefBatchMatMulWorkload>(*batchMatMulQueueDescriptor, info);
174  }
175  case LayerType::BatchNormalization:
176  {
177  auto batchNormQueueDescriptor = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
178  return std::make_unique<RefBatchNormalizationWorkload>(*batchNormQueueDescriptor, info);
179  }
180  case LayerType::BatchToSpaceNd:
181  {
182  auto batchToSpaceNdQueueDescriptor
183  = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
184  return std::make_unique<RefBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info);
185  }
186  case LayerType::BroadcastTo:
187  {
188  auto broadcastToQueueDescriptor = PolymorphicDowncast<const BroadcastToQueueDescriptor*>(&descriptor);
189  return std::make_unique<RefBroadcastToWorkload>(*broadcastToQueueDescriptor, info);
190  }
191  case LayerType::Cast :
192  {
193  auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
194  return std::make_unique<RefCastWorkload>(*castQueueDescriptor, info);
195  }
196  case LayerType::ChannelShuffle:
197  {
198  auto channelShuffleQueueDescriptor
199  = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
200  return std::make_unique<RefChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info);
201  }
202  case LayerType::Comparison :
203  {
204  auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
205  return std::make_unique<RefComparisonWorkload>(*comparisonQueueDescriptor, info);
206  }
207  case LayerType::Concat :
208  {
209  auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
210  return std::make_unique<RefConcatWorkload>(*concatQueueDescriptor, info);
211  }
212  case LayerType::Constant :
213  {
214  auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
215  return std::make_unique<RefConstantWorkload>(*constantQueueDescriptor, info);
216  }
217  case LayerType::ConvertFp16ToFp32:
218  {
219  auto convertFp16ToFp32QueueDescriptor
220  = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
221  return std::make_unique<RefConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info);
222  }
223  case LayerType::ConvertFp32ToFp16:
224  {
225  auto convertFp32ToFp16QueueDescriptor
226  = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
227  return std::make_unique<RefConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor, info);
228  }
229  case LayerType::Convolution2d:
230  {
231  auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
232  return std::make_unique<RefConvolution2dWorkload>(*convolution2dQueueDescriptor, info);
233  }
234  case LayerType::Convolution3d:
235  {
236  auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
237  return std::make_unique<RefConvolution3dWorkload>(*convolution3dQueueDescriptor, info);
238  }
239  case LayerType::Debug:
240  {
241  auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
242  if (IsBFloat16(info))
243  {
244  return std::make_unique<RefDebugBFloat16Workload>(*debugQueueDescriptor, info);
245  }
246  if (IsFloat16(info))
247  {
248  return std::make_unique<RefDebugFloat16Workload>(*debugQueueDescriptor, info);
249  }
250  if (IsQSymmS16(info))
251  {
252  return std::make_unique<RefDebugQSymmS16Workload>(*debugQueueDescriptor, info);
253  }
254  if (IsQSymmS8(info))
255  {
256  return std::make_unique<RefDebugQSymmS8Workload>(*debugQueueDescriptor, info);
257  }
258  if (IsQAsymmU8(info))
259  {
260  return std::make_unique<RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
261  }
262  if (IsQAsymmS8(info))
263  {
264  return std::make_unique<RefDebugQAsymmS8Workload>(*debugQueueDescriptor, info);
265  }
266  if (IsSigned32(info))
267  {
268  return std::make_unique<RefDebugSigned32Workload>(*debugQueueDescriptor, info);
269  }
270  if (IsSigned64(info))
271  {
272  return std::make_unique<RefDebugSigned64Workload>(*debugQueueDescriptor, info);
273  }
274  return MakeWorkload<RefDebugFloat32Workload, RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
275  }
276  case LayerType::DepthToSpace:
277  {
278  auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
279  return std::make_unique<RefDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info);
280  }
281  case LayerType::DepthwiseConvolution2d:
282  {
283  auto depthwiseConvolution2DQueueDescriptor
284  = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
285  return std::make_unique<RefDepthwiseConvolution2dWorkload>(*depthwiseConvolution2DQueueDescriptor, info);
286  }
287  case LayerType::Dequantize:
288  {
289  auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
290  return std::make_unique<RefDequantizeWorkload>(*dequantizeQueueDescriptor, info);
291  }
292  case LayerType::DetectionPostProcess:
293  {
294  auto detectionPostProcessQueueDescriptor
295  = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
296  return std::make_unique<RefDetectionPostProcessWorkload>(*detectionPostProcessQueueDescriptor, info);
297  }
298  case LayerType::Division:
299  {
300  auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
301  if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
302  {
303  return std::make_unique<RefDivisionWorkload<int32_t>>(*divisionQueueDescriptor, info);
304  }
305  else
306  {
307  return std::make_unique<RefDivisionWorkload<float>>(*divisionQueueDescriptor, info);
308  }
309  }
310  case LayerType::ElementwiseBinary:
311  {
312  auto elementwiseBinaryQueueDescriptor
313  = PolymorphicDowncast<const ElementwiseBinaryQueueDescriptor*>(&descriptor);
314  return std::make_unique<RefElementwiseBinaryWorkload>(*elementwiseBinaryQueueDescriptor, info);
315  }
316  case LayerType::ElementwiseUnary:
317  {
318  auto elementwiseUnaryQueueDescriptor
319  = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
320  if ((*elementwiseUnaryQueueDescriptor).m_Parameters.m_Operation == UnaryOperation::LogicalNot)
321  {
322  return std::make_unique<RefLogicalUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
323  }
324  return std::make_unique<RefElementwiseUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
325  }
326  case LayerType::FakeQuantization:
327  {
328  auto fakeQuantizationQueueDescriptor
329  = PolymorphicDowncast<const FakeQuantizationQueueDescriptor*>(&descriptor);
330  return std::make_unique<RefFakeQuantizationFloat32Workload>(*fakeQuantizationQueueDescriptor, info);
331  }
332  case LayerType::Fill:
333  {
334  auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
335  return std::make_unique<RefFillWorkload>(*fillQueueDescriptor, info);
336  }
337  case LayerType::Floor:
338  {
339  auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
340  if(IsQuantizedType(info.m_InputTensorInfos[0].GetDataType()))
341  {
342  return nullptr;
343  }
344  else
345  {
346  return std::make_unique<RefFloorWorkload>(*floorQueueDescriptor, info);
347  }
348  }
349  case LayerType::FullyConnected:
350  {
351  auto fullyConnectedQueueDescriptor
352  = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
353  return std::make_unique<RefFullyConnectedWorkload>(*fullyConnectedQueueDescriptor, info);
354  }
355  case LayerType::Gather:
356  {
357  auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
358  return std::make_unique<RefGatherWorkload>(*gatherQueueDescriptor, info);
359  }
360  case LayerType::GatherNd:
361  {
362  auto gatherNdQueueDescriptor = PolymorphicDowncast<const GatherNdQueueDescriptor*>(&descriptor);
363  return std::make_unique<RefGatherNdWorkload>(*gatherNdQueueDescriptor, info);
364  }
365  case LayerType::Input:
366  {
367  auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
368  if (info.m_InputTensorInfos.empty() )
369  {
370  throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Input cannot be zero length");
371  }
372  if (info.m_OutputTensorInfos.empty())
373  {
374  throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Output cannot be zero length");
375  }
376  if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
377  {
378  throw InvalidArgumentException("RefWorkloadFactory::CreateInput: "
379  "data input and output differ in byte count.");
380  }
381  return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
382  }
383  case LayerType::InstanceNormalization:
384  {
385  auto instanceNormalizationQueueDescriptor
386  = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
387  return std::make_unique<RefInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor, info);
388  }
389  case LayerType::L2Normalization:
390  {
391  auto l2NormalizationQueueDescriptor
392  = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
393  return std::make_unique<RefL2NormalizationWorkload>(*l2NormalizationQueueDescriptor, info);
394  }
395  case LayerType::LogicalBinary:
396  {
397  auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
398  return std::make_unique<RefLogicalBinaryWorkload>(*logicalBinaryQueueDescriptor, info);
399  }
400  case LayerType::LogSoftmax:
401  {
402  auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
403  return std::make_unique<RefLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor, info);
404  }
405  case LayerType::Lstm:
406  {
407  auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
408  return std::make_unique<RefLstmWorkload>(*lstmQueueDescriptor, info);
409  }
410  case LayerType::Maximum:
411  {
412  auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
413  if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
414  {
415  return std::make_unique<RefMaximumWorkload<int32_t>>(*maximumQueueDescriptor, info);
416  }
417  else
418  {
419  return std::make_unique<RefMaximumWorkload<float>>(*maximumQueueDescriptor, info);
420  }
421  }
422  case LayerType::Mean:
423  {
424  auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
425  return std::make_unique<RefMeanWorkload>(*meanQueueDescriptor, info);
426  }
427  case LayerType::MemCopy:
428  {
429  auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
430  if (descriptor.m_Inputs.empty())
431  {
432  throw InvalidArgumentException("RefWorkloadFactory: CreateMemCopy() expected an input tensor.");
433  }
434  return std::make_unique<CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
435  }
436  case LayerType::MemImport:
437  {
438  auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
439  if (descriptor.m_Inputs.empty())
440  {
441  throw InvalidArgumentException("RefWorkloadFactory: CreateMemImport() expected an input tensor.");
442  }
443  return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
444  }
445  case LayerType::Minimum:
446  {
447  auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
448  if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
449  {
450  return std::make_unique<RefMinimumWorkload<int32_t>>(*minimumQueueDescriptor, info);
451  }
452  else
453  {
454  return std::make_unique<RefMinimumWorkload<float>>(*minimumQueueDescriptor, info);
455  }
456  }
457  case LayerType::Multiplication:
458  {
459  auto multiplicationQueueDescriptor
460  = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
461  if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
462  {
463  return std::make_unique<RefMultiplicationWorkload<int32_t>>(*multiplicationQueueDescriptor, info);
464  }
465  else
466  {
467  return std::make_unique<RefMultiplicationWorkload<float>>(*multiplicationQueueDescriptor, info);
468  }
469  }
470  case LayerType::Normalization:
471  {
472  auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
473  return std::make_unique<RefNormalizationWorkload>(*normalizationQueueDescriptor, info);
474  }
475  case LayerType::Output:
476  {
477  auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
478  if (info.m_InputTensorInfos.empty() )
479  {
480  throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Input cannot be zero length");
481  }
482  if (info.m_OutputTensorInfos.empty())
483  {
484  throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Output cannot be zero length");
485  }
486  if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
487  {
488  throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: data input and output "
489  "differ in byte count.");
490  }
491  return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
492  }
493  case LayerType::Pad:
494  {
495  auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
496  return std::make_unique<RefPadWorkload>(*padQueueDescriptor, info);
497  }
498  case LayerType::Permute:
499  {
500  auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
501  if (IsQSymmS16(info))
502  {
503  return std::make_unique<RefPermuteQSymm16Workload>(*permuteQueueDescriptor, info);
504  }
505  else if (IsBFloat16(info))
506  {
507  return std::make_unique<RefPermuteBFloat16Workload>(*permuteQueueDescriptor, info);
508  }
509  else if (IsQAsymmS8(info))
510  {
511  return std::make_unique<RefPermuteQAsymmS8Workload>(*permuteQueueDescriptor, info);
512  }
512  }
513  return MakeWorkloadHelper<RefPermuteFloat16Workload, RefPermuteFloat32Workload, RefPermuteQAsymm8Workload,
514  NullWorkload, NullWorkload, NullWorkload>(*permuteQueueDescriptor, info);
515  }
516  case LayerType::Pooling2d:
517  {
518  auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
519  return std::make_unique<RefPooling2dWorkload>(*pooling2dQueueDescriptor, info);
520  }
521  case LayerType::Pooling3d:
522  {
523  auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
524  return std::make_unique<RefPooling3dWorkload>(*pooling3dQueueDescriptor, info);
525  }
526  case LayerType::PreCompiled:
527  {
528  return nullptr;
529  }
530  case LayerType::Prelu:
531  {
532  auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
533  return std::make_unique<RefPreluWorkload>(*preluQueueDescriptor, info);
534  }
535  case LayerType::QLstm:
536  {
537  auto qlstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
538  return std::make_unique<RefQLstmWorkload>(*qlstmQueueDescriptor, info);
539  }
540  case LayerType::Quantize:
541  {
542  auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
543  return std::make_unique<RefQuantizeWorkload>(*quantizeQueueDescriptor, info);
544  }
545  case LayerType::Rank:
546  {
547  auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
548  return std::make_unique<RefRankWorkload>(*rankQueueDescriptor, info);
549  }
550  case LayerType::Reduce:
551  {
552  auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
553  return std::make_unique<RefReduceWorkload>(*reduceQueueDescriptor, info);
554  }
555  case LayerType::Reshape:
556  {
557  auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
558  return std::make_unique<RefReshapeWorkload>(*reshapeQueueDescriptor, info);
559  }
560  case LayerType::Resize:
561  {
562  auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
563  return std::make_unique<RefResizeWorkload>(*resizeQueueDescriptor, info);
564  }
565  case LayerType::ReverseV2:
566  {
567  auto reverseV2QueueDescriptor = PolymorphicDowncast<const ReverseV2QueueDescriptor*>(&descriptor);
568  return std::make_unique<RefReverseV2Workload>(*reverseV2QueueDescriptor, info);
569  }
570  case LayerType::Shape:
571  {
572  auto shapeQueueDescriptor = PolymorphicDowncast<const ShapeQueueDescriptor*>(&descriptor);
573  return std::make_unique<RefShapeWorkload>(*shapeQueueDescriptor, info);
574  }
575  case LayerType::Slice:
576  {
577  auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
578  return std::make_unique<RefSliceWorkload>(*sliceQueueDescriptor, info);
579  }
580  case LayerType::Softmax:
581  {
582  auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
583  return std::make_unique<RefSoftmaxWorkload>(*softmaxQueueDescriptor, info);
584  }
585  case LayerType::SpaceToBatchNd:
586  {
587  auto spaceToBatchNdQueueDescriptor
588  = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
589  return std::make_unique<RefSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info);
590  }
591  case LayerType::SpaceToDepth:
592  {
593  auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
594  return std::make_unique<RefSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info);
595  }
596  case LayerType::Splitter:
597  {
598  auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
599  return std::make_unique<RefSplitterWorkload>(*splitterQueueDescriptor, info);
600  }
601  case LayerType::Stack:
602  {
603  auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
604  return std::make_unique<RefStackWorkload>(*stackQueueDescriptor, info);
605  }
606  case LayerType::StridedSlice:
607  {
608  auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
609  return std::make_unique<RefStridedSliceWorkload>(*stridedSliceQueueDescriptor, info);
610  }
611  case LayerType::Subtraction:
612  {
613  auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
614  if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
615  {
616  return std::make_unique<RefSubtractionWorkload<int32_t>>(*subtractionQueueDescriptor, info);
617  }
618  else
619  {
620  return std::make_unique<RefSubtractionWorkload<float>>(*subtractionQueueDescriptor, info);
621  }
622  }
623  case LayerType::Tile:
624  {
625  auto tileQueueDescriptor = PolymorphicDowncast<const TileQueueDescriptor*>(&descriptor);
626  return std::make_unique<RefTileWorkload>(*tileQueueDescriptor, info);
627  }
628  case LayerType::Transpose:
629  {
630  auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
631  if (IsQSymmS16(info))
632  {
633  return std::make_unique<RefTransposeQSymm16Workload>(*transposeQueueDescriptor, info);
634  }
635  else if (IsBFloat16(info))
636  {
637  return std::make_unique<RefTransposeBFloat16Workload>(*transposeQueueDescriptor, info);
638  }
639  else if (IsQAsymmS8(info))
640  {
641  return std::make_unique<RefTransposeQAsymmS8Workload>(*transposeQueueDescriptor, info);
642  }
643  return MakeWorkloadHelper<RefTransposeFloat16Workload, RefTransposeFloat32Workload,
644  RefTransposeQAsymm8Workload, NullWorkload, NullWorkload, NullWorkload>
645  (*transposeQueueDescriptor, info);
646  }
647  case LayerType::TransposeConvolution2d:
648  {
649  auto transposeConvolution2dQueueDescriptor
650  = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
651  return std::make_unique<RefTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor, info);
652  }
653  case LayerType::UnidirectionalSequenceLstm:
654  {
655  auto unidirectionalSequenceLstmQueueDescriptor
656  = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
657  return std::make_unique<RefUnidirectionalSequenceLstmWorkload>(*unidirectionalSequenceLstmQueueDescriptor,
658  info);
659  }
660  default:
661  return nullptr;
662  }
663 }

References armnn::Activation, armnn::Addition, armnn::ArgMinMax, armnn::BatchMatMul, armnn::BatchNormalization, armnn::BatchToSpaceNd, armnn::BroadcastTo, armnn::Cast, armnn::ChannelShuffle, armnn::Comparison, armnn::Concat, armnn::Constant, armnn::ConvertFp16ToFp32, armnn::ConvertFp32ToFp16, armnn::Convolution2d, armnn::Convolution3d, armnn::Debug, armnn::DepthToSpace, armnn::DepthwiseConvolution2d, armnn::Dequantize, armnn::DetectionPostProcess, armnn::Division, armnn::ElementwiseBinary, armnn::ElementwiseUnary, armnn::FakeQuantization, armnn::Fill, armnn::Floor, armnn::FullyConnected, armnn::Gather, armnn::GatherNd, armnn::info, armnn::Input, armnn::InstanceNormalization, armnn::IsBFloat16(), armnn::IsFloat16(), armnn::IsQAsymmS8(), armnn::IsQAsymmU8(), armnn::IsQSymmS16(), armnn::IsQSymmS8(), armnn::IsQuantizedType(), armnn::IsSigned32(), armnn::IsSigned64(), armnn::L2Normalization, armnn::LogicalBinary, armnn::LogicalNot, armnn::LogSoftmax, armnn::Lstm, QueueDescriptor::m_Inputs, armnn::Maximum, armnn::Mean, armnn::MemCopy, armnn::MemImport, armnn::Minimum, armnn::Multiplication, armnn::Normalization, armnn::Output, armnn::Pad, armnn::Permute, armnn::Pooling2d, armnn::Pooling3d, armnn::PreCompiled, armnn::Prelu, armnn::QLstm, armnn::Quantize, armnn::Rank, armnn::Reduce, armnn::Reshape, armnn::Resize, armnn::ReverseV2, armnn::Shape, armnn::Signed32, armnn::Slice, armnn::Softmax, armnn::SpaceToBatchNd, armnn::SpaceToDepth, armnn::Splitter, armnn::Stack, armnn::StridedSlice, armnn::Subtraction, armnn::Tile, armnn::Transpose, armnn::TransposeConvolution2d, and armnn::UnidirectionalSequenceLstm.
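As the switch above shows, CreateWorkload is a pure dispatch on LayerType: the caller is expected to arrive with a queue descriptor of the matching type and a WorkloadInfo whose tensor infos line up with the descriptor's handles. A hedged usage sketch for the Activation case (the shape, the ReLu choice, the include paths, and the handle wiring are illustrative; in a real run LoadedNetwork performs these steps):

#include <RefWorkloadFactory.hpp>            // assumed include path
#include <armnn/backends/WorkloadData.hpp>   // assumed include path for ActivationQueueDescriptor
#include <armnn/Tensor.hpp>
#include <memory>

void CreateReluWorkload(const armnn::RefWorkloadFactory& factory)
{
    armnn::TensorInfo tensorInfo(armnn::TensorShape({ 1, 16 }), armnn::DataType::Float32);

    // WorkloadInfo carries the tensor metadata the switch inspects (e.g. data types).
    armnn::WorkloadInfo workloadInfo;
    workloadInfo.m_InputTensorInfos  = { tensorInfo };
    workloadInfo.m_OutputTensorInfos = { tensorInfo };

    // The queue descriptor carries the layer parameters and the tensor handles.
    armnn::ActivationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Function = armnn::ActivationFunction::ReLu;

    std::unique_ptr<armnn::ITensorHandle> input  = factory.CreateTensorHandle(tensorInfo);
    std::unique_ptr<armnn::ITensorHandle> output = factory.CreateTensorHandle(tensorInfo);
    descriptor.m_Inputs.push_back(input.get());
    descriptor.m_Outputs.push_back(output.get());

    // Dispatches to the LayerType::Activation case above; a RefActivationWorkload comes back.
    std::unique_ptr<armnn::IWorkload> workload =
        factory.CreateWorkload(armnn::LayerType::Activation, descriptor, workloadInfo);

    // The tensor handles must outlive the workload; here everything shares this function's
    // scope purely to show the call shape.
}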

◆ GetBackendId()

const BackendId & GetBackendId ( ) const
override virtual

Implements IWorkloadFactory.

Definition at line 91 of file RefWorkloadFactory.cpp.

92 {
93  return s_Id;
94 }
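The id returned here is the factory's static s_Id. A quick check sketch (the literal "CpuRef" is an assumption about the name under which the reference backend registers itself, not something stated on this page):

#include <RefWorkloadFactory.hpp>   // assumed include path

bool IsReferenceFactory()
{
    armnn::RefWorkloadFactory factory;
    const armnn::BackendId& id = factory.GetBackendId();
    return id == armnn::BackendId("CpuRef");   // assumed reference backend id
}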

◆ IsLayerSupported() [1/2]

bool IsLayerSupported ( const IConnectableLayer &  layer,
Optional< DataType >  dataType,
std::string &  outReasonIfUnsupported,
const ModelOptions &  modelOptions 
)
static

Definition at line 103 of file RefWorkloadFactory.cpp.

107 {
108  return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported, modelOptions);
109 }

References IWorkloadFactory::IsLayerSupported().

◆ IsLayerSupported() [2/2]

bool IsLayerSupported ( const Layer &  layer,
Optional< DataType >  dataType,
std::string &  outReasonIfUnsupported 
)
static

Definition at line 96 of file RefWorkloadFactory.cpp.

99 {
100  return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
101 }

References IWorkloadFactory::IsLayerSupported().
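Both static overloads simply forward to IWorkloadFactory::IsLayerSupported with the reference backend's id. A hedged sketch of querying support through the public graph API (the tiny ReLU graph, the Float32 choice, and the empty ModelOptions are illustrative assumptions):

#include <RefWorkloadFactory.hpp>   // assumed include path
#include <armnn/INetwork.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <string>

bool IsReluSupportedOnRef()
{
    armnn::INetworkPtr network = armnn::INetwork::Create();
    armnn::TensorInfo tensorInfo(armnn::TensorShape({ 1, 16 }), armnn::DataType::Float32);

    // Build a minimal graph so the activation layer has a connected, typed input slot.
    armnn::IConnectableLayer* input = network->AddInputLayer(0, "input");
    input->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    armnn::ActivationDescriptor activationDesc;
    activationDesc.m_Function = armnn::ActivationFunction::ReLu;
    armnn::IConnectableLayer* relu = network->AddActivationLayer(activationDesc, "relu");
    relu->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    input->GetOutputSlot(0).Connect(relu->GetInputSlot(0));

    std::string reasonIfUnsupported;
    armnn::ModelOptions modelOptions;   // empty: no backend-specific options

    return armnn::RefWorkloadFactory::IsLayerSupported(
        *relu,
        armnn::Optional<armnn::DataType>(armnn::DataType::Float32),
        reasonIfUnsupported,
        modelOptions);
}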

◆ SupportsSubTensors()

bool SupportsSubTensors ( ) const
inline override virtual

Implements IWorkloadFactory.

Definition at line 46 of file RefWorkloadFactory.hpp.

46 { return false; }

The documentation for this class was generated from the following files:
RefWorkloadFactory.hpp
RefWorkloadFactory.cpp