ArmNN
 23.08
RefWorkloadFactory Class Reference

#include <RefWorkloadFactory.hpp>

Inheritance diagram for RefWorkloadFactory:
Collaboration diagram for RefWorkloadFactory:

Public Member Functions

 RefWorkloadFactory (const std::shared_ptr< RefMemoryManager > &memoryManager)
 
 RefWorkloadFactory ()
 
 ~RefWorkloadFactory ()
 
const BackendId & GetBackendId () const override
 
bool SupportsSubTensors () const override
 
std::unique_ptr< ITensorHandle > CreateSubTensorHandle (ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const override
 
std::unique_ptr< ITensorHandle > CreateTensorHandle (const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const override
 
std::unique_ptr< ITensorHandle > CreateTensorHandle (const TensorInfo &tensorInfo, DataLayout dataLayout, const bool IsMemoryManaged=true) const override
 
std::unique_ptr< IWorkload > CreateWorkload (LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const override
 Backends should implement their own CreateWorkload function with a switch statement. More...
 
- Public Member Functions inherited from IWorkloadFactory
virtual ~IWorkloadFactory ()
 
virtual void AfterWorkloadsCreated ()
 

Static Public Member Functions

static bool IsLayerSupported (const Layer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported, const ModelOptions &modelOptions)
 
- Static Public Member Functions inherited from IWorkloadFactory
static bool IsLayerSupported (const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported, const ModelOptions &modelOptions)
 
static bool IsLayerSupported (const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported, const ModelOptions &modelOptions)
 

Detailed Description

Definition at line 27 of file RefWorkloadFactory.hpp.

Constructor & Destructor Documentation

◆ RefWorkloadFactory() [1/2]

RefWorkloadFactory ( const std::shared_ptr< RefMemoryManager > &  memoryManager)
explicit

Definition at line 77 of file RefWorkloadFactory.cpp.

78  : m_MemoryManager(memoryManager)
79 {
80 }

◆ RefWorkloadFactory() [2/2]

RefWorkloadFactory ( )

Definition at line 82 of file RefWorkloadFactory.cpp.

83  : m_MemoryManager(new RefMemoryManager())
84 {
85 }
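
The two constructors differ only in where the RefMemoryManager comes from: the default constructor creates its own, while the explicit constructor shares one supplied by the caller. A minimal construction sketch (not part of the generated documentation; the include paths depend on the build setup):

#include "RefWorkloadFactory.hpp" // reference backend headers; exact paths depend on the build setup
#include "RefMemoryManager.hpp"
#include <memory>

using namespace armnn;

// Default constructor: the factory owns a fresh RefMemoryManager.
RefWorkloadFactory defaultFactory;

// Explicit constructor: the factory shares a memory manager created elsewhere,
// for example so that several factories draw tensor storage from one pool.
auto memoryManager = std::make_shared<RefMemoryManager>();
RefWorkloadFactory sharedFactory(memoryManager);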

◆ ~RefWorkloadFactory()

~RefWorkloadFactory ( )
inline

Definition at line 33 of file RefWorkloadFactory.hpp.

33 {}

Member Function Documentation

◆ CreateSubTensorHandle()

std::unique_ptr<ITensorHandle> CreateSubTensorHandle ( ITensorHandle &  parent,
TensorShape const &  subTensorShape,
unsigned int const *  subTensorOrigin 
) const
inlineoverridevirtual

Implements IWorkloadFactory.

Definition at line 49 of file RefWorkloadFactory.hpp.

52  {
53  IgnoreUnused(parent, subTensorShape, subTensorOrigin);
54  return nullptr;
55  }

References armnn::IgnoreUnused().

◆ CreateTensorHandle() [1/2]

std::unique_ptr< ITensorHandle > CreateTensorHandle ( const TensorInfo &  tensorInfo,
const bool  IsMemoryManaged = true 
) const
overridevirtual

Implements IWorkloadFactory.

Definition at line 107 of file RefWorkloadFactory.cpp.

109 {
110  if (isMemoryManaged)
111  {
112  return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
113  }
114  else
115  {
116  return std::make_unique<RefTensorHandle>(tensorInfo);
117  }
118 }

◆ CreateTensorHandle() [2/2]

std::unique_ptr< ITensorHandle > CreateTensorHandle ( const TensorInfo &  tensorInfo,
DataLayout  dataLayout,
const bool  IsMemoryManaged = true 
) const
overridevirtual

Implements IWorkloadFactory.

Definition at line 120 of file RefWorkloadFactory.cpp.

123 {
124  // For Ref it is okay to make the TensorHandle memory managed as it can also store a pointer
125  // to unmanaged memory. This also ensures memory alignment.
126  IgnoreUnused(isMemoryManaged, dataLayout);
127 
128  if (isMemoryManaged)
129  {
130  return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
131  }
132  else
133  {
134  return std::make_unique<RefTensorHandle>(tensorInfo);
135  }
136 }

References armnn::IgnoreUnused().
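
Both overloads produce a RefTensorHandle, and for the reference backend the data layout argument does not affect the result. A short usage sketch (illustrative only; assumes a RefWorkloadFactory instance named factory already exists):

armnn::TensorInfo tensorInfo({ 1, 2, 2, 3 }, armnn::DataType::Float32);

// Memory-managed handle: storage comes from the factory's RefMemoryManager.
std::unique_ptr<armnn::ITensorHandle> managed =
    factory.CreateTensorHandle(tensorInfo, armnn::DataLayout::NHWC, true);

// Unmanaged handle: the caller later points it at, or imports, its own memory.
std::unique_ptr<armnn::ITensorHandle> unmanaged =
    factory.CreateTensorHandle(tensorInfo, /*IsMemoryManaged=*/false);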

◆ CreateWorkload()

std::unique_ptr< IWorkload > CreateWorkload ( LayerType  type,
const QueueDescriptor &  descriptor,
const WorkloadInfo &  info 
) const
overridevirtual

Backends should implement their own CreateWorkload function with a switch statement.

The case for the switch should be the LayerType and based on that they will call their specific workload creation functionality.

Implements IWorkloadFactory.

Definition at line 138 of file RefWorkloadFactory.cpp.

141 {
142  switch(type)
143  {
144  case LayerType::Activation :
145  {
146  auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
147  return std::make_unique<RefActivationWorkload>(*activationQueueDescriptor, info);
148  }
149  case LayerType::Addition :
150  {
151  auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
152  if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
153  {
154  return std::make_unique<RefAdditionWorkload<int32_t>>(*additionQueueDescriptor, info);
155  }
156  else
157  {
158  return std::make_unique<RefAdditionWorkload<float>>(*additionQueueDescriptor, info);
159  }
160  }
161  case LayerType::ArgMinMax :
162  {
163  auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
164  return std::make_unique<RefArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info);
165  }
166  case LayerType::BatchMatMul :
167  {
168  auto batchMatMulQueueDescriptor = PolymorphicDowncast<const BatchMatMulQueueDescriptor*>(&descriptor);
169  return std::make_unique<RefBatchMatMulWorkload>(*batchMatMulQueueDescriptor, info);
170  }
171  case LayerType::BatchNormalization :
172  {
173  auto batchNormQueueDescriptor = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
174  return std::make_unique<RefBatchNormalizationWorkload>(*batchNormQueueDescriptor, info);
175  }
176  case LayerType::BatchToSpaceNd :
177  {
178  auto batchToSpaceNdQueueDescriptor
179  = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
180  return std::make_unique<RefBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info);
181  }
182  case LayerType::Cast :
183  {
184  auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
185  return std::make_unique<RefCastWorkload>(*castQueueDescriptor, info);
186  }
187  case LayerType::ChannelShuffle :
188  {
189  auto channelShuffleQueueDescriptor
190  = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
191  return std::make_unique<RefChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info);
192  }
193  case LayerType::Comparison :
194  {
195  auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
196  return std::make_unique<RefComparisonWorkload>(*comparisonQueueDescriptor, info);
197  }
198  case LayerType::Concat :
199  {
200  auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
201  return std::make_unique<RefConcatWorkload>(*concatQueueDescriptor, info);
202  }
203  case LayerType::Constant :
204  {
205  auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
206  return std::make_unique<RefConstantWorkload>(*constantQueueDescriptor, info);
207  }
208  case LayerType::ConvertFp16ToFp32 :
209  {
210  auto convertFp16ToFp32QueueDescriptor
211  = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
212  return std::make_unique<RefConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info);
213  }
214  case LayerType::ConvertFp32ToFp16 :
215  {
216  auto convertFp32ToFp16QueueDescriptor
217  = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
218  return std::make_unique<RefConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor, info);
219  }
220  case LayerType::Convolution2d :
221  {
222  auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
223  return std::make_unique<RefConvolution2dWorkload>(*convolution2dQueueDescriptor, info);
224  }
225  case LayerType::Convolution3d :
226  {
227  auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
228  return std::make_unique<RefConvolution3dWorkload>(*convolution3dQueueDescriptor, info);
229  }
230  case LayerType::Debug:
231  {
232  auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
233  if (IsBFloat16(info))
234  {
235  return std::make_unique<RefDebugBFloat16Workload>(*debugQueueDescriptor, info);
236  }
237  if (IsFloat16(info))
238  {
239  return std::make_unique<RefDebugFloat16Workload>(*debugQueueDescriptor, info);
240  }
241  if (IsQSymmS16(info))
242  {
243  return std::make_unique<RefDebugQSymmS16Workload>(*debugQueueDescriptor, info);
244  }
245  if (IsQSymmS8(info))
246  {
247  return std::make_unique<RefDebugQSymmS8Workload>(*debugQueueDescriptor, info);
248  }
249  if (IsQAsymmU8(info))
250  {
251  return std::make_unique<RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
252  }
253  if (IsQAsymmS8(info))
254  {
255  return std::make_unique<RefDebugQAsymmS8Workload>(*debugQueueDescriptor, info);
256  }
257  if (IsSigned32(info))
258  {
259  return std::make_unique<RefDebugSigned32Workload>(*debugQueueDescriptor, info);
260  }
261  return MakeWorkload<RefDebugFloat32Workload, RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
262  }
263  case LayerType::DepthToSpace:
264  {
265  auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
266  return std::make_unique<RefDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info);
267  }
268  case LayerType::DepthwiseConvolution2d:
269  {
270  auto depthwiseConvolution2DQueueDescriptor
271  = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
272  return std::make_unique<RefDepthwiseConvolution2dWorkload>(*depthwiseConvolution2DQueueDescriptor, info);
273  }
274  case LayerType::Dequantize:
275  {
276  auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
277  return std::make_unique<RefDequantizeWorkload>(*dequantizeQueueDescriptor, info);
278  }
279  case LayerType::DetectionPostProcess:
280  {
281  auto detectionPostProcessQueueDescriptor
282  = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
283  return std::make_unique<RefDetectionPostProcessWorkload>(*detectionPostProcessQueueDescriptor, info);
284  }
285  case LayerType::Division:
286  {
287  auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
288  if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
289  {
290  return std::make_unique<RefDivisionWorkload<int32_t>>(*divisionQueueDescriptor, info);
291  }
292  else
293  {
294  return std::make_unique<RefDivisionWorkload<float>>(*divisionQueueDescriptor, info);
295  }
296  }
297  case LayerType::ElementwiseBinary:
298  {
299  auto elementwiseBinaryQueueDescriptor
300  = PolymorphicDowncast<const ElementwiseBinaryQueueDescriptor*>(&descriptor);
301  return std::make_unique<RefElementwiseBinaryWorkload>(*elementwiseBinaryQueueDescriptor, info);
302  }
303  case LayerType::ElementwiseUnary:
304  {
305  auto elementwiseUnaryQueueDescriptor
306  = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
307  if ((*elementwiseUnaryQueueDescriptor).m_Parameters.m_Operation == UnaryOperation::LogicalNot)
308  {
309  return std::make_unique<RefLogicalUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
310  }
311  return std::make_unique<RefElementwiseUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
312  }
313  case LayerType::FakeQuantization:
314  {
315  auto fakeQuantizationQueueDescriptor
316  = PolymorphicDowncast<const FakeQuantizationQueueDescriptor*>(&descriptor);
317  return std::make_unique<RefFakeQuantizationFloat32Workload>(*fakeQuantizationQueueDescriptor, info);
318  }
319  case LayerType::Fill:
320  {
321  auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
322  return std::make_unique<RefFillWorkload>(*fillQueueDescriptor, info);
323  }
324  case LayerType::Floor:
325  {
326  auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
327  if(IsQuantizedType(info.m_InputTensorInfos[0].GetDataType()))
328  {
329  return nullptr;
330  }
331  else
332  {
333  return std::make_unique<RefFloorWorkload>(*floorQueueDescriptor, info);
334  }
335  }
336  case LayerType::FullyConnected:
337  {
338  auto fullyConnectedQueueDescriptor
339  = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
340  return std::make_unique<RefFullyConnectedWorkload>(*fullyConnectedQueueDescriptor, info);
341  }
342  case LayerType::Gather:
343  {
344  auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
345  return std::make_unique<RefGatherWorkload>(*gatherQueueDescriptor, info);
346  }
347  case LayerType::GatherNd:
348  {
349  auto gatherNdQueueDescriptor = PolymorphicDowncast<const GatherNdQueueDescriptor*>(&descriptor);
350  return std::make_unique<RefGatherNdWorkload>(*gatherNdQueueDescriptor, info);
351  }
352  case LayerType::Input:
353  {
354  auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
355  if (info.m_InputTensorInfos.empty() )
356  {
357  throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Input cannot be zero length");
358  }
359  if (info.m_OutputTensorInfos.empty())
360  {
361  throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Output cannot be zero length");
362  }
363  if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
364  {
365  throw InvalidArgumentException("RefWorkloadFactory::CreateInput: "
366  "data input and output differ in byte count.");
367  }
368  return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
369  }
370  case LayerType::InstanceNormalization:
371  {
372  auto instanceNormalizationQueueDescriptor
373  = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
374  return std::make_unique<RefInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor, info);
375  }
376  case LayerType::L2Normalization:
377  {
378  auto l2NormalizationQueueDescriptor
379  = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
380  return std::make_unique<RefL2NormalizationWorkload>(*l2NormalizationQueueDescriptor, info);
381  }
382  case LayerType::LogicalBinary:
383  {
384  auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
385  return std::make_unique<RefLogicalBinaryWorkload>(*logicalBinaryQueueDescriptor, info);
386  }
387  case LayerType::LogSoftmax:
388  {
389  auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
390  return std::make_unique<RefLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor, info);
391  }
392  case LayerType::Lstm:
393  {
394  auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
395  return std::make_unique<RefLstmWorkload>(*lstmQueueDescriptor, info);
396  }
397  case LayerType::Maximum:
398  {
399  auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
400  if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
401  {
402  return std::make_unique<RefMaximumWorkload<int32_t>>(*maximumQueueDescriptor, info);
403  }
404  else
405  {
406  return std::make_unique<RefMaximumWorkload<float>>(*maximumQueueDescriptor, info);
407  }
408  }
409  case LayerType::Mean:
410  {
411  auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
412  return std::make_unique<RefMeanWorkload>(*meanQueueDescriptor, info);
413  }
414  case LayerType::MemCopy:
415  {
416  auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
417  if (descriptor.m_Inputs.empty())
418  {
419  throw InvalidArgumentException("RefWorkloadFactory: CreateMemCopy() expected an input tensor.");
420  }
421  return std::make_unique<CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
422  }
423  case LayerType::MemImport:
424  {
425  auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
426  if (descriptor.m_Inputs.empty())
427  {
428  throw InvalidArgumentException("RefWorkloadFactory: CreateMemImport() expected an input tensor.");
429  }
430  return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
431  }
432  case LayerType::Minimum:
433  {
434  auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
435  if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
436  {
437  return std::make_unique<RefMinimumWorkload<int32_t>>(*minimumQueueDescriptor, info);
438  }
439  else
440  {
441  return std::make_unique<RefMinimumWorkload<float>>(*minimumQueueDescriptor, info);
442  }
443  }
444  case LayerType::Multiplication:
445  {
446  auto multiplicationQueueDescriptor
447  = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
448  if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
449  {
450  return std::make_unique<RefMultiplicationWorkload<int32_t>>(*multiplicationQueueDescriptor, info);
451  }
452  else
453  {
454  return std::make_unique<RefMultiplicationWorkload<float>>(*multiplicationQueueDescriptor, info);
455  }
456  }
457  case LayerType::Normalization:
458  {
459  auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
460  return std::make_unique<RefNormalizationWorkload>(*normalizationQueueDescriptor, info);
461  }
462  case LayerType::Output:
463  {
464  auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
465  if (info.m_InputTensorInfos.empty() )
466  {
467  throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Input cannot be zero length");
468  }
469  if (info.m_OutputTensorInfos.empty())
470  {
471  throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Output cannot be zero length");
472  }
473  if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
474  {
475  throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: data input and output "
476  "differ in byte count.");
477  }
478  return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
479  }
480  case LayerType::Pad:
481  {
482  auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
483  return std::make_unique<RefPadWorkload>(*padQueueDescriptor, info);
484  }
485  case LayerType::Permute:
486  {
487  auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
488  if (IsQSymmS16(info))
489  {
490  return std::make_unique<RefPermuteQSymm16Workload>(*permuteQueueDescriptor, info);
491  }
492  else if (IsBFloat16(info))
493  {
494  return std::make_unique<RefPermuteBFloat16Workload>(*permuteQueueDescriptor, info);
495  }
496  else if (IsQAsymmS8(info))
497  {
498  return std::make_unique<RefPermuteQAsymmS8Workload>(*permuteQueueDescriptor, info);
499  }
500  return MakeWorkloadHelper<RefPermuteFloat16Workload, RefPermuteFloat32Workload, RefPermuteQAsymm8Workload,
501  NullWorkload, NullWorkload, NullWorkload>(*permuteQueueDescriptor, info);
502  }
503  case LayerType::Pooling2d:
504  {
505  auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
506  return std::make_unique<RefPooling2dWorkload>(*pooling2dQueueDescriptor, info);
507  }
508  case LayerType::Pooling3d:
509  {
510  auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
511  return std::make_unique<RefPooling3dWorkload>(*pooling3dQueueDescriptor, info);
512  }
513  case LayerType::PreCompiled:
514  {
515  return nullptr;
516  }
517  case LayerType::Prelu:
518  {
519  auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
520  return std::make_unique<RefPreluWorkload>(*preluQueueDescriptor, info);
521  }
522  case LayerType::QLstm:
523  {
524  auto qlstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
525  return std::make_unique<RefQLstmWorkload>(*qlstmQueueDescriptor, info);
526  }
527  case LayerType::Quantize:
528  {
529  auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
530  return std::make_unique<RefQuantizeWorkload>(*quantizeQueueDescriptor, info);
531  }
532  case LayerType::Rank:
533  {
534  auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
535  return std::make_unique<RefRankWorkload>(*rankQueueDescriptor, info);
536  }
537  case LayerType::Reduce:
538  {
539  auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
540  return std::make_unique<RefReduceWorkload>(*reduceQueueDescriptor, info);
541  }
542  case LayerType::Reshape:
543  {
544  auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
545  return std::make_unique<RefReshapeWorkload>(*reshapeQueueDescriptor, info);
546  }
547  case LayerType::Resize:
548  {
549  auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
550  return std::make_unique<RefResizeWorkload>(*resizeQueueDescriptor, info);
551  }
552  case LayerType::ReverseV2:
553  {
554  auto reverseV2QueueDescriptor = PolymorphicDowncast<const ReverseV2QueueDescriptor*>(&descriptor);
555  return std::make_unique<RefReverseV2Workload>(*reverseV2QueueDescriptor, info);
556  }
557  case LayerType::Shape:
558  {
559  auto shapeQueueDescriptor = PolymorphicDowncast<const ShapeQueueDescriptor*>(&descriptor);
560  return std::make_unique<RefShapeWorkload>(*shapeQueueDescriptor, info);
561  }
562  case LayerType::Slice:
563  {
564  auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
565  return std::make_unique<RefSliceWorkload>(*sliceQueueDescriptor, info);
566  }
567  case LayerType::Softmax:
568  {
569  auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
570  return std::make_unique<RefSoftmaxWorkload>(*softmaxQueueDescriptor, info);
571  }
572  case LayerType::SpaceToBatchNd:
573  {
574  auto spaceToBatchNdQueueDescriptor
575  = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
576  return std::make_unique<RefSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info);
577  }
578  case LayerType::SpaceToDepth:
579  {
580  auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
581  return std::make_unique<RefSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info);
582  }
583  case LayerType::Splitter:
584  {
585  auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
586  return std::make_unique<RefSplitterWorkload>(*splitterQueueDescriptor, info);
587  }
588  case LayerType::Stack:
589  {
590  auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
591  return std::make_unique<RefStackWorkload>(*stackQueueDescriptor, info);
592  }
593  case LayerType::StridedSlice:
594  {
595  auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
596  return std::make_unique<RefStridedSliceWorkload>(*stridedSliceQueueDescriptor, info);
597  }
598  case LayerType::Subtraction:
599  {
600  auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
601  if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
602  {
603  return std::make_unique<RefSubtractionWorkload<int32_t>>(*subtractionQueueDescriptor, info);
604  }
605  else
606  {
607  return std::make_unique<RefSubtractionWorkload<float>>(*subtractionQueueDescriptor, info);
608  }
609  }
610  case LayerType::Tile:
611  {
612  auto tileQueueDescriptor = PolymorphicDowncast<const TileQueueDescriptor*>(&descriptor);
613  return std::make_unique<RefTileWorkload>(*tileQueueDescriptor, info);
614  }
615  case LayerType::Transpose:
616  {
617  auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
618  if (IsQSymmS16(info))
619  {
620  return std::make_unique<RefTransposeQSymm16Workload>(*transposeQueueDescriptor, info);
621  }
622  else if (IsBFloat16(info))
623  {
624  return std::make_unique<RefTransposeBFloat16Workload>(*transposeQueueDescriptor, info);
625  }
626  else if (IsQAsymmS8(info))
627  {
628  return std::make_unique<RefTransposeQAsymmS8Workload>(*transposeQueueDescriptor, info);
629  }
630  return MakeWorkloadHelper<RefTransposeFloat16Workload, RefTransposeFloat32Workload,
631  RefTransposeQAsymm8Workload, NullWorkload, NullWorkload, NullWorkload>
632  (*transposeQueueDescriptor, info);
633  }
634  case LayerType::TransposeConvolution2d:
635  {
636  auto transposeConvolution2dQueueDescriptor
637  = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
638  return std::make_unique<RefTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor, info);
639  }
640  case LayerType::UnidirectionalSequenceLstm:
641  {
642  auto unidirectionalSequenceLstmQueueDescriptor
643  = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
644  return std::make_unique<RefUnidirectionalSequenceLstmWorkload>(*unidirectionalSequenceLstmQueueDescriptor,
645  info);
646  }
647  default:
648  return nullptr;
649  }
650 }

References armnn::Activation, armnn::Addition, armnn::ArgMinMax, armnn::BatchMatMul, armnn::BatchNormalization, armnn::BatchToSpaceNd, armnn::Cast, armnn::ChannelShuffle, armnn::Comparison, armnn::Concat, armnn::Constant, armnn::ConvertFp16ToFp32, armnn::ConvertFp32ToFp16, armnn::Convolution2d, armnn::Convolution3d, armnn::Debug, armnn::DepthToSpace, armnn::DepthwiseConvolution2d, armnn::Dequantize, armnn::DetectionPostProcess, armnn::Division, armnn::ElementwiseBinary, armnn::ElementwiseUnary, armnn::FakeQuantization, armnn::Fill, armnn::Floor, armnn::FullyConnected, armnn::Gather, armnn::GatherNd, armnn::info, armnn::Input, armnn::InstanceNormalization, armnn::IsBFloat16(), armnn::IsFloat16(), armnn::IsQAsymmS8(), armnn::IsQAsymmU8(), armnn::IsQSymmS16(), armnn::IsQSymmS8(), armnn::IsQuantizedType(), armnn::IsSigned32(), armnn::L2Normalization, armnn::LogicalBinary, armnn::LogicalNot, armnn::LogSoftmax, armnn::Lstm, QueueDescriptor::m_Inputs, armnn::Maximum, armnn::Mean, armnn::MemCopy, armnn::MemImport, armnn::Minimum, armnn::Multiplication, armnn::Normalization, armnn::Output, armnn::Pad, armnn::Permute, armnn::Pooling2d, armnn::Pooling3d, armnn::PreCompiled, armnn::Prelu, armnn::QLstm, armnn::Quantize, armnn::Rank, armnn::Reduce, armnn::Reshape, armnn::Resize, armnn::ReverseV2, armnn::Shape, armnn::Signed32, armnn::Slice, armnn::Softmax, armnn::SpaceToBatchNd, armnn::SpaceToDepth, armnn::Splitter, armnn::Stack, armnn::StridedSlice, armnn::Subtraction, armnn::Tile, armnn::Transpose, armnn::TransposeConvolution2d, and armnn::UnidirectionalSequenceLstm.
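
In normal use the ArmNN runtime populates a queue descriptor and a matching WorkloadInfo before calling this function; a direct call looks roughly like the sketch below (illustrative only; a real descriptor also needs its m_Inputs and m_Outputs bound to tensor handles, and factory is an assumed RefWorkloadFactory instance):

armnn::ActivationQueueDescriptor descriptor;
descriptor.m_Parameters.m_Function = armnn::ActivationFunction::ReLu;

armnn::WorkloadInfo workloadInfo; // would normally carry the input/output TensorInfos

std::unique_ptr<armnn::IWorkload> workload =
    factory.CreateWorkload(armnn::LayerType::Activation, descriptor, workloadInfo);

if (workload)
{
    workload->Execute(); // runs the reference (CPU) implementation
}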

◆ GetBackendId()

const BackendId & GetBackendId ( ) const
overridevirtual

Implements IWorkloadFactory.

Definition at line 87 of file RefWorkloadFactory.cpp.

88 {
89  return s_Id;
90 }

◆ IsLayerSupported() [1/2]

bool IsLayerSupported ( const IConnectableLayer &  layer,
Optional< DataType >  dataType,
std::string &  outReasonIfUnsupported,
const ModelOptions &  modelOptions 
)
static

Definition at line 99 of file RefWorkloadFactory.cpp.

103 {
104  return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported, modelOptions);
105 }

References IWorkloadFactory::IsLayerSupported().

◆ IsLayerSupported() [2/2]

bool IsLayerSupported ( const Layer &  layer,
Optional< DataType >  dataType,
std::string &  outReasonIfUnsupported 
)
static

Definition at line 92 of file RefWorkloadFactory.cpp.

95 {
96  return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
97 }

References IWorkloadFactory::IsLayerSupported().
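
Both static overloads forward to IWorkloadFactory::IsLayerSupported with the reference backend's id, so they can be used to pre-check a layer before assigning it to CpuRef. A sketch (assumes layer is an IConnectableLayer obtained while building or optimizing an INetwork):

std::string reason;
bool supported = armnn::RefWorkloadFactory::IsLayerSupported(layer,
                                                             armnn::DataType::Float32,
                                                             reason);
if (!supported)
{
    std::cerr << "CpuRef cannot run this layer: " << reason << std::endl;
}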

◆ SupportsSubTensors()

bool SupportsSubTensors ( ) const
inlineoverridevirtual

Implements IWorkloadFactory.

Definition at line 46 of file RefWorkloadFactory.hpp.

46 { return false; }
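
Because this always returns false, CreateSubTensorHandle() is not expected to succeed for the reference backend and callers fall back to full tensor handles. A sketch of that pattern (hypothetical names; parentHandle, subShape, subOrigin and subTensorInfo are assumed to exist):

std::unique_ptr<armnn::ITensorHandle> handle;
if (factory.SupportsSubTensors())
{
    handle = factory.CreateSubTensorHandle(*parentHandle, subShape, subOrigin);
}
if (!handle) // always the case for RefWorkloadFactory
{
    handle = factory.CreateTensorHandle(subTensorInfo);
}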

The documentation for this class was generated from the following files:
RefWorkloadFactory.hpp
RefWorkloadFactory.cpp