ArmNN 23.02
RefWorkloadFactory Class Reference

#include <RefWorkloadFactory.hpp>

Inheritance diagram for RefWorkloadFactory:
IWorkloadFactory

Public Member Functions

 RefWorkloadFactory (const std::shared_ptr< RefMemoryManager > &memoryManager)
 
 RefWorkloadFactory ()
 
 ~RefWorkloadFactory ()
 
const BackendId & GetBackendId () const override
 
bool SupportsSubTensors () const override
 
std::unique_ptr< ITensorHandle > CreateSubTensorHandle (ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const override
 
std::unique_ptr< ITensorHandle > CreateTensorHandle (const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const override
 
std::unique_ptr< ITensorHandle > CreateTensorHandle (const TensorInfo &tensorInfo, DataLayout dataLayout, const bool IsMemoryManaged=true) const override
 
std::unique_ptr< IWorkload > CreateWorkload (LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const override
 
- Public Member Functions inherited from IWorkloadFactory
virtual ~IWorkloadFactory ()
 
virtual void AfterWorkloadsCreated ()
 

Static Public Member Functions

static bool IsLayerSupported (const Layer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported, const ModelOptions &modelOptions)
 
- Static Public Member Functions inherited from IWorkloadFactory
static bool IsLayerSupported (const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported, const ModelOptions &modelOptions)
 
static bool IsLayerSupported (const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported, const ModelOptions &modelOptions)
 

Detailed Description

Definition at line 30 of file RefWorkloadFactory.hpp.

Constructor & Destructor Documentation

◆ RefWorkloadFactory() [1/2]

RefWorkloadFactory ( const std::shared_ptr< RefMemoryManager > &  memoryManager)
explicit

Definition at line 83 of file RefWorkloadFactory.cpp.

84  : m_MemoryManager(memoryManager)
85 {
86 }

◆ RefWorkloadFactory() [2/2]

RefWorkloadFactory ( )

Definition at line 88 of file RefWorkloadFactory.cpp.

89  : m_MemoryManager(new RefMemoryManager())
90 {
91 }

◆ ~RefWorkloadFactory()

~RefWorkloadFactory ( )
inline

Definition at line 36 of file RefWorkloadFactory.hpp.

36 {}
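
Taken together, the two constructors let the factory either share an externally owned RefMemoryManager or allocate its own. The following is a minimal construction sketch; the include paths and the use of a shared memory manager across factories are assumptions based on the reference backend's header names, not prescribed usage.

#include <RefWorkloadFactory.hpp>
#include <RefMemoryManager.hpp>
#include <memory>

using namespace armnn;

// Default construction: the factory creates its own RefMemoryManager internally.
RefWorkloadFactory defaultFactory;

// Explicit construction: several factories can share one memory manager, so the
// memory-managed tensor handles they create draw from the same pool.
std::shared_ptr<RefMemoryManager> memoryManager = std::make_shared<RefMemoryManager>();
RefWorkloadFactory pooledFactory(memoryManager);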

Member Function Documentation

◆ CreateSubTensorHandle()

std::unique_ptr<ITensorHandle> CreateSubTensorHandle ( ITensorHandle &  parent,
TensorShape const &  subTensorShape,
unsigned int const *  subTensorOrigin 
) const
inline override virtual

Implements IWorkloadFactory.

Definition at line 52 of file RefWorkloadFactory.hpp.

55  {
56  IgnoreUnused(parent, subTensorShape, subTensorOrigin);
57  return nullptr;
58  }

References armnn::IgnoreUnused().
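
Because the reference backend reports SupportsSubTensors() as false, this override always returns nullptr. The sketch below shows the usual caller-side guard; the helper name, argument names and include paths are illustrative assumptions, not part of the ArmNN API.

#include <RefWorkloadFactory.hpp>
#include <armnn/Tensor.hpp>
#include <memory>

// Illustrative helper: only request a sub-tensor handle when the factory claims
// support. For RefWorkloadFactory the CreateSubTensorHandle call is never reached.
std::unique_ptr<armnn::ITensorHandle> TryCreateSubTensor(const armnn::RefWorkloadFactory& factory,
                                                         armnn::ITensorHandle& parent,
                                                         const armnn::TensorShape& subShape,
                                                         const unsigned int* subOrigin)
{
    if (!factory.SupportsSubTensors())
    {
        return nullptr;
    }
    return factory.CreateSubTensorHandle(parent, subShape, subOrigin);
}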

◆ CreateTensorHandle() [1/2]

std::unique_ptr< ITensorHandle > CreateTensorHandle ( const TensorInfo &  tensorInfo,
const bool  IsMemoryManaged = true 
) const
override virtual

Implements IWorkloadFactory.

Definition at line 113 of file RefWorkloadFactory.cpp.

115 {
116  if (isMemoryManaged)
117  {
118  return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
119  }
120  else
121  {
122  return std::make_unique<RefTensorHandle>(tensorInfo);
123  }
124 }

◆ CreateTensorHandle() [2/2]

std::unique_ptr< ITensorHandle > CreateTensorHandle ( const TensorInfo &  tensorInfo,
DataLayout  dataLayout,
const bool  IsMemoryManaged = true 
) const
override virtual

Implements IWorkloadFactory.

Definition at line 126 of file RefWorkloadFactory.cpp.

129 {
130  // For Ref it is okay to make the TensorHandle memory managed as it can also store a pointer
131  // to unmanaged memory. This also ensures memory alignment.
132  IgnoreUnused(isMemoryManaged, dataLayout);
133 
134  if (isMemoryManaged)
135  {
136  return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
137  }
138  else
139  {
140  return std::make_unique<RefTensorHandle>(tensorInfo);
141  }
142 }

References armnn::IgnoreUnused().
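
For the reference backend both overloads produce a RefTensorHandle: the dataLayout argument is ignored and isMemoryManaged only selects whether the backing storage comes from the factory's RefMemoryManager pool. A usage sketch follows; the tensor shape, layout and include paths are illustrative assumptions.

#include <RefWorkloadFactory.hpp>
#include <armnn/Tensor.hpp>

using namespace armnn;

RefWorkloadFactory factory;
TensorInfo tensorInfo(TensorShape({ 1, 3, 224, 224 }), DataType::Float32);

// Memory-managed handle: backing memory is acquired from the RefMemoryManager.
auto managedHandle   = factory.CreateTensorHandle(tensorInfo, DataLayout::NHWC, /*IsMemoryManaged=*/ true);

// Unmanaged handle: the RefTensorHandle holds its own (or imported) storage.
auto unmanagedHandle = factory.CreateTensorHandle(tensorInfo, /*IsMemoryManaged=*/ false);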

◆ CreateWorkload()

std::unique_ptr< IWorkload > CreateWorkload ( LayerType  type,
const QueueDescriptor &  descriptor,
const WorkloadInfo &  info 
) const
override virtual

Reimplemented from IWorkloadFactory.

Definition at line 144 of file RefWorkloadFactory.cpp.

147 {
148  switch(type)
149  {
150  case LayerType::Activation :
151  {
152  auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
153  return std::make_unique<RefActivationWorkload>(*activationQueueDescriptor, info);
154  }
155  case LayerType::Addition :
156  {
157  auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
158 
159  if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
160  {
161  return std::make_unique<RefAdditionWorkload<int32_t>>(*additionQueueDescriptor, info);
162  }
163  else
164  {
165  return std::make_unique<RefAdditionWorkload<float>>(*additionQueueDescriptor, info);
166  }
167  }
168  case LayerType::ArgMinMax :
169  {
170  auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
171  return std::make_unique<RefArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info);
172  }
173  case LayerType::BatchMatMul:
174  {
175  auto batchMatMulQueueDescriptor = PolymorphicDowncast<const BatchMatMulQueueDescriptor*>(&descriptor);
176  return std::make_unique<RefBatchMatMulWorkload>(*batchMatMulQueueDescriptor, info);
177  }
178  case LayerType::BatchNormalization:
179  {
180  auto batchNormQueueDescriptor = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
181  return std::make_unique<RefBatchNormalizationWorkload>(*batchNormQueueDescriptor, info);
182  }
183  case LayerType::BatchToSpaceNd:
184  {
185  auto batchToSpaceNdQueueDescriptor
186  = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
187  return std::make_unique<RefBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info);
188  }
189  case LayerType::Cast :
190  {
191  auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
192  return std::make_unique<RefCastWorkload>(*castQueueDescriptor, info);
193  }
194  case LayerType::ChannelShuffle:
195  {
196  auto channelShuffleQueueDescriptor
197  = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
198  return std::make_unique<RefChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info);
199  }
200  case LayerType::Comparison :
201  {
202  auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
203  return std::make_unique<RefComparisonWorkload>(*comparisonQueueDescriptor, info);
204  }
205  case LayerType::Concat :
206  {
207  auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
208  return std::make_unique<RefConcatWorkload>(*concatQueueDescriptor, info);
209  }
210  case LayerType::Constant :
211  {
212  auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
213  return std::make_unique<RefConstantWorkload>(*constantQueueDescriptor, info);
214  }
215  case LayerType::ConvertFp16ToFp32:
216  {
217  auto convertFp16ToFp32QueueDescriptor
218  = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
219  return std::make_unique<RefConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info);
220  }
221  case LayerType::ConvertFp32ToFp16:
222  {
223  auto convertFp32ToFp16QueueDescriptor
224  = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
225  return std::make_unique<RefConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor, info);
226  }
227  case LayerType::Convolution2d:
228  {
229  auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
230  return std::make_unique<RefConvolution2dWorkload>(*convolution2dQueueDescriptor, info);
231  }
232  case LayerType::Convolution3d:
233  {
234  auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
235  return std::make_unique<RefConvolution3dWorkload>(*convolution3dQueueDescriptor, info);
236  }
237  case LayerType::Debug:
238  {
239  auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
240  if (IsBFloat16(info))
241  {
242  return std::make_unique<RefDebugBFloat16Workload>(*debugQueueDescriptor, info);
243  }
244  if (IsFloat16(info))
245  {
246  return std::make_unique<RefDebugFloat16Workload>(*debugQueueDescriptor, info);
247  }
248  if (IsQSymmS16(info))
249  {
250  return std::make_unique<RefDebugQSymmS16Workload>(*debugQueueDescriptor, info);
251  }
252  if (IsQSymmS8(info))
253  {
254  return std::make_unique<RefDebugQSymmS8Workload>(*debugQueueDescriptor, info);
255  }
256  if (IsQAsymmU8(info))
257  {
258  return std::make_unique<RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
259  }
260  if (IsQAsymmS8(info))
261  {
262  return std::make_unique<RefDebugQAsymmS8Workload>(*debugQueueDescriptor, info);
263  }
264  if (IsSigned32(info))
265  {
266  return std::make_unique<RefDebugSigned32Workload>(*debugQueueDescriptor, info);
267  }
268 
269  return MakeWorkload<RefDebugFloat32Workload, RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
270  }
271  case LayerType::DepthToSpace:
272  {
273  auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
274  return std::make_unique<RefDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info);
275  }
276  case LayerType::DepthwiseConvolution2d:
277  {
278  auto depthwiseConvolution2DQueueDescriptor
279  = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
280  return std::make_unique<RefDepthwiseConvolution2dWorkload>(*depthwiseConvolution2DQueueDescriptor, info);
281  }
282  case LayerType::Dequantize:
283  {
284  auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
285  return std::make_unique<RefDequantizeWorkload>(*dequantizeQueueDescriptor, info);
286  }
287  case LayerType::DetectionPostProcess:
288  {
289  auto detectionPostProcessQueueDescriptor
290  = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
291  return std::make_unique<RefDetectionPostProcessWorkload>(*detectionPostProcessQueueDescriptor, info);
292  }
293  case LayerType::Division:
294  {
295  auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
296  if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
297  {
298  return std::make_unique<RefDivisionWorkload<int32_t>>(*divisionQueueDescriptor, info);
299  }
300  else
301  {
302  return std::make_unique<RefDivisionWorkload<float>>(*divisionQueueDescriptor, info);
303  }
304  }
305  case LayerType::ElementwiseUnary:
306  {
307  auto elementwiseUnaryQueueDescriptor
308  = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
309  if ((*elementwiseUnaryQueueDescriptor).m_Parameters.m_Operation == UnaryOperation::LogicalNot)
310  {
311  return std::make_unique<RefLogicalUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
312  }
313  return std::make_unique<RefElementwiseUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
314  }
315  case LayerType::FakeQuantization:
316  {
317  auto fakeQuantizationQueueDescriptor
318  = PolymorphicDowncast<const FakeQuantizationQueueDescriptor*>(&descriptor);
319  return std::make_unique<RefFakeQuantizationFloat32Workload>(*fakeQuantizationQueueDescriptor, info);
320  }
321  case LayerType::Fill:
322  {
323  auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
324  return std::make_unique<RefFillWorkload>(*fillQueueDescriptor, info);
325  }
326  case LayerType::Floor:
327  {
328  auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
329  if(IsQuantizedType(info.m_InputTensorInfos[0].GetDataType()))
330  {
331  return nullptr;
332  }
333  else
334  {
335  return std::make_unique<RefFloorWorkload>(*floorQueueDescriptor, info);
336  }
337  }
338  case LayerType::FullyConnected:
339  {
340  auto fullyConnectedQueueDescriptor
341  = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
342  return std::make_unique<RefFullyConnectedWorkload>(*fullyConnectedQueueDescriptor, info);
343  }
344  case LayerType::Gather:
345  {
346  auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
347  return std::make_unique<RefGatherWorkload>(*gatherQueueDescriptor, info);
348  }
349  case LayerType::GatherNd:
350  {
351  auto gatherNdQueueDescriptor = PolymorphicDowncast<const GatherNdQueueDescriptor*>(&descriptor);
352  return std::make_unique<RefGatherNdWorkload>(*gatherNdQueueDescriptor, info);
353  }
354  case LayerType::Input:
355  {
356  auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
357  if (info.m_InputTensorInfos.empty() )
358  {
359  throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Input cannot be zero length");
360  }
361  if (info.m_OutputTensorInfos.empty())
362  {
363  throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Output cannot be zero length");
364  }
365 
366  if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
367  {
368  throw InvalidArgumentException("RefWorkloadFactory::CreateInput: "
369  "data input and output differ in byte count.");
370  }
371 
372  return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
373  }
374  case LayerType::InstanceNormalization:
375  {
376  auto instanceNormalizationQueueDescriptor
377  = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
378  return std::make_unique<RefInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor, info);
379  }
380  case LayerType::L2Normalization:
381  {
382  auto l2NormalizationQueueDescriptor
383  = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
384  return std::make_unique<RefL2NormalizationWorkload>(*l2NormalizationQueueDescriptor, info);
385  }
386  case LayerType::LogicalBinary:
387  {
388  auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
389  return std::make_unique<RefLogicalBinaryWorkload>(*logicalBinaryQueueDescriptor, info);
390  }
391  case LayerType::LogSoftmax:
392  {
393  auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
394  return std::make_unique<RefLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor, info);
395  }
396  case LayerType::Lstm:
397  {
398  auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
399  return std::make_unique<RefLstmWorkload>(*lstmQueueDescriptor, info);
400  }
401  case LayerType::Maximum:
402  {
403  auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
404  if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
405  {
406  return std::make_unique<RefMaximumWorkload<int32_t>>(*maximumQueueDescriptor, info);
407  }
408  else
409  {
410  return std::make_unique<RefMaximumWorkload<float>>(*maximumQueueDescriptor, info);
411  }
412  }
413  case LayerType::Mean:
414  {
415  auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
416  return std::make_unique<RefMeanWorkload>(*meanQueueDescriptor, info);
417  }
418  case LayerType::MemCopy:
419  {
420  auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
421  if (descriptor.m_Inputs.empty())
422  {
423  throw InvalidArgumentException("RefWorkloadFactory: CreateMemCopy() expected an input tensor.");
424  }
425  return std::make_unique<CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
426  }
427  case LayerType::MemImport:
428  {
429  auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
430  if (descriptor.m_Inputs.empty())
431  {
432  throw InvalidArgumentException("RefWorkloadFactory: CreateMemImport() expected an input tensor.");
433  }
434  return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
435  }
436  case LayerType::Minimum:
437  {
438  auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
439  if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
440  {
441  return std::make_unique<RefMinimumWorkload<int32_t>>(*minimumQueueDescriptor, info);
442  }
443  else
444  {
445  return std::make_unique<RefMinimumWorkload<float>>(*minimumQueueDescriptor, info);
446  }
447  }
448  case LayerType::Multiplication:
449  {
450  auto multiplicationQueueDescriptor
451  = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
452  if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
453  {
454  return std::make_unique<RefMultiplicationWorkload<int32_t>>(*multiplicationQueueDescriptor, info);
455  }
456  else
457  {
458  return std::make_unique<RefMultiplicationWorkload<float>>(*multiplicationQueueDescriptor, info);
459  }
460  }
461  case LayerType::Normalization:
462  {
463  auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
464  return std::make_unique<RefNormalizationWorkload>(*normalizationQueueDescriptor, info);
465  }
466  case LayerType::Output:
467  {
468  auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
469  if (info.m_InputTensorInfos.empty() )
470  {
471  throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Input cannot be zero length");
472  }
473  if (info.m_OutputTensorInfos.empty())
474  {
475  throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Output cannot be zero length");
476  }
477  if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
478  {
479  throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: data input and output "
480  "differ in byte count.");
481  }
482 
483  return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
484  }
485  case LayerType::Pad:
486  {
487  auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
488  return std::make_unique<RefPadWorkload>(*padQueueDescriptor, info);
489  }
490  case LayerType::Permute:
491  {
492  auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
493  if (IsQSymmS16(info))
494  {
495  return std::make_unique<RefPermuteQSymm16Workload>(*permuteQueueDescriptor, info);
496  }
497  else if (IsBFloat16(info))
498  {
499  return std::make_unique<RefPermuteBFloat16Workload>(*permuteQueueDescriptor, info);
500  }
501  else if (IsQAsymmS8(info))
502  {
503  return std::make_unique<RefPermuteQAsymmS8Workload>(*permuteQueueDescriptor, info);
504  }
505  return MakeWorkloadHelper<RefPermuteFloat16Workload, RefPermuteFloat32Workload, RefPermuteQAsymm8Workload,
506  NullWorkload, NullWorkload, NullWorkload>(*permuteQueueDescriptor, info);
507  }
508  case LayerType::Pooling2d:
509  {
510  auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
511  return std::make_unique<RefPooling2dWorkload>(*pooling2dQueueDescriptor, info);
512  }
513  case LayerType::Pooling3d:
514  {
515  auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
516  return std::make_unique<RefPooling3dWorkload>(*pooling3dQueueDescriptor, info);
517  }
518  case LayerType::PreCompiled:
519  {
520  return nullptr;
521  }
522  case LayerType::Prelu:
523  {
524  auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
525  return std::make_unique<RefPreluWorkload>(*preluQueueDescriptor, info);
526  }
527  case LayerType::QLstm:
528  {
529  auto qlstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
530  return std::make_unique<RefQLstmWorkload>(*qlstmQueueDescriptor, info);
531  }
532  case LayerType::Quantize:
533  {
534  auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
535  return std::make_unique<RefQuantizeWorkload>(*quantizeQueueDescriptor, info);
536  }
537  case LayerType::Rank:
538  {
539  auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
540  return std::make_unique<RefRankWorkload>(*rankQueueDescriptor, info);
541  }
542  case LayerType::Reduce:
543  {
544  auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
545  return std::make_unique<RefReduceWorkload>(*reduceQueueDescriptor, info);
546  }
547  case LayerType::Reshape:
548  {
549  auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
550  return std::make_unique<RefReshapeWorkload>(*reshapeQueueDescriptor, info);
551  }
552  case LayerType::Resize:
553  {
554  auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
555  return std::make_unique<RefResizeWorkload>(*resizeQueueDescriptor, info);
556  }
557  case LayerType::Shape:
558  {
559  auto shapeQueueDescriptor = PolymorphicDowncast<const ShapeQueueDescriptor*>(&descriptor);
560  return std::make_unique<RefShapeWorkload>(*shapeQueueDescriptor, info);
561  }
562  case LayerType::Slice:
563  {
564  auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
565  return std::make_unique<RefSliceWorkload>(*sliceQueueDescriptor, info);
566  }
567  case LayerType::Softmax:
568  {
569  auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
570  return std::make_unique<RefSoftmaxWorkload>(*softmaxQueueDescriptor, info);
571  }
572  case LayerType::SpaceToBatchNd:
573  {
574  auto spaceToBatchNdQueueDescriptor
575  = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
576  return std::make_unique<RefSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info);
577  }
578  case LayerType::SpaceToDepth:
579  {
580  auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
581  return std::make_unique<RefSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info);
582  }
583  case LayerType::Splitter:
584  {
585  auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
586  return std::make_unique<RefSplitterWorkload>(*splitterQueueDescriptor, info);
587  }
588  case LayerType::Stack:
589  {
590  auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
591  return std::make_unique<RefStackWorkload>(*stackQueueDescriptor, info);
592  }
593  case LayerType::StridedSlice:
594  {
595  auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
596  return std::make_unique<RefStridedSliceWorkload>(*stridedSliceQueueDescriptor, info);
597  }
598  case LayerType::Subtraction:
599  {
600  auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
601  if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
602  {
603  return std::make_unique<RefSubtractionWorkload<int32_t>>(*subtractionQueueDescriptor, info);
604  }
605  else
606  {
607  return std::make_unique<RefSubtractionWorkload<float>>(*subtractionQueueDescriptor, info);
608  }
609  }
610  case LayerType::Transpose:
611  {
612  auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
613  if (IsQSymmS16(info))
614  {
615  return std::make_unique<RefTransposeQSymm16Workload>(*transposeQueueDescriptor, info);
616  }
617  else if (IsBFloat16(info))
618  {
619  return std::make_unique<RefTransposeBFloat16Workload>(*transposeQueueDescriptor, info);
620  }
621  else if (IsQAsymmS8(info))
622  {
623  return std::make_unique<RefTransposeQAsymmS8Workload>(*transposeQueueDescriptor, info);
624  }
625  return MakeWorkloadHelper<RefTransposeFloat16Workload, RefTransposeFloat32Workload,
626  RefTransposeQAsymm8Workload, NullWorkload, NullWorkload, NullWorkload>
627  (*transposeQueueDescriptor, info);
628  }
629  case LayerType::TransposeConvolution2d:
630  {
631  auto transposeConvolution2dQueueDescriptor
632  = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
633  return std::make_unique<RefTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor, info);
634  }
635  case LayerType::UnidirectionalSequenceLstm:
636  {
637  auto unidirectionalSequenceLstmQueueDescriptor
638  = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
639  return std::make_unique<RefUnidirectionalSequenceLstmWorkload>(*unidirectionalSequenceLstmQueueDescriptor,
640  info);
641  }
642  default:
643  return nullptr;
644  }
645 }

References armnn::Activation, armnn::Addition, armnn::ArgMinMax, armnn::BatchMatMul, armnn::BatchNormalization, armnn::BatchToSpaceNd, armnn::Cast, armnn::ChannelShuffle, armnn::Comparison, armnn::Concat, armnn::Constant, armnn::ConvertFp16ToFp32, armnn::ConvertFp32ToFp16, armnn::Convolution2d, armnn::Convolution3d, armnn::Debug, armnn::DepthToSpace, armnn::DepthwiseConvolution2d, armnn::Dequantize, armnn::DetectionPostProcess, armnn::Division, armnn::ElementwiseUnary, armnn::FakeQuantization, armnn::Fill, armnn::Floor, armnn::FullyConnected, armnn::Gather, armnn::GatherNd, armnn::info, armnn::Input, armnn::InstanceNormalization, armnn::IsBFloat16(), armnn::IsFloat16(), armnn::IsQAsymmS8(), armnn::IsQAsymmU8(), armnn::IsQSymmS16(), armnn::IsQSymmS8(), armnn::IsQuantizedType(), armnn::IsSigned32(), armnn::L2Normalization, armnn::LogicalBinary, armnn::LogicalNot, armnn::LogSoftmax, armnn::Lstm, QueueDescriptor::m_Inputs, armnn::Maximum, armnn::Mean, armnn::MemCopy, armnn::MemImport, armnn::Minimum, armnn::Multiplication, armnn::Normalization, armnn::Output, armnn::Pad, armnn::Permute, armnn::Pooling2d, armnn::Pooling3d, armnn::PreCompiled, armnn::Prelu, armnn::QLstm, armnn::Quantize, armnn::Rank, armnn::Reduce, armnn::Reshape, armnn::Resize, armnn::Shape, armnn::Signed32, armnn::Slice, armnn::Softmax, armnn::SpaceToBatchNd, armnn::SpaceToDepth, armnn::Splitter, armnn::Stack, armnn::StridedSlice, armnn::Subtraction, armnn::Transpose, armnn::TransposeConvolution2d, and armnn::UnidirectionalSequenceLstm.
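
The switch above downcasts the generic QueueDescriptor to the concrete descriptor type for the requested LayerType and instantiates the matching Ref*Workload. The following is a hedged sketch of driving it for an activation layer; the wrapper function, the tensor-handle pointers and the include paths are illustrative assumptions rather than prescribed ArmNN usage.

#include <RefWorkloadFactory.hpp>
#include <armnn/backends/WorkloadData.hpp>
#include <armnn/backends/WorkloadInfo.hpp>

using namespace armnn;

// Illustrative only: the input/output ITensorHandle pointers would normally come
// from CreateTensorHandle() and be registered on the descriptor before execution.
std::unique_ptr<IWorkload> MakeReluWorkload(const RefWorkloadFactory& factory,
                                            ITensorHandle* input,
                                            ITensorHandle* output,
                                            const TensorInfo& tensorInfo)
{
    ActivationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Function = ActivationFunction::ReLu;
    descriptor.m_Inputs.push_back(input);
    descriptor.m_Outputs.push_back(output);

    WorkloadInfo info;
    info.m_InputTensorInfos.push_back(tensorInfo);
    info.m_OutputTensorInfos.push_back(tensorInfo);

    // Dispatches to the LayerType::Activation case and returns a RefActivationWorkload.
    return factory.CreateWorkload(LayerType::Activation, descriptor, info);
}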

◆ GetBackendId()

const BackendId & GetBackendId ( ) const
override virtual

Implements IWorkloadFactory.

Definition at line 93 of file RefWorkloadFactory.cpp.

94 {
95  return s_Id;
96 }
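
s_Id is the reference backend's fixed identifier, so this accessor simply exposes the backend name to optimizer and runtime code. A short sketch, assuming the reference backend registers itself under the usual "CpuRef" id:

armnn::RefWorkloadFactory factory;
// For the reference backend this resolves to the "CpuRef" BackendId.
const armnn::BackendId& backend = factory.GetBackendId();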

◆ IsLayerSupported() [1/2]

bool IsLayerSupported ( const IConnectableLayer &  layer,
Optional< DataType >  dataType,
std::string &  outReasonIfUnsupported,
const ModelOptions &  modelOptions 
)
static

Definition at line 105 of file RefWorkloadFactory.cpp.

109 {
110  return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported, modelOptions);
111 }

References IWorkloadFactory::IsLayerSupported().

◆ IsLayerSupported() [2/2]

bool IsLayerSupported ( const Layer &  layer,
Optional< DataType >  dataType,
std::string &  outReasonIfUnsupported 
)
static

Definition at line 98 of file RefWorkloadFactory.cpp.

101 {
102  return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
103 }

References IWorkloadFactory::IsLayerSupported().
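
Both static overloads simply forward to IWorkloadFactory::IsLayerSupported with the reference backend's id prepended. Below is a sketch of querying support for a layer created through the INetwork interface; the surrounding function, the descriptor values and the include paths are illustrative assumptions.

#include <armnn/INetwork.hpp>
#include <RefWorkloadFactory.hpp>

// Illustrative query: is a Float32 ReLU activation supported by the reference backend?
bool IsReluSupportedOnCpuRef()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    ActivationDescriptor reluDesc;
    reluDesc.m_Function = ActivationFunction::ReLu;
    IConnectableLayer* relu = network->AddActivationLayer(reluDesc, "relu");

    std::string reason;
    ModelOptions modelOptions; // no backend-specific options
    return RefWorkloadFactory::IsLayerSupported(*relu, DataType::Float32, reason, modelOptions);
}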

◆ SupportsSubTensors()

bool SupportsSubTensors ( ) const
inline override virtual

Implements IWorkloadFactory.

Definition at line 49 of file RefWorkloadFactory.hpp.

49 { return false; }

The documentation for this class was generated from the following files:
RefWorkloadFactory.hpp
RefWorkloadFactory.cpp