ArmNN 22.05.01
RefWorkloadFactory Class Reference

#include <RefWorkloadFactory.hpp>

Inheritance diagram for RefWorkloadFactory:
IWorkloadFactory

Public Member Functions

 RefWorkloadFactory (const std::shared_ptr< RefMemoryManager > &memoryManager)
 
 RefWorkloadFactory ()
 
 ~RefWorkloadFactory ()
 
const BackendId & GetBackendId () const override
 
bool SupportsSubTensors () const override
 
std::unique_ptr< ITensorHandle > CreateSubTensorHandle (ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const override
 
std::unique_ptr< ITensorHandle > CreateTensorHandle (const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const override
 
std::unique_ptr< ITensorHandle > CreateTensorHandle (const TensorInfo &tensorInfo, DataLayout dataLayout, const bool IsMemoryManaged=true) const override
 
std::unique_ptr< IWorkload > CreateWorkload (LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const override
 
- Public Member Functions inherited from IWorkloadFactory
virtual ~IWorkloadFactory ()
 
virtual void AfterWorkloadsCreated ()
 
virtual std::unique_ptr< IWorkload > CreateInput (const InputQueueDescriptor &descriptor, const WorkloadInfo &info) const =0
 

Static Public Member Functions

static bool IsLayerSupported (const Layer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported, const ModelOptions &modelOptions)
 
- Static Public Member Functions inherited from IWorkloadFactory
static bool IsLayerSupported (const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported, const ModelOptions &modelOptions)
 
static bool IsLayerSupported (const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported, const ModelOptions &modelOptions)
 

Detailed Description

Definition at line 30 of file RefWorkloadFactory.hpp.

Constructor & Destructor Documentation

◆ RefWorkloadFactory() [1/2]

RefWorkloadFactory ( const std::shared_ptr< RefMemoryManager > &  memoryManager)
explicit

Definition at line 83 of file RefWorkloadFactory.cpp.

RefWorkloadFactory::RefWorkloadFactory(const std::shared_ptr<RefMemoryManager>& memoryManager)
    : m_MemoryManager(memoryManager)
{
}

◆ RefWorkloadFactory() [2/2]

RefWorkloadFactory ( )

Definition at line 88 of file RefWorkloadFactory.cpp.

RefWorkloadFactory::RefWorkloadFactory()
    : m_MemoryManager(new RefMemoryManager())
{
}

◆ ~RefWorkloadFactory()

Member Function Documentation

◆ CreateSubTensorHandle()

std::unique_ptr<ITensorHandle> CreateSubTensorHandle ( ITensorHandle &  parent,
TensorShape const &  subTensorShape,
unsigned int const *  subTensorOrigin 
) const
inlineoverridevirtual
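
Note (editorial): since SupportsSubTensors() returns false for this backend (see below), callers should not expect this override to produce a usable sub-tensor view on the reference backend.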

◆ CreateTensorHandle() [1/2]

std::unique_ptr< ITensorHandle > CreateTensorHandle ( const TensorInfo &  tensorInfo,
const bool  IsMemoryManaged = true 
) const
overridevirtual

Implements IWorkloadFactory.

Definition at line 113 of file RefWorkloadFactory.cpp.

References armnn::Malloc.

Referenced by RefWorkloadFactory::CreateSubTensorHandle().

std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                      const bool isMemoryManaged) const
{
    if (isMemoryManaged)
    {
        return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
    }
    else
    {
        return std::make_unique<RefTensorHandle>(tensorInfo, static_cast<unsigned int>(MemorySource::Malloc));
    }
}
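
A usage sketch (not from the generated reference; the TensorInfo values are placeholders):

#include <RefWorkloadFactory.hpp>

using namespace armnn;

void MakeHandles(const RefWorkloadFactory& factory)
{
    TensorInfo info(TensorShape({1, 3, 224, 224}), DataType::Float32);

    // Managed: backing memory comes from the factory's RefMemoryManager.
    auto managed = factory.CreateTensorHandle(info, /*IsMemoryManaged=*/true);

    // Unmanaged: the handle falls back to MemorySource::Malloc, as in the
    // else-branch above.
    auto unmanaged = factory.CreateTensorHandle(info, /*IsMemoryManaged=*/false);
}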

◆ CreateTensorHandle() [2/2]

std::unique_ptr< ITensorHandle > CreateTensorHandle ( const TensorInfo &  tensorInfo,
DataLayout  dataLayout,
const bool  IsMemoryManaged = true 
) const
overridevirtual

Implements IWorkloadFactory.

Definition at line 126 of file RefWorkloadFactory.cpp.

References armnn::IgnoreUnused(), and armnn::Malloc.

std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                      DataLayout dataLayout,
                                                                      const bool isMemoryManaged) const
{
    // For Ref it is okay to make the TensorHandle memory managed as it can also store a pointer
    // to unmanaged memory. This also ensures memory alignment.
    IgnoreUnused(isMemoryManaged, dataLayout);

    if (isMemoryManaged)
    {
        return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
    }
    else
    {
        return std::make_unique<RefTensorHandle>(tensorInfo, static_cast<unsigned int>(MemorySource::Malloc));
    }
}

◆ CreateWorkload()

std::unique_ptr< IWorkload > CreateWorkload ( LayerType  type,
const QueueDescriptor &  descriptor,
const WorkloadInfo &  info
) const
overridevirtual

Reimplemented from IWorkloadFactory.

Definition at line 144 of file RefWorkloadFactory.cpp.

References armnn::Activation, armnn::Addition, armnn::ArgMinMax, armnn::BatchNormalization, armnn::BatchToSpaceNd, armnn::Cast, armnn::ChannelShuffle, armnn::Comparison, armnn::Concat, armnn::Constant, armnn::ConvertBf16ToFp32, armnn::ConvertFp16ToFp32, armnn::ConvertFp32ToBf16, armnn::ConvertFp32ToFp16, armnn::Convolution2d, armnn::Convolution3d, IWorkloadFactory::CreateInput(), armnn::Debug, armnn::DepthToSpace, armnn::DepthwiseConvolution2d, armnn::Dequantize, armnn::DetectionPostProcess, armnn::Division, armnn::ElementwiseUnary, armnn::FakeQuantization, armnn::Fill, armnn::Floor, armnn::FullyConnected, armnn::Gather, armnn::GatherNd, armnn::info, armnn::Input, armnn::InstanceNormalization, armnn::IsBFloat16(), armnn::IsFloat16(), armnn::IsQAsymmS8(), armnn::IsQAsymmU8(), armnn::IsQSymmS16(), armnn::IsQSymmS8(), armnn::IsQuantizedType(), armnn::IsSigned32(), armnn::L2Normalization, armnn::LogicalBinary, armnn::LogicalNot, armnn::LogSoftmax, armnn::Lstm, QueueDescriptor::m_Inputs, WorkloadInfo::m_InputTensorInfos, ElementwiseUnaryDescriptor::m_Operation, WorkloadInfo::m_OutputTensorInfos, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, armnn::Maximum, armnn::Mean, armnn::MemCopy, armnn::MemImport, armnn::Minimum, armnn::Multiplication, armnn::Normalization, armnn::Output, armnn::Pad, armnn::Permute, armnn::Pooling2d, armnn::Pooling3d, armnn::PreCompiled, armnn::Prelu, armnn::QLstm, armnn::Quantize, armnn::Rank, armnn::Reduce, armnn::Reshape, armnn::Resize, armnn::Shape, armnn::Signed32, armnn::Slice, armnn::Softmax, armnn::SpaceToBatchNd, armnn::SpaceToDepth, armnn::Splitter, armnn::Stack, armnn::StridedSlice, armnn::Subtraction, armnn::Transpose, armnn::TransposeConvolution2d, and armnn::UnidirectionalSequenceLstm.

Referenced by RefWorkloadFactory::CreateSubTensorHandle().

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateWorkload(LayerType type,
                                                              const QueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    switch(type)
    {
        case LayerType::Activation:
        {
            auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefActivationWorkload>(*activationQueueDescriptor, info);
        }
        case LayerType::Addition:
        {
            auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);

            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefAdditionWorkload<int32_t>>(*additionQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefAdditionWorkload<float>>(*additionQueueDescriptor, info);
            }
        }
        case LayerType::ArgMinMax:
        {
            auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
            return std::make_unique<RefArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info);
        }
        case LayerType::BatchNormalization:
        {
            auto batchNormQueueDescriptor = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefBatchNormalizationWorkload>(*batchNormQueueDescriptor, info);
        }
        case LayerType::BatchToSpaceNd:
        {
            auto batchToSpaceNdQueueDescriptor
                = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
            return std::make_unique<RefBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info);
        }
        case LayerType::Cast:
        {
            auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
            return std::make_unique<RefCastWorkload>(*castQueueDescriptor, info);
        }
        case LayerType::ChannelShuffle:
        {
            auto channelShuffleQueueDescriptor
                = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
            return std::make_unique<RefChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info);
        }
        case LayerType::Comparison:
        {
            auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
            return std::make_unique<RefComparisonWorkload>(*comparisonQueueDescriptor, info);
        }
        case LayerType::Concat:
        {
            auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConcatWorkload>(*concatQueueDescriptor, info);
        }
        case LayerType::Constant:
        {
            auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConstantWorkload>(*constantQueueDescriptor, info);
        }
        case LayerType::ConvertBf16ToFp32:
        {
            auto convertBf16ToFp32QueueDescriptor
                = PolymorphicDowncast<const ConvertBf16ToFp32QueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvertBf16ToFp32Workload>(*convertBf16ToFp32QueueDescriptor, info);
        }
        case LayerType::ConvertFp16ToFp32:
        {
            auto convertFp16ToFp32QueueDescriptor
                = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info);
        }
        case LayerType::ConvertFp32ToBf16:
        {
            auto convertFp32ToBf16QueueDescriptor
                = PolymorphicDowncast<const ConvertFp32ToBf16QueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvertFp32ToBf16Workload>(*convertFp32ToBf16QueueDescriptor, info);
        }
        case LayerType::ConvertFp32ToFp16:
        {
            auto convertFp32ToFp16QueueDescriptor
                = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor, info);
        }
        case LayerType::Convolution2d:
        {
            auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvolution2dWorkload>(*convolution2dQueueDescriptor, info);
        }
        case LayerType::Convolution3d:
        {
            auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvolution3dWorkload>(*convolution3dQueueDescriptor, info);
        }
        case LayerType::Debug:
        {
            auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
            if (IsBFloat16(info))
            {
                return std::make_unique<RefDebugBFloat16Workload>(*debugQueueDescriptor, info);
            }
            if (IsFloat16(info))
            {
                return std::make_unique<RefDebugFloat16Workload>(*debugQueueDescriptor, info);
            }
            if (IsQSymmS16(info))
            {
                return std::make_unique<RefDebugQSymmS16Workload>(*debugQueueDescriptor, info);
            }
            if (IsQSymmS8(info))
            {
                return std::make_unique<RefDebugQSymmS8Workload>(*debugQueueDescriptor, info);
            }
            if (IsQAsymmU8(info))
            {
                return std::make_unique<RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
            }
            if (IsQAsymmS8(info))
            {
                return std::make_unique<RefDebugQAsymmS8Workload>(*debugQueueDescriptor, info);
            }
            if (IsSigned32(info))
            {
                return std::make_unique<RefDebugSigned32Workload>(*debugQueueDescriptor, info);
            }

            return MakeWorkload<RefDebugFloat32Workload, RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
        }
        case LayerType::DepthToSpace:
        {
            auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info);
        }
        case LayerType::DepthwiseConvolution2d:
        {
            auto depthwiseConvolution2DQueueDescriptor
                = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDepthwiseConvolution2dWorkload>(*depthwiseConvolution2DQueueDescriptor, info);
        }
        case LayerType::Dequantize:
        {
            auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDequantizeWorkload>(*dequantizeQueueDescriptor, info);
        }
        case LayerType::DetectionPostProcess:
        {
            auto detectionPostProcessQueueDescriptor
                = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDetectionPostProcessWorkload>(*detectionPostProcessQueueDescriptor, info);
        }
        case LayerType::Division:
        {
            auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefDivisionWorkload<int32_t>>(*divisionQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefDivisionWorkload<float>>(*divisionQueueDescriptor, info);
            }
        }
        case LayerType::ElementwiseUnary:
        {
            auto elementwiseUnaryQueueDescriptor
                = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
            if ((*elementwiseUnaryQueueDescriptor).m_Parameters.m_Operation == UnaryOperation::LogicalNot)
            {
                return std::make_unique<RefLogicalUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
            }
            return std::make_unique<RefElementwiseUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
        }
        case LayerType::FakeQuantization:
        {
            auto fakeQuantizationQueueDescriptor
                = PolymorphicDowncast<const FakeQuantizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefFakeQuantizationFloat32Workload>(*fakeQuantizationQueueDescriptor, info);
        }
        case LayerType::Fill:
        {
            auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
            return std::make_unique<RefFillWorkload>(*fillQueueDescriptor, info);
        }
        case LayerType::Floor:
        {
            auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
            if(IsQuantizedType(info.m_InputTensorInfos[0].GetDataType()))
            {
                return nullptr;
            }
            else
            {
                return std::make_unique<RefFloorWorkload>(*floorQueueDescriptor, info);
            }
        }
        case LayerType::FullyConnected:
        {
            auto fullyConnectedQueueDescriptor
                = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
            return std::make_unique<RefFullyConnectedWorkload>(*fullyConnectedQueueDescriptor, info);
        }
        case LayerType::Gather:
        {
            auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
            return std::make_unique<RefGatherWorkload>(*gatherQueueDescriptor, info);
        }
        case LayerType::GatherNd:
        {
            auto gatherNdQueueDescriptor = PolymorphicDowncast<const GatherNdQueueDescriptor*>(&descriptor);
            return std::make_unique<RefGatherNdWorkload>(*gatherNdQueueDescriptor, info);
        }
        case LayerType::Input:
        {
            auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Input cannot be zero length");
            }
            if (info.m_OutputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Output cannot be zero length");
            }

            if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: "
                                               "data input and output differ in byte count.");
            }

            return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
        }
        case LayerType::InstanceNormalization:
        {
            auto instanceNormalizationQueueDescriptor
                = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor, info);
        }
        case LayerType::L2Normalization:
        {
            auto l2NormalizationQueueDescriptor
                = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefL2NormalizationWorkload>(*l2NormalizationQueueDescriptor, info);
        }
        case LayerType::LogicalBinary:
        {
            auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
            return std::make_unique<RefLogicalBinaryWorkload>(*logicalBinaryQueueDescriptor, info);
        }
        case LayerType::LogSoftmax:
        {
            auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<RefLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor, info);
        }
        case LayerType::Lstm:
        {
            auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
            return std::make_unique<RefLstmWorkload>(*lstmQueueDescriptor, info);
        }
        case LayerType::Maximum:
        {
            auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefMaximumWorkload<int32_t>>(*maximumQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefMaximumWorkload<float>>(*maximumQueueDescriptor, info);
            }
        }
        case LayerType::Mean:
        {
            auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
            return std::make_unique<RefMeanWorkload>(*meanQueueDescriptor, info);
        }
        case LayerType::MemCopy:
        {
            auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
            if (descriptor.m_Inputs.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory: CreateMemCopy() expected an input tensor.");
            }
            return std::make_unique<CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
        }
        case LayerType::MemImport:
        {
            auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
            if (descriptor.m_Inputs.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory: CreateMemImport() expected an input tensor.");
            }
            return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
        }
        case LayerType::Minimum:
        {
            auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefMinimumWorkload<int32_t>>(*minimumQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefMinimumWorkload<float>>(*minimumQueueDescriptor, info);
            }
        }
        case LayerType::Multiplication:
        {
            auto multiplicationQueueDescriptor
                = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefMultiplicationWorkload<int32_t>>(*multiplicationQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefMultiplicationWorkload<float>>(*multiplicationQueueDescriptor, info);
            }
        }
        case LayerType::Normalization:
        {
            auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefNormalizationWorkload>(*normalizationQueueDescriptor, info);
        }
        case LayerType::Output:
        {
            auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Input cannot be zero length");
            }
            if (info.m_OutputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Output cannot be zero length");
            }
            if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: data input and output "
                                               "differ in byte count.");
            }

            return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
        }
        case LayerType::Pad:
        {
            auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPadWorkload>(*padQueueDescriptor, info);
        }
        case LayerType::Permute:
        {
            auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
            if (IsQSymmS16(info))
            {
                return std::make_unique<RefPermuteQSymm16Workload>(*permuteQueueDescriptor, info);
            }
            else if (IsBFloat16(info))
            {
                return std::make_unique<RefPermuteBFloat16Workload>(*permuteQueueDescriptor, info);
            }
            else if (IsQAsymmS8(info))
            {
                return std::make_unique<RefPermuteQAsymmS8Workload>(*permuteQueueDescriptor, info);
            }
            return MakeWorkloadHelper<RefPermuteFloat16Workload, RefPermuteFloat32Workload, RefPermuteQAsymm8Workload,
                                      NullWorkload, NullWorkload, NullWorkload>(*permuteQueueDescriptor, info);
        }
        case LayerType::Pooling2d:
        {
            auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPooling2dWorkload>(*pooling2dQueueDescriptor, info);
        }
        case LayerType::Pooling3d:
        {
            auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPooling3dWorkload>(*pooling3dQueueDescriptor, info);
        }
        case LayerType::PreCompiled:
        {
            return nullptr;
        }
        case LayerType::Prelu:
        {
            auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPreluWorkload>(*preluQueueDescriptor, info);
        }
        case LayerType::QLstm:
        {
            auto qlstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<RefQLstmWorkload>(*qlstmQueueDescriptor, info);
        }
        case LayerType::Quantize:
        {
            auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefQuantizeWorkload>(*quantizeQueueDescriptor, info);
        }
        case LayerType::Rank:
        {
            auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
            return std::make_unique<RefRankWorkload>(*rankQueueDescriptor, info);
        }
        case LayerType::Reduce:
        {
            auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefReduceWorkload>(*reduceQueueDescriptor, info);
        }
        case LayerType::Reshape:
        {
            auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefReshapeWorkload>(*reshapeQueueDescriptor, info);
        }
        case LayerType::Resize:
        {
            auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefResizeWorkload>(*resizeQueueDescriptor, info);
        }
        case LayerType::Shape:
        {
            auto shapeQueueDescriptor = PolymorphicDowncast<const ShapeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefShapeWorkload>(*shapeQueueDescriptor, info);
        }
        case LayerType::Slice:
        {
            auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSliceWorkload>(*sliceQueueDescriptor, info);
        }
        case LayerType::Softmax:
        {
            auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSoftmaxWorkload>(*softmaxQueueDescriptor, info);
        }
        case LayerType::SpaceToBatchNd:
        {
            auto spaceToBatchNdQueueDescriptor
                = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info);
        }
        case LayerType::SpaceToDepth:
        {
            auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info);
        }
        case LayerType::Splitter:
        {
            auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSplitterWorkload>(*splitterQueueDescriptor, info);
        }
        case LayerType::Stack:
        {
            auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
            return std::make_unique<RefStackWorkload>(*stackQueueDescriptor, info);
        }
        case LayerType::StridedSlice:
        {
            auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefStridedSliceWorkload>(*stridedSliceQueueDescriptor, info);
        }
        case LayerType::Subtraction:
        {
            auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefSubtractionWorkload<int32_t>>(*subtractionQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefSubtractionWorkload<float>>(*subtractionQueueDescriptor, info);
            }
        }
        case LayerType::Transpose:
        {
            auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
            if (IsQSymmS16(info))
            {
                return std::make_unique<RefTransposeQSymm16Workload>(*transposeQueueDescriptor, info);
            }
            else if (IsBFloat16(info))
            {
                return std::make_unique<RefTransposeBFloat16Workload>(*transposeQueueDescriptor, info);
            }
            else if (IsQAsymmS8(info))
            {
                return std::make_unique<RefTransposeQAsymmS8Workload>(*transposeQueueDescriptor, info);
            }
            return MakeWorkloadHelper<RefTransposeFloat16Workload, RefTransposeFloat32Workload,
                                      RefTransposeQAsymm8Workload, NullWorkload, NullWorkload, NullWorkload>
                                      (*transposeQueueDescriptor, info);
        }
        case LayerType::TransposeConvolution2d:
        {
            auto transposeConvolution2dQueueDescriptor
                = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor, info);
        }
        case LayerType::UnidirectionalSequenceLstm:
        {
            auto unidirectionalSequenceLstmQueueDescriptor
                = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<RefUnidirectionalSequenceLstmWorkload>(*unidirectionalSequenceLstmQueueDescriptor,
                                                                           info);
        }
        default:
            return nullptr;
    }
}
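
A usage sketch of the dispatch pattern this switch implements (not from the generated reference; tensor handles, tensor info, and header locations are placeholders/assumptions):

#include <RefWorkloadFactory.hpp>

using namespace armnn;

std::unique_ptr<IWorkload> MakeActivation(const RefWorkloadFactory& factory,
                                          ITensorHandle* input,
                                          ITensorHandle* output,
                                          const TensorInfo& tensorInfo)
{
    // Fill the layer-specific queue descriptor...
    ActivationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Function = ActivationFunction::ReLu;
    descriptor.m_Inputs.push_back(input);
    descriptor.m_Outputs.push_back(output);

    // ...and the matching WorkloadInfo.
    WorkloadInfo info;
    info.m_InputTensorInfos.push_back(tensorInfo);
    info.m_OutputTensorInfos.push_back(tensorInfo);

    // Dispatches to the LayerType::Activation case above and yields a
    // RefActivationWorkload (or nullptr for unsupported layer types).
    return factory.CreateWorkload(LayerType::Activation, descriptor, info);
}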

◆ GetBackendId()

const BackendId & GetBackendId ( ) const
overridevirtual

Implements IWorkloadFactory.

Definition at line 93 of file RefWorkloadFactory.cpp.

Referenced by RefWorkloadFactory::~RefWorkloadFactory().

const BackendId& RefWorkloadFactory::GetBackendId() const
{
    return s_Id;
}
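
The static id returned here identifies the reference backend (conventionally the "CpuRef" backend id in ArmNN).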

◆ IsLayerSupported() [1/2]

bool IsLayerSupported ( const Layer &  layer,
Optional< DataType >  dataType,
std::string &  outReasonIfUnsupported 
)
static

Definition at line 98 of file RefWorkloadFactory.cpp.

References IWorkloadFactory::IsLayerSupported().

Referenced by RefWorkloadFactory::~RefWorkloadFactory().

bool RefWorkloadFactory::IsLayerSupported(const Layer& layer,
                                          Optional<DataType> dataType,
                                          std::string& outReasonIfUnsupported)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
}

◆ IsLayerSupported() [2/2]

bool IsLayerSupported ( const IConnectableLayer &  layer,
Optional< DataType >  dataType,
std::string &  outReasonIfUnsupported,
const ModelOptions &  modelOptions 
)
static

Definition at line 105 of file RefWorkloadFactory.cpp.

References IWorkloadFactory::IsLayerSupported().

bool RefWorkloadFactory::IsLayerSupported(const IConnectableLayer& layer,
                                          Optional<DataType> dataType,
                                          std::string& outReasonIfUnsupported,
                                          const ModelOptions& modelOptions)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported, modelOptions);
}
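
A hedged sketch of querying reference-backend support before creating a workload (the IConnectableLayer is assumed to come from an INetwork under construction):

#include <RefWorkloadFactory.hpp>
#include <string>

using namespace armnn;

bool CheckSupport(const IConnectableLayer& layer)
{
    std::string reason;
    bool supported = RefWorkloadFactory::IsLayerSupported(
        layer, DataType::Float32, reason, ModelOptions{});
    if (!supported)
    {
        // 'reason' now explains why the reference backend rejected the layer.
    }
    return supported;
}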

◆ SupportsSubTensors()

bool SupportsSubTensors ( ) const
inlineoverridevirtual

Implements IWorkloadFactory.

Definition at line 49 of file RefWorkloadFactory.hpp.

References ARMNN_DEPRECATED_MSG.

{ return false; }

The documentation for this class was generated from the following files:

RefWorkloadFactory.hpp
RefWorkloadFactory.cpp