ArmNN 22.11
RefWorkloadFactory Class Reference

#include <RefWorkloadFactory.hpp>

Inheritance diagram for RefWorkloadFactory:
IWorkloadFactory

Public Member Functions

 RefWorkloadFactory (const std::shared_ptr< RefMemoryManager > &memoryManager)
 
 RefWorkloadFactory ()
 
 ~RefWorkloadFactory ()
 
const BackendId & GetBackendId () const override
 
bool SupportsSubTensors () const override
 
std::unique_ptr< ITensorHandle > CreateSubTensorHandle (ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const override
 
std::unique_ptr< ITensorHandle > CreateTensorHandle (const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const override
 
std::unique_ptr< ITensorHandle > CreateTensorHandle (const TensorInfo &tensorInfo, DataLayout dataLayout, const bool IsMemoryManaged=true) const override
 
std::unique_ptr< IWorkload > CreateWorkload (LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const override
 
- Public Member Functions inherited from IWorkloadFactory
virtual ~IWorkloadFactory ()
 
virtual void AfterWorkloadsCreated ()
 

Static Public Member Functions

static bool IsLayerSupported (const Layer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported, const ModelOptions &modelOptions)
 
- Static Public Member Functions inherited from IWorkloadFactory
static bool IsLayerSupported (const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported, const ModelOptions &modelOptions)
 
static bool IsLayerSupported (const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported, const ModelOptions &modelOptions)
 

Detailed Description

Definition at line 30 of file RefWorkloadFactory.hpp.

Constructor & Destructor Documentation

◆ RefWorkloadFactory() [1/2]

RefWorkloadFactory ( const std::shared_ptr< RefMemoryManager > &  memoryManager)
explicit

Definition at line 83 of file RefWorkloadFactory.cpp.

    : m_MemoryManager(memoryManager)
{
}

◆ RefWorkloadFactory() [2/2]

RefWorkloadFactory ( )

Definition at line 88 of file RefWorkloadFactory.cpp.

    : m_MemoryManager(new RefMemoryManager())
{
}
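The two constructors differ only in where the RefMemoryManager comes from. A minimal construction sketch (the include paths are assumptions; they depend on how the ArmNN source tree is laid out):

#include "RefWorkloadFactory.hpp" // assumed include path
#include "RefMemoryManager.hpp"   // assumed include path
#include <memory>

using namespace armnn;

int main()
{
    // Default constructor: the factory allocates its own RefMemoryManager.
    RefWorkloadFactory standaloneFactory;

    // Explicit constructor: share one memory manager so tensor handles from
    // several factories draw on the same allocation pool.
    auto memoryManager = std::make_shared<RefMemoryManager>();
    RefWorkloadFactory sharedFactory(memoryManager);
    return 0;
}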

◆ ~RefWorkloadFactory()

~RefWorkloadFactory ( )

Member Function Documentation

◆ CreateSubTensorHandle()

std::unique_ptr<ITensorHandle> CreateSubTensorHandle ( ITensorHandle &  parent,
TensorShape const &  subTensorShape,
unsigned int const *  subTensorOrigin 
) const
inlineoverridevirtual

Implements IWorkloadFactory.

Definition at line 52 of file RefWorkloadFactory.hpp.

References ARMNN_DEPRECATED_MSG, ARMNN_DEPRECATED_MSG_REMOVAL_DATE, RefWorkloadFactory::CreateTensorHandle(), RefWorkloadFactory::CreateWorkload(), armnn::IgnoreUnused(), armnn::Info, and armnn::info.

{
    IgnoreUnused(parent, subTensorShape, subTensorOrigin);
    return nullptr;
}
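Because the reference backend reports SupportsSubTensors() as false, this override unconditionally returns nullptr. A caller-side sketch (factory, parentHandle, subShape and origin are assumed to already exist):

// Callers must tolerate a null result from the reference backend.
std::unique_ptr<ITensorHandle> subHandle =
    factory.CreateSubTensorHandle(*parentHandle, subShape, origin);
if (subHandle == nullptr)
{
    // Fall back to a full tensor handle plus an explicit copy or split.
}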

◆ CreateTensorHandle() [1/2]

std::unique_ptr< ITensorHandle > CreateTensorHandle ( const TensorInfo &  tensorInfo,
const bool  IsMemoryManaged = true 
) const
overridevirtual

Implements IWorkloadFactory.

Definition at line 113 of file RefWorkloadFactory.cpp.

References armnn::Malloc.

Referenced by RefWorkloadFactory::CreateSubTensorHandle().

{
    if (isMemoryManaged)
    {
        return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
    }
    else
    {
        return std::make_unique<RefTensorHandle>(tensorInfo, static_cast<unsigned int>(MemorySource::Malloc));
    }
}
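A usage sketch (assuming factory is an already-constructed RefWorkloadFactory): the flag selects between pooled storage owned by the factory's RefMemoryManager and plain malloc-backed storage.

TensorInfo tensorInfo(TensorShape({1, 3, 224, 224}), DataType::Float32);

// Managed: storage is acquired from the factory's RefMemoryManager pool.
auto managedHandle = factory.CreateTensorHandle(tensorInfo, true);

// Unmanaged: the handle owns ordinary malloc'd memory instead.
auto unmanagedHandle = factory.CreateTensorHandle(tensorInfo, false);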

◆ CreateTensorHandle() [2/2]

std::unique_ptr< ITensorHandle > CreateTensorHandle ( const TensorInfo &  tensorInfo,
DataLayout  dataLayout,
const bool  IsMemoryManaged = true 
) const
overridevirtual

Implements IWorkloadFactory.

Definition at line 126 of file RefWorkloadFactory.cpp.

References armnn::IgnoreUnused(), and armnn::Malloc.

{
    // For Ref it is okay to make the TensorHandle memory managed as it can also store a pointer
    // to unmanaged memory. This also ensures memory alignment.
    IgnoreUnused(isMemoryManaged, dataLayout);

    if (isMemoryManaged)
    {
        return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
    }
    else
    {
        return std::make_unique<RefTensorHandle>(tensorInfo, static_cast<unsigned int>(MemorySource::Malloc));
    }
}
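As the IgnoreUnused call above shows, the layout argument has no effect on the reference backend; it exists to satisfy the IWorkloadFactory interface. A sketch of the overload (factory and tensorInfo as in the previous example):

// Behaves identically to the two-argument overload on the Ref backend.
auto handle = factory.CreateTensorHandle(tensorInfo, DataLayout::NHWC, true);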

◆ CreateWorkload()

std::unique_ptr< IWorkload > CreateWorkload ( LayerType  type,
const QueueDescriptor &  descriptor,
const WorkloadInfo &  info 
) const
overridevirtual

Reimplemented from IWorkloadFactory.

Definition at line 144 of file RefWorkloadFactory.cpp.

References armnn::Activation, armnn::Addition, armnn::ArgMinMax, armnn::BatchMatMul, armnn::BatchNormalization, armnn::BatchToSpaceNd, armnn::Cast, armnn::ChannelShuffle, armnn::Comparison, armnn::Concat, armnn::Constant, armnn::ConvertBf16ToFp32, armnn::ConvertFp16ToFp32, armnn::ConvertFp32ToBf16, armnn::ConvertFp32ToFp16, armnn::Convolution2d, armnn::Convolution3d, armnn::Debug, armnn::DepthToSpace, armnn::DepthwiseConvolution2d, armnn::Dequantize, armnn::DetectionPostProcess, armnn::Division, armnn::ElementwiseUnary, armnn::FakeQuantization, armnn::Fill, armnn::Floor, armnn::FullyConnected, armnn::Gather, armnn::GatherNd, armnn::info, armnn::Input, armnn::InstanceNormalization, armnn::IsBFloat16(), armnn::IsFloat16(), armnn::IsQAsymmS8(), armnn::IsQAsymmU8(), armnn::IsQSymmS16(), armnn::IsQSymmS8(), armnn::IsQuantizedType(), armnn::IsSigned32(), armnn::L2Normalization, armnn::LogicalBinary, armnn::LogicalNot, armnn::LogSoftmax, armnn::Lstm, QueueDescriptor::m_Inputs, WorkloadInfo::m_InputTensorInfos, ElementwiseUnaryDescriptor::m_Operation, WorkloadInfo::m_OutputTensorInfos, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, armnn::Maximum, armnn::Mean, armnn::MemCopy, armnn::MemImport, armnn::Minimum, armnn::Multiplication, armnn::Normalization, armnn::Output, armnn::Pad, armnn::Permute, armnn::Pooling2d, armnn::Pooling3d, armnn::PreCompiled, armnn::Prelu, armnn::QLstm, armnn::Quantize, armnn::Rank, armnn::Reduce, armnn::Reshape, armnn::Resize, armnn::Shape, armnn::Signed32, armnn::Slice, armnn::Softmax, armnn::SpaceToBatchNd, armnn::SpaceToDepth, armnn::Splitter, armnn::Stack, armnn::StridedSlice, armnn::Subtraction, armnn::Transpose, armnn::TransposeConvolution2d, and armnn::UnidirectionalSequenceLstm.

Referenced by RefWorkloadFactory::CreateSubTensorHandle().

{
    switch(type)
    {
        case LayerType::Activation:
        {
            auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefActivationWorkload>(*activationQueueDescriptor, info);
        }
        case LayerType::Addition:
        {
            auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefAdditionWorkload<int32_t>>(*additionQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefAdditionWorkload<float>>(*additionQueueDescriptor, info);
            }
        }
        case LayerType::ArgMinMax:
        {
            auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
            return std::make_unique<RefArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info);
        }
        case LayerType::BatchMatMul:
        {
            auto batchMatMulQueueDescriptor = PolymorphicDowncast<const BatchMatMulQueueDescriptor*>(&descriptor);
            return std::make_unique<RefBatchMatMulWorkload>(*batchMatMulQueueDescriptor, info);
        }
        case LayerType::BatchNormalization:
        {
            auto batchNormQueueDescriptor = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefBatchNormalizationWorkload>(*batchNormQueueDescriptor, info);
        }
        case LayerType::BatchToSpaceNd:
        {
            auto batchToSpaceNdQueueDescriptor
                = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
            return std::make_unique<RefBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info);
        }
        case LayerType::Cast:
        {
            auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
            return std::make_unique<RefCastWorkload>(*castQueueDescriptor, info);
        }
        case LayerType::ChannelShuffle:
        {
            auto channelShuffleQueueDescriptor
                = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
            return std::make_unique<RefChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info);
        }
        case LayerType::Comparison:
        {
            auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
            return std::make_unique<RefComparisonWorkload>(*comparisonQueueDescriptor, info);
        }
        case LayerType::Concat:
        {
            auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConcatWorkload>(*concatQueueDescriptor, info);
        }
        case LayerType::Constant:
        {
            auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConstantWorkload>(*constantQueueDescriptor, info);
        }
        case LayerType::ConvertBf16ToFp32:
        {
            auto convertBf16ToFp32QueueDescriptor
                = PolymorphicDowncast<const ConvertBf16ToFp32QueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvertBf16ToFp32Workload>(*convertBf16ToFp32QueueDescriptor, info);
        }
        case LayerType::ConvertFp16ToFp32:
        {
            auto convertFp16ToFp32QueueDescriptor
                = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info);
        }
        case LayerType::ConvertFp32ToBf16:
        {
            auto convertFp32ToBf16QueueDescriptor
                = PolymorphicDowncast<const ConvertFp32ToBf16QueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvertFp32ToBf16Workload>(*convertFp32ToBf16QueueDescriptor, info);
        }
        case LayerType::ConvertFp32ToFp16:
        {
            auto convertFp32ToFp16QueueDescriptor
                = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor, info);
        }
        case LayerType::Convolution2d:
        {
            auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvolution2dWorkload>(*convolution2dQueueDescriptor, info);
        }
        case LayerType::Convolution3d:
        {
            auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvolution3dWorkload>(*convolution3dQueueDescriptor, info);
        }
        case LayerType::Debug:
        {
            auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
            if (IsBFloat16(info))
            {
                return std::make_unique<RefDebugBFloat16Workload>(*debugQueueDescriptor, info);
            }
            if (IsFloat16(info))
            {
                return std::make_unique<RefDebugFloat16Workload>(*debugQueueDescriptor, info);
            }
            if (IsQSymmS16(info))
            {
                return std::make_unique<RefDebugQSymmS16Workload>(*debugQueueDescriptor, info);
            }
            if (IsQSymmS8(info))
            {
                return std::make_unique<RefDebugQSymmS8Workload>(*debugQueueDescriptor, info);
            }
            if (IsQAsymmU8(info))
            {
                return std::make_unique<RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
            }
            if (IsQAsymmS8(info))
            {
                return std::make_unique<RefDebugQAsymmS8Workload>(*debugQueueDescriptor, info);
            }
            if (IsSigned32(info))
            {
                return std::make_unique<RefDebugSigned32Workload>(*debugQueueDescriptor, info);
            }

            return MakeWorkload<RefDebugFloat32Workload, RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
        }
        case LayerType::DepthToSpace:
        {
            auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info);
        }
        case LayerType::DepthwiseConvolution2d:
        {
            auto depthwiseConvolution2DQueueDescriptor
                = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDepthwiseConvolution2dWorkload>(*depthwiseConvolution2DQueueDescriptor, info);
        }
        case LayerType::Dequantize:
        {
            auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDequantizeWorkload>(*dequantizeQueueDescriptor, info);
        }
        case LayerType::DetectionPostProcess:
        {
            auto detectionPostProcessQueueDescriptor
                = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDetectionPostProcessWorkload>(*detectionPostProcessQueueDescriptor, info);
        }
        case LayerType::Division:
        {
            auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefDivisionWorkload<int32_t>>(*divisionQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefDivisionWorkload<float>>(*divisionQueueDescriptor, info);
            }
        }
        case LayerType::ElementwiseUnary:
        {
            auto elementwiseUnaryQueueDescriptor
                = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
            if ((*elementwiseUnaryQueueDescriptor).m_Parameters.m_Operation == UnaryOperation::LogicalNot)
            {
                return std::make_unique<RefLogicalUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
            }
            return std::make_unique<RefElementwiseUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
        }
        case LayerType::FakeQuantization:
        {
            auto fakeQuantizationQueueDescriptor
                = PolymorphicDowncast<const FakeQuantizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefFakeQuantizationFloat32Workload>(*fakeQuantizationQueueDescriptor, info);
        }
        case LayerType::Fill:
        {
            auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
            return std::make_unique<RefFillWorkload>(*fillQueueDescriptor, info);
        }
        case LayerType::Floor:
        {
            auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
            if (IsQuantizedType(info.m_InputTensorInfos[0].GetDataType()))
            {
                return nullptr;
            }
            else
            {
                return std::make_unique<RefFloorWorkload>(*floorQueueDescriptor, info);
            }
        }
        case LayerType::FullyConnected:
        {
            auto fullyConnectedQueueDescriptor
                = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
            return std::make_unique<RefFullyConnectedWorkload>(*fullyConnectedQueueDescriptor, info);
        }
        case LayerType::Gather:
        {
            auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
            return std::make_unique<RefGatherWorkload>(*gatherQueueDescriptor, info);
        }
        case LayerType::GatherNd:
        {
            auto gatherNdQueueDescriptor = PolymorphicDowncast<const GatherNdQueueDescriptor*>(&descriptor);
            return std::make_unique<RefGatherNdWorkload>(*gatherNdQueueDescriptor, info);
        }
        case LayerType::Input:
        {
            auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Input cannot be zero length");
            }
            if (info.m_OutputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Output cannot be zero length");
            }

            if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: "
                                               "data input and output differ in byte count.");
            }

            return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
        }
        case LayerType::InstanceNormalization:
        {
            auto instanceNormalizationQueueDescriptor
                = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor, info);
        }
        case LayerType::L2Normalization:
        {
            auto l2NormalizationQueueDescriptor
                = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefL2NormalizationWorkload>(*l2NormalizationQueueDescriptor, info);
        }
        case LayerType::LogicalBinary:
        {
            auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
            return std::make_unique<RefLogicalBinaryWorkload>(*logicalBinaryQueueDescriptor, info);
        }
        case LayerType::LogSoftmax:
        {
            auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<RefLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor, info);
        }
        case LayerType::Lstm:
        {
            auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
            return std::make_unique<RefLstmWorkload>(*lstmQueueDescriptor, info);
        }
        case LayerType::Maximum:
        {
            auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefMaximumWorkload<int32_t>>(*maximumQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefMaximumWorkload<float>>(*maximumQueueDescriptor, info);
            }
        }
        case LayerType::Mean:
        {
            auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
            return std::make_unique<RefMeanWorkload>(*meanQueueDescriptor, info);
        }
        case LayerType::MemCopy:
        {
            auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
            if (descriptor.m_Inputs.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory: CreateMemCopy() expected an input tensor.");
            }
            return std::make_unique<CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
        }
        case LayerType::MemImport:
        {
            auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
            if (descriptor.m_Inputs.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory: CreateMemImport() expected an input tensor.");
            }
            return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
        }
        case LayerType::Minimum:
        {
            auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefMinimumWorkload<int32_t>>(*minimumQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefMinimumWorkload<float>>(*minimumQueueDescriptor, info);
            }
        }
        case LayerType::Multiplication:
        {
            auto multiplicationQueueDescriptor
                = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefMultiplicationWorkload<int32_t>>(*multiplicationQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefMultiplicationWorkload<float>>(*multiplicationQueueDescriptor, info);
            }
        }
        case LayerType::Normalization:
        {
            auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefNormalizationWorkload>(*normalizationQueueDescriptor, info);
        }
        case LayerType::Output:
        {
            auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Input cannot be zero length");
            }
            if (info.m_OutputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Output cannot be zero length");
            }
            if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: data input and output "
                                               "differ in byte count.");
            }

            return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
        }
        case LayerType::Pad:
        {
            auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPadWorkload>(*padQueueDescriptor, info);
        }
        case LayerType::Permute:
        {
            auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
            if (IsQSymmS16(info))
            {
                return std::make_unique<RefPermuteQSymm16Workload>(*permuteQueueDescriptor, info);
            }
            else if (IsBFloat16(info))
            {
                return std::make_unique<RefPermuteBFloat16Workload>(*permuteQueueDescriptor, info);
            }
            else if (IsQAsymmS8(info))
            {
                return std::make_unique<RefPermuteQAsymmS8Workload>(*permuteQueueDescriptor, info);
            }
            return MakeWorkloadHelper<RefPermuteFloat16Workload, RefPermuteFloat32Workload, RefPermuteQAsymm8Workload,
                                      NullWorkload, NullWorkload, NullWorkload>(*permuteQueueDescriptor, info);
        }
        case LayerType::Pooling2d:
        {
            auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPooling2dWorkload>(*pooling2dQueueDescriptor, info);
        }
        case LayerType::Pooling3d:
        {
            auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPooling3dWorkload>(*pooling3dQueueDescriptor, info);
        }
        case LayerType::PreCompiled:
        {
            return nullptr;
        }
        case LayerType::Prelu:
        {
            auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPreluWorkload>(*preluQueueDescriptor, info);
        }
        case LayerType::QLstm:
        {
            auto qlstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<RefQLstmWorkload>(*qlstmQueueDescriptor, info);
        }
        case LayerType::Quantize:
        {
            auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefQuantizeWorkload>(*quantizeQueueDescriptor, info);
        }
        case LayerType::Rank:
        {
            auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
            return std::make_unique<RefRankWorkload>(*rankQueueDescriptor, info);
        }
        case LayerType::Reduce:
        {
            auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefReduceWorkload>(*reduceQueueDescriptor, info);
        }
        case LayerType::Reshape:
        {
            auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefReshapeWorkload>(*reshapeQueueDescriptor, info);
        }
        case LayerType::Resize:
        {
            auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefResizeWorkload>(*resizeQueueDescriptor, info);
        }
        case LayerType::Shape:
        {
            auto shapeQueueDescriptor = PolymorphicDowncast<const ShapeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefShapeWorkload>(*shapeQueueDescriptor, info);
        }
        case LayerType::Slice:
        {
            auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSliceWorkload>(*sliceQueueDescriptor, info);
        }
        case LayerType::Softmax:
        {
            auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSoftmaxWorkload>(*softmaxQueueDescriptor, info);
        }
        case LayerType::SpaceToBatchNd:
        {
            auto spaceToBatchNdQueueDescriptor
                = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info);
        }
        case LayerType::SpaceToDepth:
        {
            auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info);
        }
        case LayerType::Splitter:
        {
            auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSplitterWorkload>(*splitterQueueDescriptor, info);
        }
        case LayerType::Stack:
        {
            auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
            return std::make_unique<RefStackWorkload>(*stackQueueDescriptor, info);
        }
        case LayerType::StridedSlice:
        {
            auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefStridedSliceWorkload>(*stridedSliceQueueDescriptor, info);
        }
        case LayerType::Subtraction:
        {
            auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefSubtractionWorkload<int32_t>>(*subtractionQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefSubtractionWorkload<float>>(*subtractionQueueDescriptor, info);
            }
        }
        case LayerType::Transpose:
        {
            auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
            if (IsQSymmS16(info))
            {
                return std::make_unique<RefTransposeQSymm16Workload>(*transposeQueueDescriptor, info);
            }
            else if (IsBFloat16(info))
            {
                return std::make_unique<RefTransposeBFloat16Workload>(*transposeQueueDescriptor, info);
            }
            else if (IsQAsymmS8(info))
            {
                return std::make_unique<RefTransposeQAsymmS8Workload>(*transposeQueueDescriptor, info);
            }
            return MakeWorkloadHelper<RefTransposeFloat16Workload, RefTransposeFloat32Workload,
                                      RefTransposeQAsymm8Workload, NullWorkload, NullWorkload, NullWorkload>
                                      (*transposeQueueDescriptor, info);
        }
        case LayerType::TransposeConvolution2d:
        {
            auto transposeConvolution2dQueueDescriptor
                = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor, info);
        }
        case LayerType::UnidirectionalSequenceLstm:
        {
            auto unidirectionalSequenceLstmQueueDescriptor
                = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<RefUnidirectionalSequenceLstmWorkload>(*unidirectionalSequenceLstmQueueDescriptor,
                                                                           info);
        }
        default:
            return nullptr;
    }
}
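A usage sketch for this factory entry point (a hypothetical Activation workload; the descriptor's input and output handles would still have to be populated before the workload could execute):

ActivationQueueDescriptor activationDescriptor;
activationDescriptor.m_Parameters.m_Function = ActivationFunction::ReLu;

WorkloadInfo workloadInfo; // assumed to be filled with matching tensor infos

std::unique_ptr<IWorkload> workload =
    factory.CreateWorkload(LayerType::Activation, activationDescriptor, workloadInfo);
if (workload == nullptr)
{
    // Unsupported combinations (for example LayerType::PreCompiled, or a
    // quantized Floor) come back as nullptr rather than throwing.
}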

◆ GetBackendId()

const BackendId & GetBackendId ( ) const
overridevirtual

Implements IWorkloadFactory.

Definition at line 93 of file RefWorkloadFactory.cpp.

Referenced by RefWorkloadFactory::~RefWorkloadFactory().

{
    return s_Id;
}
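A trivial sketch: the returned id names the reference backend (conventionally "CpuRef" in ArmNN).

const BackendId& backendId = factory.GetBackendId();
std::cout << backendId.Get() << std::endl; // assumed to print "CpuRef"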

◆ IsLayerSupported() [1/2]

bool IsLayerSupported ( const Layer &  layer,
Optional< DataType >  dataType,
std::string &  outReasonIfUnsupported 
)
static

Definition at line 98 of file RefWorkloadFactory.cpp.

References IWorkloadFactory::IsLayerSupported().

Referenced by RefWorkloadFactory::~RefWorkloadFactory().

{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
}

◆ IsLayerSupported() [2/2]

bool IsLayerSupported ( const IConnectableLayer &  layer,
Optional< DataType >  dataType,
std::string &  outReasonIfUnsupported,
const ModelOptions &  modelOptions 
)
static

Definition at line 105 of file RefWorkloadFactory.cpp.

References IWorkloadFactory::IsLayerSupported().

{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported, modelOptions);
}
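A query sketch (layer is assumed to be an IConnectableLayer obtained from a built INetwork):

std::string reasonIfUnsupported;
bool supported = RefWorkloadFactory::IsLayerSupported(
    layer,
    Optional<DataType>(DataType::Float32),
    reasonIfUnsupported,
    ModelOptions{});
if (!supported)
{
    // reasonIfUnsupported now explains why the reference backend refuses it.
}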

◆ SupportsSubTensors()

bool SupportsSubTensors ( ) const
inlineoverridevirtual

Implements IWorkloadFactory.

Definition at line 49 of file RefWorkloadFactory.hpp.

References ARMNN_DEPRECATED_MSG.

{ return false; }

The documentation for this class was generated from the following files:

RefWorkloadFactory.hpp
RefWorkloadFactory.cpp