ArmNN 24.02
NeonWorkloadFactory Class Reference

#include <NeonWorkloadFactory.hpp>

Inheritance diagram for NeonWorkloadFactory:
Collaboration diagram for NeonWorkloadFactory:

Public Member Functions

 NeonWorkloadFactory (const std::shared_ptr< NeonMemoryManager > &memoryManager)
 
 NeonWorkloadFactory (const std::shared_ptr< NeonMemoryManager > &memoryManager, const IBackendInternal::IBackendSpecificModelContextPtr &modelContextPtr)
 
const BackendId & GetBackendId () const override
 
bool SupportsSubTensors () const override
 
std::unique_ptr< ITensorHandle > CreateSubTensorHandle (ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const override
 
std::unique_ptr< ITensorHandle > CreateTensorHandle (const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const override
 
std::unique_ptr< ITensorHandle > CreateTensorHandle (const TensorInfo &tensorInfo, DataLayout dataLayout, const bool IsMemoryManaged=true) const override
 
std::unique_ptr< IWorkload > CreateWorkload (LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const override
 Backends should implement their own CreateWorkload function with a switch statement. More...
 
- Public Member Functions inherited from IWorkloadFactory
virtual ~IWorkloadFactory ()
 
virtual void AfterWorkloadsCreated ()
 

Static Public Member Functions

static bool IsLayerSupported (const Layer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported, const ModelOptions &modelOptions)
 
- Static Public Member Functions inherited from IWorkloadFactory
static bool IsLayerSupported (const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported, const ModelOptions &modelOptions)
 
static bool IsLayerSupported (const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported, const ModelOptions &modelOptions)
 

Detailed Description

Definition at line 20 of file NeonWorkloadFactory.hpp.

Constructor & Destructor Documentation

◆ NeonWorkloadFactory() [1/2]

NeonWorkloadFactory ( const std::shared_ptr< NeonMemoryManager > &  memoryManager)

Definition at line 73 of file NeonWorkloadFactory.cpp.

74  : m_MemoryManager(memoryManager), m_ModelContextPtr(IBackendInternal::IBackendSpecificModelContextPtr{})
75 {
76  SetNumberOfThreads();
77 }

◆ NeonWorkloadFactory() [2/2]

NeonWorkloadFactory ( const std::shared_ptr< NeonMemoryManager > &  memoryManager,
const IBackendInternal::IBackendSpecificModelContextPtr &  modelContextPtr 
)

Definition at line 79 of file NeonWorkloadFactory.cpp.

81  : m_MemoryManager(memoryManager), m_ModelContextPtr(modelContextPtr)
82 {
83  SetNumberOfThreads();
84 }
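The factory is normally instantiated by the Neon backend rather than by user code, but it can be constructed directly, for example in tests. A minimal sketch, assuming the internal backend headers NeonWorkloadFactory.hpp and NeonMemoryManager.hpp are on the include path:

    // Sketch only: NeonWorkloadFactory and NeonMemoryManager are internal
    // ArmNN backend classes, so include paths may differ between releases.
    auto memoryManager = std::make_shared<armnn::NeonMemoryManager>();

    // First overload: no backend-specific model context.
    armnn::NeonWorkloadFactory factory(memoryManager);

    // Second overload: additionally pass a model context, e.g. one carrying
    // backend options such as FastMathEnabled.
    // armnn::IBackendInternal::IBackendSpecificModelContextPtr modelContext = ...;
    // armnn::NeonWorkloadFactory factoryWithOptions(memoryManager, modelContext);

Both constructors call SetNumberOfThreads(), which applies any thread count configured in the backend options to the Arm Compute Library scheduler.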

Member Function Documentation

◆ CreateSubTensorHandle()

std::unique_ptr< ITensorHandle > CreateSubTensorHandle ( ITensorHandle &  parent,
TensorShape const &  subTensorShape,
unsigned int const *  subTensorOrigin 
) const
overridevirtual

Reimplemented from WorkloadFactoryBase.

Definition at line 86 of file NeonWorkloadFactory.cpp.

89 {
90  const arm_compute::TensorShape shape = armcomputetensorutils::BuildArmComputeTensorShape(subTensorShape);
 91 
 92  arm_compute::Coordinates coords;
 93  coords.set_num_dimensions(subTensorShape.GetNumDimensions());
94  for (unsigned int i = 0; i < subTensorShape.GetNumDimensions(); i++)
95  {
96  // Arm compute indexes tensor coords in reverse order.
97  unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
98  coords.set(i, armnn::numeric_cast<int>(subTensorOrigin[revertedIndex]));
99  }
100 
101  const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
102  if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
103  {
104  return nullptr;
105  }
106 
107  return std::make_unique<NeonSubTensorHandle>(
108  PolymorphicDowncast<IAclTensorHandle*>(&parent), shape, coords);
109 }
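A usage sketch with illustrative shapes only: create a handle for a 4D parent tensor, then request a sub-tensor view over its first channels. The origin coordinates are given in ArmNN order; the function reverses them into Arm Compute order internally.

    // Assumes 'factory' is a constructed NeonWorkloadFactory.
    armnn::TensorInfo parentInfo({ 1, 8, 16, 16 }, armnn::DataType::Float32);
    std::unique_ptr<armnn::ITensorHandle> parent = factory.CreateTensorHandle(parentInfo);

    // View covering the first 4 channels of the parent tensor.
    armnn::TensorShape subShape({ 1, 4, 16, 16 });
    const unsigned int origin[] = { 0, 0, 0, 0 };

    std::unique_ptr<armnn::ITensorHandle> sub =
        factory.CreateSubTensorHandle(*parent, subShape, origin);

    // nullptr is returned when the requested sub-tensor does not fit inside
    // the parent, so the result must be checked before use.
    if (sub == nullptr) { /* fall back to a separate tensor plus a copy */ }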

◆ CreateTensorHandle() [1/2]

std::unique_ptr< ITensorHandle > CreateTensorHandle ( const TensorInfo &  tensorInfo,
const bool  IsMemoryManaged = true 
) const
overridevirtual

Reimplemented from WorkloadFactoryBase.

Definition at line 111 of file NeonWorkloadFactory.cpp.

113 {
114  auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo);
115  if (IsMemoryManaged)
116  {
117  tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
118  }
119  return tensorHandle;
120 }
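A short sketch of the two modes, assuming 'factory' was built with a non-null NeonMemoryManager:

    armnn::TensorInfo info({ 1, 3, 224, 224 }, armnn::DataType::Float32);

    // Memory-managed handle: the tensor joins the inter-layer memory group,
    // so its storage is provided by the shared NeonMemoryManager.
    auto managed = factory.CreateTensorHandle(info);

    // Unmanaged handle: no memory group is set, so the handle manages its own
    // allocation; typically used for network inputs and outputs.
    auto unmanaged = factory.CreateTensorHandle(info, /*IsMemoryManaged=*/ false);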

◆ CreateTensorHandle() [2/2]

std::unique_ptr< ITensorHandle > CreateTensorHandle ( const TensorInfo &  tensorInfo,
DataLayout  dataLayout,
const bool  IsMemoryManaged = true 
) const
overridevirtual

Reimplemented from WorkloadFactoryBase.

Definition at line 122 of file NeonWorkloadFactory.cpp.

125 {
126  auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo, dataLayout);
127  if (IsMemoryManaged)
128  {
129  tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
130  }
131  return tensorHandle;
132 }
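This overload additionally fixes the data layout of the backing tensor, for example:

    // Sketch: request NHWC storage for the handle.
    armnn::TensorInfo info({ 1, 224, 224, 3 }, armnn::DataType::Float32);
    auto nhwcHandle = factory.CreateTensorHandle(info, armnn::DataLayout::NHWC);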

◆ CreateWorkload()

std::unique_ptr< IWorkload > CreateWorkload ( LayerType  type,
const QueueDescriptor &  descriptor,
const WorkloadInfo &  info 
) const
overridevirtual

Backends should implement their own CreateWorkload function with a switch statement.

The case for the switch should be the LayerType and based on that they will call their specific workload creation functionality.

Reimplemented from WorkloadFactoryBase.

Definition at line 134 of file NeonWorkloadFactory.cpp.

137 {
138  switch(type)
139  {
140  case LayerType::Activation :
141  {
142  auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
143  return std::make_unique<NeonActivationWorkload>(*activationQueueDescriptor, info);
144  }
145  case LayerType::Addition :
146  {
147  auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
148  return std::make_unique<NeonAdditionWorkload>(*additionQueueDescriptor, info);
149  }
150  case LayerType::ArgMinMax :
151  {
152  auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
153  return std::make_unique<NeonArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info);
154  }
 155  case LayerType::BatchMatMul :
 156  {
157  auto batchMatMulQueueDescriptor = PolymorphicDowncast<const BatchMatMulQueueDescriptor*>(&descriptor);
158  bool isFastMathEnabled = false;
159  if (m_ModelContextPtr)
160  {
161  if (m_ModelContextPtr.get() != nullptr)
162  {
163  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
164  if (modelOptions)
165  {
166  isFastMathEnabled = modelOptions->IsFastMathEnabled();
167  }
168  }
169  }
170  return std::make_unique<NeonBatchMatMulWorkload>(*batchMatMulQueueDescriptor, info, isFastMathEnabled);
171  }
 172  case LayerType::BatchNormalization :
 173  {
174  auto batchNormalizationQueueDescriptor
175  = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
176  return std::make_unique<NeonBatchNormalizationWorkload>(*batchNormalizationQueueDescriptor, info);
177  }
 178  case LayerType::BatchToSpaceNd :
 179  {
180  auto batchToSpaceNdQueueDescriptor
181  = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
182  return std::make_unique<NeonBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info);
183  }
184  case LayerType::Cast :
185  {
186  auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
187  return std::make_unique<NeonCastWorkload>(*castQueueDescriptor, info);
188  }
 189  case LayerType::ChannelShuffle :
 190  {
191  auto channelShuffleQueueDescriptor = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
192  return std::make_unique<NeonChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info);
193  }
194  case LayerType::Comparison :
195  {
196  auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
197  return std::make_unique<NeonComparisonWorkload>(*comparisonQueueDescriptor, info);
198  }
199  case LayerType::Concat :
200  {
201  auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
202  return std::make_unique<NeonConcatWorkload>(*concatQueueDescriptor, info);
203  }
204  case LayerType::Constant :
205  {
206  auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
207  return std::make_unique<NeonConstantWorkload>(*constantQueueDescriptor, info);
208  }
 209  case LayerType::ConvertFp16ToFp32 :
 210  {
211  auto convertFp16ToFp32QueueDescriptor
212  = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
213  return std::make_unique<NeonConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info);
214  }
 215  case LayerType::ConvertFp32ToFp16 :
 216  {
217  auto convertFp32ToFp16QueueDescriptor
218  = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
219  return std::make_unique<NeonConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor, info);
220  }
 221  case LayerType::Convolution2d :
 222  {
223  auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
224  bool isFastMathEnabled = false;
225  if (m_ModelContextPtr)
226  {
227  if (m_ModelContextPtr.get() != nullptr)
228  {
229  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
230  if (modelOptions)
231  {
232  isFastMathEnabled = modelOptions->IsFastMathEnabled();
233  }
234  }
235  }
236  return std::make_unique<NeonConvolution2dWorkload>(*convolution2dQueueDescriptor,
237  info,
238  m_MemoryManager->GetIntraLayerManager(),
239  isFastMathEnabled);
240  }
 241  case LayerType::Convolution3d :
 242  {
243  auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
244  bool isFastMathEnabled = false;
245  if (m_ModelContextPtr)
246  {
247  if (m_ModelContextPtr.get() != nullptr)
248  {
249  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
250  if (modelOptions)
251  {
252  isFastMathEnabled = modelOptions->IsFastMathEnabled();
253  }
254  }
255  }
256  return std::make_unique<NeonConvolution3dWorkload>(*convolution3dQueueDescriptor,
257  info,
258  m_MemoryManager->GetIntraLayerManager(),
259  isFastMathEnabled);
260  }
261  case LayerType::Debug :
262  {
263  auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
264  return MakeWorkloadHelper<NullWorkload, NullWorkload>(*debugQueueDescriptor, info);
265  }
 266  case LayerType::DepthToSpace :
 267  {
268  auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
269  return std::make_unique<NeonDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info);
270  }
 271  case LayerType::DepthwiseConvolution2d :
 272  {
273  auto depthwiseConvolution2dQueueDescriptor
274  = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
275  return std::make_unique<NeonDepthwiseConvolutionWorkload>(*depthwiseConvolution2dQueueDescriptor, info);
276  }
277  case LayerType::Dequantize :
278  {
279  auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
280  return std::make_unique<NeonDequantizeWorkload>(*dequantizeQueueDescriptor, info);
281  }
 282  case LayerType::DetectionPostProcess :
 283  {
284  auto detectionPostProcessQueueDescriptor
285  = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
286  return MakeWorkloadHelper<NullWorkload, NullWorkload>(*detectionPostProcessQueueDescriptor, info);
287  }
288  case LayerType::Division :
289  {
290  auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
291  return std::make_unique<NeonDivisionWorkload>(*divisionQueueDescriptor, info);
292  }
 293  case LayerType::ElementwiseBinary :
 294  {
295  auto elementwiseBinaryQueueDescriptor
296  = PolymorphicDowncast<const ElementwiseBinaryQueueDescriptor*>(&descriptor);
297  switch (elementwiseBinaryQueueDescriptor->m_Parameters.m_Operation)
298  {
 299  case BinaryOperation::Add:
 300  {
301  AdditionQueueDescriptor additionQueueDescriptor;
302  additionQueueDescriptor.m_Inputs = descriptor.m_Inputs;
303  additionQueueDescriptor.m_Outputs = descriptor.m_Outputs;
304  return std::make_unique<NeonAdditionWorkload>(additionQueueDescriptor, info);
305  }
 306  case BinaryOperation::Div:
 307  {
308  DivisionQueueDescriptor divisionQueueDescriptor;
309  divisionQueueDescriptor.m_Inputs = descriptor.m_Inputs;
310  divisionQueueDescriptor.m_Outputs = descriptor.m_Outputs;
311  return std::make_unique<NeonDivisionWorkload>(divisionQueueDescriptor, info);
312  }
 313  case BinaryOperation::Maximum:
 314  {
315  MaximumQueueDescriptor maximumQueueDescriptor;
316  maximumQueueDescriptor.m_Inputs = descriptor.m_Inputs;
317  maximumQueueDescriptor.m_Outputs = descriptor.m_Outputs;
318  return std::make_unique<NeonMaximumWorkload>(maximumQueueDescriptor, info);
319  }
 320  case BinaryOperation::Minimum:
 321  {
322  MinimumQueueDescriptor minimumQueueDescriptor;
323  minimumQueueDescriptor.m_Inputs = descriptor.m_Inputs;
324  minimumQueueDescriptor.m_Outputs = descriptor.m_Outputs;
325  return std::make_unique<NeonMinimumWorkload>(minimumQueueDescriptor, info);
326  }
 327  case BinaryOperation::Mul:
 328  {
329  MultiplicationQueueDescriptor multiplicationQueueDescriptor;
330  multiplicationQueueDescriptor.m_Inputs = descriptor.m_Inputs;
331  multiplicationQueueDescriptor.m_Outputs = descriptor.m_Outputs;
332  return std::make_unique<NeonMultiplicationWorkload>(multiplicationQueueDescriptor, info);
333  }
 334  case BinaryOperation::Power:
 335  case BinaryOperation::SqDiff:
 336  {
337  return std::make_unique<NeonElementwiseBinaryWorkload>(*elementwiseBinaryQueueDescriptor, info);
338  }
 339  case BinaryOperation::Sub:
 340  {
341  SubtractionQueueDescriptor subtractionQueueDescriptor;
342  subtractionQueueDescriptor.m_Inputs = descriptor.m_Inputs;
343  subtractionQueueDescriptor.m_Outputs = descriptor.m_Outputs;
344  return std::make_unique<NeonSubtractionWorkload>(subtractionQueueDescriptor, info);
345  }
346  default:
347  return nullptr;
348  }
349  }
 350  case LayerType::ElementwiseUnary :
 351  {
352  auto elementwiseUnaryQueueDescriptor
353  = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
354  switch(elementwiseUnaryQueueDescriptor->m_Parameters.m_Operation)
355  {
356  case UnaryOperation::Abs:
357  {
358  AbsQueueDescriptor absQueueDescriptor;
359  absQueueDescriptor.m_Inputs = elementwiseUnaryQueueDescriptor->m_Inputs;
360  absQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
361  return std::make_unique<NeonAbsWorkload>(absQueueDescriptor, info);
362  }
363  case UnaryOperation::Exp:
364  return std::make_unique<NeonExpWorkload>(*elementwiseUnaryQueueDescriptor, info);
 365  case UnaryOperation::LogicalNot:
 366  return std::make_unique<NeonLogicalNotWorkload>(*elementwiseUnaryQueueDescriptor, info);
367  case UnaryOperation::Log:
368  return std::make_unique<NeonLogWorkload>(*elementwiseUnaryQueueDescriptor, info);
369  case UnaryOperation::Neg:
370  return std::make_unique<NeonNegWorkload>(*elementwiseUnaryQueueDescriptor, info);
 371  case UnaryOperation::Rsqrt:
 372  {
373  RsqrtQueueDescriptor rsqrtQueueDescriptor;
374  rsqrtQueueDescriptor.m_Inputs = elementwiseUnaryQueueDescriptor->m_Inputs;
375  rsqrtQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
376  return std::make_unique<NeonRsqrtWorkload>(rsqrtQueueDescriptor, info);
377  }
378  case UnaryOperation::Sin:
379  return std::make_unique<NeonSinWorkload>(*elementwiseUnaryQueueDescriptor, info);
 380  case UnaryOperation::Sqrt:
 381  return std::make_unique<NeonSqrtWorkload>(*elementwiseUnaryQueueDescriptor, info);
382  default:
383  return nullptr;
384  }
385  }
386  case LayerType::Fill :
387  {
388  auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
389  return std::make_unique<NeonFillWorkload>(*fillQueueDescriptor, info);
390  }
391  case LayerType::Floor :
392  {
393  auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
394  return MakeWorkloadHelper<NeonFloorFloatWorkload, NullWorkload>(*floorQueueDescriptor, info);
395  }
 396  case LayerType::FullyConnected :
 397  {
398  auto fullyConnectedQueueDescriptor = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
399  return std::make_unique<NeonFullyConnectedWorkload>(*fullyConnectedQueueDescriptor,
400  info,
401  m_MemoryManager->GetIntraLayerManager());
402  }
403  case LayerType::Fused :
404  {
405  auto fusedQueueDescriptor = PolymorphicDowncast<const FusedQueueDescriptor*>(&descriptor);
406  return std::make_unique<NeonFusedWorkload>(*fusedQueueDescriptor, info);
407  }
408  case LayerType::Gather :
409  {
410  auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
411  return std::make_unique<NeonGatherWorkload>(*gatherQueueDescriptor, info);
412  }
413  case LayerType::GatherNd :
414  {
415  auto gatherNdQueueDescriptor = PolymorphicDowncast<const GatherNdQueueDescriptor*>(&descriptor);
416  return std::make_unique<NeonGatherNdWorkload>(*gatherNdQueueDescriptor, info);
417  }
418  case LayerType::Input :
419  {
420  auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
421  return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
422  }
 423  case LayerType::InstanceNormalization :
 424  {
425  auto instanceNormalizationQueueDescriptor
426  = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
427  return std::make_unique<NeonInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor, info);
428  }
 429  case LayerType::L2Normalization :
 430  {
431  auto l2NormalizationQueueDescriptor
432  = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
433  return MakeWorkloadHelper<NeonL2NormalizationFloatWorkload, NullWorkload>
434  (*l2NormalizationQueueDescriptor, info, m_MemoryManager->GetIntraLayerManager());
435  }
436  case LayerType::LogSoftmax :
437  {
438  auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
439  return std::make_unique<NeonLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor,
440  info,
441  m_MemoryManager->GetIntraLayerManager());
442  }
 443  case LayerType::LogicalBinary :
 444  {
445  auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
446  switch(logicalBinaryQueueDescriptor->m_Parameters.m_Operation)
447  {
 448  case LogicalBinaryOperation::LogicalAnd:
 449  return std::make_unique<NeonLogicalAndWorkload>(*logicalBinaryQueueDescriptor, info);
 450  case LogicalBinaryOperation::LogicalOr:
 451  return std::make_unique<NeonLogicalOrWorkload>(*logicalBinaryQueueDescriptor, info);
452  default:
453  return nullptr;
454  }
455  }
456  case LayerType::Lstm :
457  {
458  auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
459  return MakeWorkloadHelper<NeonLstmFloatWorkload, NullWorkload>(*lstmQueueDescriptor, info);
460  }
461  case LayerType::Maximum :
462  {
463  auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
464  return std::make_unique<NeonMaximumWorkload>(*maximumQueueDescriptor, info);
465  }
466  case LayerType::Mean :
467  {
468  auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
469  return std::make_unique<NeonMeanWorkload>(*meanQueueDescriptor, info);
470  }
471  case LayerType::MemCopy :
472  {
473  auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
474  if (memCopyQueueDescriptor->m_Inputs.empty() || !memCopyQueueDescriptor->m_Inputs[0])
475  {
476  throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemCopy workload");
477  }
478  return MakeWorkloadHelper<CopyMemGenericWorkload, CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
479  }
480  case LayerType::MemImport :
481  {
482  auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
483  if (memImportQueueDescriptor->m_Inputs.empty() || !memImportQueueDescriptor->m_Inputs[0])
484  {
485  throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemImport workload");
486  }
487  return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
488  }
489  case LayerType::Minimum :
490  {
491  auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
492  return std::make_unique<NeonMinimumWorkload>(*minimumQueueDescriptor, info);
493  }
 494  case LayerType::Multiplication :
 495  {
496  auto multiplicationQueueDescriptor = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
497  return std::make_unique<NeonMultiplicationWorkload>(*multiplicationQueueDescriptor, info);
498  }
 499  case LayerType::Normalization :
 500  {
501  auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
502  return MakeWorkloadHelper<NeonNormalizationFloatWorkload, NullWorkload>
503  (*normalizationQueueDescriptor, info, m_MemoryManager->GetIntraLayerManager());
504  }
505  case LayerType::Output :
506  {
507  auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
508  return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
509  }
510  case LayerType::Pad :
511  {
512  auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
513  return std::make_unique<NeonPadWorkload>(*padQueueDescriptor, info);
514  }
515  case LayerType::Permute :
516  {
517  auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
518  return std::make_unique<NeonPermuteWorkload>(*permuteQueueDescriptor, info);
519  }
520  case LayerType::Pooling2d :
521  {
522  auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
523  return std::make_unique<NeonPooling2dWorkload>(*pooling2dQueueDescriptor, info);
524  }
525  case LayerType::Pooling3d :
526  {
527  auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
528  return std::make_unique<NeonPooling3dWorkload>(*pooling3dQueueDescriptor, info);
529  }
 530  case LayerType::PreCompiled :
 531  {
532  auto preCompiledQueueDescriptor = PolymorphicDowncast<const PreCompiledQueueDescriptor*>(&descriptor);
533  return MakeWorkloadHelper<NullWorkload, NullWorkload>(*preCompiledQueueDescriptor, info);
534  }
535  case LayerType::Prelu :
536  {
537  auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
538  return std::make_unique<NeonPreluWorkload>(*preluQueueDescriptor, info);
539  }
540  case LayerType::QLstm :
541  {
542  auto qLstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
543  return std::make_unique<NeonQLstmWorkload>(*qLstmQueueDescriptor, info);
544  }
545  case LayerType::Quantize :
546  {
547  auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
548  return std::make_unique<NeonQuantizeWorkload>(*quantizeQueueDescriptor, info);
549  }
 550  case LayerType::QuantizedLstm :
 551  {
552  auto quantizedLstmQueueDescriptor = PolymorphicDowncast<const QuantizedLstmQueueDescriptor*>(&descriptor);
553  return std::make_unique<NeonQuantizedLstmWorkload>(*quantizedLstmQueueDescriptor, info);
554  }
555  case LayerType::Rank :
556  {
557  auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
558  return std::make_unique<NeonRankWorkload>(*rankQueueDescriptor, info);
559  }
560  case LayerType::Reduce :
561  {
562  auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
563  return std::make_unique<NeonReduceWorkload>(*reduceQueueDescriptor, info);
564  }
565  case LayerType::Reshape :
566  {
567  auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
568  return std::make_unique<NeonReshapeWorkload>(*reshapeQueueDescriptor, info);
569  }
570  case LayerType::Resize :
571  {
572  auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
573  return std::make_unique<NeonResizeWorkload>(*resizeQueueDescriptor, info);
574  }
575  case LayerType::ReverseV2 :
576  {
577  auto reverseV2QueueDescriptor = PolymorphicDowncast<const ReverseV2QueueDescriptor*>(&descriptor);
578  return std::make_unique<NeonReverseV2Workload>(*reverseV2QueueDescriptor, info);
579  }
580  case LayerType::Slice :
581  {
582  auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
583  return std::make_unique<NeonSliceWorkload>(*sliceQueueDescriptor, info);
584  }
585  case LayerType::Softmax :
586  {
587  auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
588  return std::make_unique<NeonSoftmaxWorkload>(*softmaxQueueDescriptor,
589  info,
590  m_MemoryManager->GetIntraLayerManager());
591  }
 592  case LayerType::SpaceToBatchNd :
 593  {
594  auto spaceToBatchNdQueueDescriptor
595  = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
596  return std::make_unique<NeonSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info);
597  }
 598  case LayerType::SpaceToDepth :
 599  {
600  auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
601  return std::make_unique<NeonSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info);
602  }
603  case LayerType::Splitter :
604  {
605  auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
606  return std::make_unique<NeonSplitterWorkload>(*splitterQueueDescriptor, info);
607  }
608  case LayerType::Stack :
609  {
610  auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
611  return std::make_unique<NeonStackWorkload>(*stackQueueDescriptor, info);
612  }
 613  case LayerType::StridedSlice :
 614  {
615  auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
616  return std::make_unique<NeonStridedSliceWorkload>(*stridedSliceQueueDescriptor, info);
617  }
 618  case LayerType::Subtraction :
 619  {
620  auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
621  return std::make_unique<NeonSubtractionWorkload>(*subtractionQueueDescriptor, info);
622  }
623  case LayerType::Tile:
624  {
625  auto tileQueueDescriptor = PolymorphicDowncast<const TileQueueDescriptor*>(&descriptor);
626  return std::make_unique<NeonTileWorkload>(*tileQueueDescriptor, info);
627  }
628  case LayerType::Transpose :
629  {
630  auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
631  return std::make_unique<NeonTransposeWorkload>(*transposeQueueDescriptor, info);
632  }
 633  case LayerType::TransposeConvolution2d :
 634  {
635  auto transposeConvolution2dQueueDescriptor
636  = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
637  return std::make_unique<NeonTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor,
638  info,
639  m_MemoryManager->GetIntraLayerManager());
640  }
 641  case LayerType::UnidirectionalSequenceLstm :
 642  {
643  auto desc = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
644  if ((info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Float32) &&
645  (info.m_InputTensorInfos[1].GetDataType() == armnn::DataType::Float32) &&
646  (info.m_InputTensorInfos[2].GetDataType() == armnn::DataType::Float32) &&
647  (info.m_OutputTensorInfos[0].GetDataType() == armnn::DataType::Float32) &&
648  (info.m_OutputTensorInfos[1].GetDataType() == armnn::DataType::Float32) &&
649  (info.m_OutputTensorInfos[2].GetDataType() == armnn::DataType::Float32))
650  {
651  return std::make_unique<NeonUnidirectionalSequenceLstmFloatWorkload>(*desc, info);
652  }
653  else
654  {
655  return std::make_unique<NeonUnidirectionalSequenceLstmWorkload>(*desc, info);
656  }
657  }
658  default:
659  return nullptr;
660  }
661 }

References armnn::Abs, armnn::Activation, armnn::Add, armnn::Addition, armnn::ArgMinMax, armnn::BatchMatMul, armnn::BatchNormalization, armnn::BatchToSpaceNd, armnn::Cast, armnn::ChannelShuffle, armnn::Comparison, armnn::Concat, armnn::Constant, armnn::ConvertFp16ToFp32, armnn::ConvertFp32ToFp16, armnn::Convolution2d, armnn::Convolution3d, armnn::Debug, armnn::DepthToSpace, armnn::DepthwiseConvolution2d, armnn::Dequantize, armnn::DetectionPostProcess, armnn::Div, armnn::Division, armnn::ElementwiseBinary, armnn::ElementwiseUnary, armnn::Exp, armnn::Fill, armnn::Float32, armnn::Floor, armnn::FullyConnected, armnn::Fused, armnn::Gather, armnn::GatherNd, armnn::info, armnn::Input, armnn::InstanceNormalization, NeonBackendModelContext::IsFastMathEnabled(), armnn::L2Normalization, armnn::Log, armnn::LogicalAnd, armnn::LogicalBinary, armnn::LogicalNot, armnn::LogicalOr, armnn::LogSoftmax, armnn::Lstm, QueueDescriptor::m_Inputs, QueueDescriptor::m_Outputs, armnn::Maximum, armnn::Mean, armnn::MemCopy, armnn::MemImport, armnn::Minimum, armnn::Mul, armnn::Multiplication, armnn::Neg, armnn::Normalization, armnn::Output, armnn::Pad, armnn::Permute, armnn::Pooling2d, armnn::Pooling3d, armnn::Power, armnn::PreCompiled, armnn::Prelu, armnn::QLstm, armnn::Quantize, armnn::QuantizedLstm, armnn::Rank, armnn::Reduce, armnn::Reshape, armnn::Resize, armnn::ReverseV2, armnn::Rsqrt, armnn::Sin, armnn::Slice, armnn::Softmax, armnn::SpaceToBatchNd, armnn::SpaceToDepth, armnn::Splitter, armnn::SqDiff, armnn::Sqrt, armnn::Stack, armnn::StridedSlice, armnn::Sub, armnn::Subtraction, armnn::Tile, armnn::Transpose, armnn::TransposeConvolution2d, and armnn::UnidirectionalSequenceLstm.
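In normal operation the runtime calls this entry point once per layer of the optimized graph. A hedged sketch of driving it directly for a single Activation layer; the tensor-handle wiring is simplified and uses unmanaged handles created by the same factory:

    armnn::TensorInfo ioInfo({ 1, 16 }, armnn::DataType::Float32);

    // Unmanaged handles standing in for the graph's input and output tensors.
    auto inputHandle  = factory.CreateTensorHandle(ioInfo, false);
    auto outputHandle = factory.CreateTensorHandle(ioInfo, false);

    armnn::ActivationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Function = armnn::ActivationFunction::ReLu;
    descriptor.m_Inputs  = { inputHandle.get() };
    descriptor.m_Outputs = { outputHandle.get() };

    armnn::WorkloadInfo workloadInfo;
    workloadInfo.m_InputTensorInfos  = { ioInfo };
    workloadInfo.m_OutputTensorInfos = { ioInfo };

    // Dispatches on LayerType::Activation and returns a NeonActivationWorkload;
    // a nullptr result means the Neon backend has no workload for that type.
    std::unique_ptr<armnn::IWorkload> workload =
        factory.CreateWorkload(armnn::LayerType::Activation, descriptor, workloadInfo);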

◆ GetBackendId()

const BackendId & GetBackendId ( ) const
overridevirtual

Implements IWorkloadFactory.

Definition at line 49 of file NeonWorkloadFactory.cpp.

50 {
51  return s_Id;
52 }
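A quick sketch of querying the id; the comment reflects the usual Neon backend id and is an assumption rather than something stated on this page:

    // For the Neon backend this is expected to be the "CpuAcc" backend id.
    const armnn::BackendId& id = factory.GetBackendId();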

◆ IsLayerSupported() [1/2]

bool IsLayerSupported ( const IConnectableLayer &  layer,
Optional< DataType >  dataType,
std::string &  outReasonIfUnsupported,
const ModelOptions &  modelOptions 
)
static

Definition at line 41 of file NeonWorkloadFactory.cpp.

45 {
46  return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported, modelOptions);
47 }

References IWorkloadFactory::IsLayerSupported().
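A sketch of querying support for a layer built through the public INetwork API. The layer must be connected and have its output TensorInfo set before the query, since support is decided from the connected tensor shapes and types:

    armnn::INetworkPtr network = armnn::INetwork::Create();
    armnn::TensorInfo info({ 1, 16 }, armnn::DataType::Float32);

    armnn::IConnectableLayer* input = network->AddInputLayer(0);
    armnn::ActivationDescriptor reluDesc;
    reluDesc.m_Function = armnn::ActivationFunction::ReLu;
    armnn::IConnectableLayer* relu = network->AddActivationLayer(reluDesc, "relu");
    armnn::IConnectableLayer* output = network->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(relu->GetInputSlot(0));
    relu->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    input->GetOutputSlot(0).SetTensorInfo(info);
    relu->GetOutputSlot(0).SetTensorInfo(info);

    std::string reason;
    armnn::ModelOptions modelOptions; // empty: default backend behaviour
    bool supported = armnn::NeonWorkloadFactory::IsLayerSupported(
        *relu, armnn::DataType::Float32, reason, modelOptions);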

◆ IsLayerSupported() [2/2]

bool IsLayerSupported ( const Layer &  layer,
Optional< DataType >  dataType,
std::string &  outReasonIfUnsupported 
)
static

Definition at line 34 of file NeonWorkloadFactory.cpp.

37 {
38  return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
39 }

References IWorkloadFactory::IsLayerSupported().

◆ SupportsSubTensors()

bool SupportsSubTensors ( ) const
inlineoverridevirtual

Reimplemented from WorkloadFactoryBase.

Definition at line 39 of file NeonWorkloadFactory.hpp.

39 { return true; }

The documentation for this class was generated from the following files:

NeonWorkloadFactory.hpp
NeonWorkloadFactory.cpp