ArmNN 22.02
NeonWorkloadFactory.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonBackendId.hpp"
#include "NeonBackendModelContext.hpp"
#include "NeonTensorHandle.hpp"
#include "NeonWorkloadFactory.hpp"

#include <Layer.hpp>

#include <armnn/Utils.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <backendsCommon/MakeWorkloadHelper.hpp>
#include <backendsCommon/MemCopyWorkload.hpp>
#include <backendsCommon/MemImportWorkload.hpp>
#include <backendsCommon/TensorHandle.hpp>

#include <neon/workloads/NeonWorkloadUtils.hpp>
#include <neon/workloads/NeonWorkloads.hpp>

namespace armnn
{

namespace
{
static const BackendId s_Id{NeonBackendId()};
}

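// Forward layer-support queries to the common IWorkloadFactory helper, pinned
// to the Neon backend id declared above.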
bool NeonWorkloadFactory::IsLayerSupported(const Layer& layer,
                                           Optional<DataType> dataType,
                                           std::string& outReasonIfUnsupported)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
}

bool NeonWorkloadFactory::IsLayerSupported(const IConnectableLayer& layer,
                                           Optional<DataType> dataType,
                                           std::string& outReasonIfUnsupported,
                                           const ModelOptions& modelOptions)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported, modelOptions);
}

const BackendId& NeonWorkloadFactory::GetBackendId() const
{
    return s_Id;
}

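// Configures the Arm Compute Library scheduler thread count from the backend's
// "NumberOfThreads" model option, when one has been supplied.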
void NeonWorkloadFactory::SetNumberOfThreads()
{
    if (m_ModelContextPtr)
    {
        const unsigned int MIN_THREADS = 1;
        const unsigned int MAX_THREADS = 64;

        // Apply the user-supplied NumberOfThreads option, but only when it is
        // within the supported range; otherwise keep the scheduler default.
        auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
        if (modelOptions)
        {
            auto numberOfThreads = modelOptions->GetNumberOfThreads();
            if (numberOfThreads >= MIN_THREADS && numberOfThreads <= MAX_THREADS)
            {
                arm_compute::Scheduler::get().set_num_threads(numberOfThreads);
            }
        }
    }
}

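// The factory can be built with or without a backend-specific model context;
// both constructors apply the thread-count option straight away.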
NeonWorkloadFactory::NeonWorkloadFactory(const std::shared_ptr<NeonMemoryManager>& memoryManager)
    : m_MemoryManager(memoryManager), m_ModelContextPtr(IBackendInternal::IBackendSpecificModelContextPtr{})
{
    SetNumberOfThreads();
}

NeonWorkloadFactory::NeonWorkloadFactory(const std::shared_ptr<NeonMemoryManager>& memoryManager,
                                         const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
    : m_MemoryManager(memoryManager), m_ModelContextPtr(modelContextPtr)
{
    SetNumberOfThreads();
}

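// Creates a handle that aliases a region of the parent tensor's memory.
// Returns nullptr when the requested sub-tensor does not fit inside the parent.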
std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateSubTensorHandle(ITensorHandle& parent,
                                                                          TensorShape const& subTensorShape,
                                                                          unsigned int const* subTensorOrigin) const
{
    const arm_compute::TensorShape shape = armcomputetensorutils::BuildArmComputeTensorShape(subTensorShape);

    arm_compute::Coordinates coords;
    coords.set_num_dimensions(subTensorShape.GetNumDimensions());
    for (unsigned int i = 0; i < subTensorShape.GetNumDimensions(); i++)
    {
        // Arm Compute indexes tensor coordinates in reverse order.
        unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
        coords.set(i, armnn::numeric_cast<int>(subTensorOrigin[revertedIndex]));
    }

    const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
    if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
    {
        return nullptr;
    }

    return std::make_unique<NeonSubTensorHandle>(
        PolymorphicDowncast<IAclTensorHandle*>(&parent), shape, coords);
}

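// Tensor handles are optionally registered with the inter-layer memory group so
// the Neon memory manager can reuse their allocations between layers.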
std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                       const bool IsMemoryManaged) const
{
    auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo);
    if (IsMemoryManaged)
    {
        tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
    }
    return tensorHandle;
}

std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                       DataLayout dataLayout,
                                                                       const bool IsMemoryManaged) const
{
    auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo, dataLayout);
    if (IsMemoryManaged)
    {
        tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
    }
    return tensorHandle;
}

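// Single dispatch point: maps a LayerType onto the matching Neon workload.
// Returns nullptr for layer types this backend has no workload for.
// A minimal (hypothetical) call-site sketch:
//     std::unique_ptr<IWorkload> workload =
//         factory.CreateWorkload(LayerType::Activation, activationDescriptor, workloadInfo);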
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateWorkload(LayerType type,
                                                               const QueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    switch(type)
    {
        case LayerType::Activation :
        {
            auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonActivationWorkload>(*activationQueueDescriptor, info);
        }
        case LayerType::Addition :
        {
            auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonAdditionWorkload>(*additionQueueDescriptor, info);
        }
        case LayerType::ArgMinMax :
        {
            auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info);
        }
        case LayerType::BatchNormalization :
        {
            auto batchNormalizationQueueDescriptor
                = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonBatchNormalizationWorkload>(*batchNormalizationQueueDescriptor, info);
        }
        case LayerType::BatchToSpaceNd :
        {
            auto batchToSpaceNdQueueDescriptor
                = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info);
        }
        case LayerType::Cast :
        {
            auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonCastWorkload>(*castQueueDescriptor, info);
        }
        case LayerType::ChannelShuffle :
        {
            auto channelShuffleQueueDescriptor = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info);
        }
        case LayerType::Comparison :
        {
            auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonComparisonWorkload>(*comparisonQueueDescriptor, info);
        }
        case LayerType::Concat :
        {
            auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonConcatWorkload>(*concatQueueDescriptor, info);
        }
        case LayerType::Constant :
        {
            auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonConstantWorkload>(*constantQueueDescriptor, info);
        }
        case LayerType::ConvertBf16ToFp32 :
        {
            auto convertBf16ToFp32QueueDescriptor
                = PolymorphicDowncast<const ConvertBf16ToFp32QueueDescriptor*>(&descriptor);
            return std::make_unique<NeonConvertBf16ToFp32Workload>(*convertBf16ToFp32QueueDescriptor, info);
        }
        case LayerType::ConvertFp16ToFp32 :
        {
            auto convertFp16ToFp32QueueDescriptor
                = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
            return std::make_unique<NeonConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info);
        }
        case LayerType::ConvertFp32ToBf16 :
        {
            auto convertFp32ToBf16QueueDescriptor
                = PolymorphicDowncast<const ConvertFp32ToBf16QueueDescriptor*>(&descriptor);
            return std::make_unique<NeonConvertFp32ToBf16Workload>(*convertFp32ToBf16QueueDescriptor, info);
        }
        case LayerType::ConvertFp32ToFp16 :
        {
            auto convertFp32ToFp16QueueDescriptor
                = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
            return std::make_unique<NeonConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor, info);
        }
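        // Convolution workloads additionally honour the "FastMathEnabled" model
        // option which, per the ArmNN backend options, permits faster convolution
        // algorithms at a possible cost in precision.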
        case LayerType::Convolution2d :
        {
            auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);

            bool isFastMathEnabled = false;
            if (m_ModelContextPtr)
            {
                auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
                if (modelOptions)
                {
                    isFastMathEnabled = modelOptions->IsFastMathEnabled();
                }
            }
            return std::make_unique<NeonConvolution2dWorkload>(*convolution2dQueueDescriptor,
                                                               info,
                                                               m_MemoryManager->GetIntraLayerManager(),
                                                               isFastMathEnabled);
        }
        case LayerType::Convolution3d :
        {
            auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);

            bool isFastMathEnabled = false;
            if (m_ModelContextPtr)
            {
                auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
                if (modelOptions)
                {
                    isFastMathEnabled = modelOptions->IsFastMathEnabled();
                }
            }
            return std::make_unique<NeonConvolution3dWorkload>(*convolution3dQueueDescriptor,
                                                               info,
                                                               m_MemoryManager->GetIntraLayerManager(),
                                                               isFastMathEnabled);
        }
        case LayerType::Debug :
        {
            auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<NullWorkload, NullWorkload>(*debugQueueDescriptor, info);
        }
        case LayerType::DepthToSpace :
        {
            auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info);
        }
        case LayerType::DepthwiseConvolution2d :
        {
            auto depthwiseConvolution2dQueueDescriptor
                = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonDepthwiseConvolutionWorkload>(*depthwiseConvolution2dQueueDescriptor, info);
        }
        case LayerType::Dequantize :
        {
            auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonDequantizeWorkload>(*dequantizeQueueDescriptor, info);
        }
        case LayerType::DetectionPostProcess :
        {
            auto detectionPostProcessQueueDescriptor
                = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<NullWorkload, NullWorkload>(*detectionPostProcessQueueDescriptor, info);
        }
        case LayerType::Division :
        {
            auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonDivisionWorkload>(*divisionQueueDescriptor, info);
        }
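        // Abs and Rsqrt have dedicated Neon workloads, so their inputs and
        // outputs are repackaged into the matching descriptor type below.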
        case LayerType::ElementwiseUnary :
        {
            auto elementwiseUnaryQueueDescriptor
                = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);

            switch(elementwiseUnaryQueueDescriptor->m_Parameters.m_Operation)
            {
                case UnaryOperation::Abs:
                {
                    AbsQueueDescriptor absQueueDescriptor;
                    absQueueDescriptor.m_Inputs  = elementwiseUnaryQueueDescriptor->m_Inputs;
                    absQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;

                    return std::make_unique<NeonAbsWorkload>(absQueueDescriptor, info);
                }
                case UnaryOperation::Exp:
                    return std::make_unique<NeonExpWorkload>(*elementwiseUnaryQueueDescriptor, info);
                case UnaryOperation::LogicalNot:
                    return std::make_unique<NeonLogicalNotWorkload>(*elementwiseUnaryQueueDescriptor, info);
                case UnaryOperation::Log:
                    return std::make_unique<NeonLogWorkload>(*elementwiseUnaryQueueDescriptor, info);
                case UnaryOperation::Neg:
                    return std::make_unique<NeonNegWorkload>(*elementwiseUnaryQueueDescriptor, info);
                case UnaryOperation::Rsqrt:
                {
                    RsqrtQueueDescriptor rsqrtQueueDescriptor;
                    rsqrtQueueDescriptor.m_Inputs  = elementwiseUnaryQueueDescriptor->m_Inputs;
                    rsqrtQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;

                    return std::make_unique<NeonRsqrtWorkload>(rsqrtQueueDescriptor, info);
                }
                case UnaryOperation::Sin:
                    return std::make_unique<NeonSinWorkload>(*elementwiseUnaryQueueDescriptor, info);
                default:
                    return nullptr;
            }
        }
        case LayerType::Fill :
        {
            auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonFillWorkload>(*fillQueueDescriptor, info);
        }
        case LayerType::Floor :
        {
            auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<NeonFloorFloatWorkload, NullWorkload>(*floorQueueDescriptor, info);
        }
        case LayerType::FullyConnected :
        {
            auto fullyConnectedQueueDescriptor = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonFullyConnectedWorkload>(*fullyConnectedQueueDescriptor,
                                                                info,
                                                                m_MemoryManager->GetIntraLayerManager());
        }
        case LayerType::Gather :
        {
            auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonGatherWorkload>(*gatherQueueDescriptor, info);
        }
        case LayerType::Input :
        {
            auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
            return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
        }
        case LayerType::InstanceNormalization :
        {
            auto instanceNormalizationQueueDescriptor
                = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor, info);
        }
        case LayerType::L2Normalization :
        {
            auto l2NormalizationQueueDescriptor
                = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<NeonL2NormalizationFloatWorkload, NullWorkload>
                (*l2NormalizationQueueDescriptor, info, m_MemoryManager->GetIntraLayerManager());
        }
        case LayerType::LogSoftmax :
        {
            auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor,
                                                            info,
                                                            m_MemoryManager->GetIntraLayerManager());
        }
        case LayerType::LogicalBinary :
        {
            auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);

            switch(logicalBinaryQueueDescriptor->m_Parameters.m_Operation)
            {
                case LogicalBinaryOperation::LogicalAnd:
                    return std::make_unique<NeonLogicalAndWorkload>(*logicalBinaryQueueDescriptor, info);
                case LogicalBinaryOperation::LogicalOr:
                    return std::make_unique<NeonLogicalOrWorkload>(*logicalBinaryQueueDescriptor, info);
                default:
                    return nullptr;
            }
        }
        case LayerType::Lstm :
        {
            auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<NeonLstmFloatWorkload, NullWorkload>(*lstmQueueDescriptor, info);
        }
        case LayerType::Maximum :
        {
            auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonMaximumWorkload>(*maximumQueueDescriptor, info);
        }
        case LayerType::Mean :
        {
            auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonMeanWorkload>(*meanQueueDescriptor, info);
        }
        case LayerType::MemCopy :
        {
            auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
            if (memCopyQueueDescriptor->m_Inputs.empty() || !memCopyQueueDescriptor->m_Inputs[0])
            {
                throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemCopy workload");
            }
            return MakeWorkloadHelper<CopyMemGenericWorkload, CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
        }
        case LayerType::MemImport :
        {
            auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
            if (memImportQueueDescriptor->m_Inputs.empty() || !memImportQueueDescriptor->m_Inputs[0])
            {
                throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemImport workload");
            }
            return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
        }
        case LayerType::Minimum :
        {
            auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonMinimumWorkload>(*minimumQueueDescriptor, info);
        }
        case LayerType::Multiplication :
        {
            auto multiplicationQueueDescriptor = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonMultiplicationWorkload>(*multiplicationQueueDescriptor, info);
        }
        case LayerType::Normalization :
        {
            auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<NeonNormalizationFloatWorkload, NullWorkload>
                (*normalizationQueueDescriptor, info, m_MemoryManager->GetIntraLayerManager());
        }
        case LayerType::Output :
        {
            auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
            return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
        }
        case LayerType::Pad :
        {
            auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonPadWorkload>(*padQueueDescriptor, info);
        }
        case LayerType::Permute :
        {
            auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonPermuteWorkload>(*permuteQueueDescriptor, info);
        }
        case LayerType::Pooling2d :
        {
            auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonPooling2dWorkload>(*pooling2dQueueDescriptor, info);
        }
        case LayerType::PreCompiled :
        {
            auto preCompiledQueueDescriptor = PolymorphicDowncast<const PreCompiledQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<NullWorkload, NullWorkload>(*preCompiledQueueDescriptor, info);
        }
        case LayerType::Prelu :
        {
            auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonPreluWorkload>(*preluQueueDescriptor, info);
        }
        case LayerType::QLstm :
        {
            auto qLstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonQLstmWorkload>(*qLstmQueueDescriptor, info);
        }
        case LayerType::Quantize :
        {
            auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonQuantizeWorkload>(*quantizeQueueDescriptor, info);
        }
        case LayerType::QuantizedLstm :
        {
            auto quantizedLstmQueueDescriptor = PolymorphicDowncast<const QuantizedLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonQuantizedLstmWorkload>(*quantizedLstmQueueDescriptor, info);
        }
        case LayerType::Rank :
        {
            auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonRankWorkload>(*rankQueueDescriptor, info);
        }
        case LayerType::Reduce :
        {
            auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonReduceWorkload>(*reduceQueueDescriptor, info);
        }
        case LayerType::Reshape :
        {
            auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonReshapeWorkload>(*reshapeQueueDescriptor, info);
        }
        case LayerType::Resize :
        {
            auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonResizeWorkload>(*resizeQueueDescriptor, info);
        }
        case LayerType::Slice :
        {
            auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonSliceWorkload>(*sliceQueueDescriptor, info);
        }
        case LayerType::Softmax :
        {
            auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonSoftmaxWorkload>(*softmaxQueueDescriptor,
                                                         info,
                                                         m_MemoryManager->GetIntraLayerManager());
        }
        case LayerType::SpaceToBatchNd :
        {
            auto spaceToBatchNdQueueDescriptor
                = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info);
        }
        case LayerType::SpaceToDepth :
        {
            auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info);
        }
        case LayerType::Splitter :
        {
            auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonSplitterWorkload>(*splitterQueueDescriptor, info);
        }
        case LayerType::Stack :
        {
            auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonStackWorkload>(*stackQueueDescriptor, info);
        }
        case LayerType::StridedSlice :
        {
            auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonStridedSliceWorkload>(*stridedSliceQueueDescriptor, info);
        }
        case LayerType::Subtraction :
        {
            auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonSubtractionWorkload>(*subtractionQueueDescriptor, info);
        }
        case LayerType::Transpose :
        {
            auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonTransposeWorkload>(*transposeQueueDescriptor, info);
        }
        case LayerType::TransposeConvolution2d :
        {
            auto transposeConvolution2dQueueDescriptor
                = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor,
                                                                        info,
                                                                        m_MemoryManager->GetIntraLayerManager());
        }
        default:
            return nullptr;
    }
}

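// The per-layer Create* methods below mirror the CreateWorkload dispatch above,
// constructing the same workload types for callers of the per-layer API.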
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& info) const
{
    return std::make_unique<NeonActivationWorkload>(descriptor, info);
}

std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
                                                                      const WorkloadInfo& info) const
{
    return std::make_unique<NeonAdditionWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const
{
    return std::make_unique<NeonArgMinMaxWorkload>(descriptor, info);
}

std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateBatchNormalization(
    const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonBatchNormalizationWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
                                                                     const WorkloadInfo& info) const
{
    return std::make_unique<NeonBatchToSpaceNdWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateCast(const CastQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return std::make_unique<NeonCastWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateChannelShuffle(const ChannelShuffleQueueDescriptor& descriptor,
                                                                     const WorkloadInfo& info) const
{
    return std::make_unique<NeonChannelShuffleWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& info) const
{
    return std::make_unique<NeonComparisonWorkload>(descriptor, info);
}

std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor,
                                                                    const WorkloadInfo& info) const
{
    return std::make_unique<NeonConcatWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    return std::make_unique<NeonConstantWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertBf16ToFp32(
    const ConvertBf16ToFp32QueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<NeonConvertBf16ToFp32Workload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertFp16ToFp32(
    const ConvertFp16ToFp32QueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<NeonConvertFp16ToFp32Workload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertFp32ToBf16(
    const ConvertFp32ToBf16QueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<NeonConvertFp32ToBf16Workload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertFp32ToFp16(
    const ConvertFp32ToFp16QueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<NeonConvertFp32ToFp16Workload>(descriptor, info);
}

std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateConvolution2d(
    const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    bool isFastMathEnabled = false;
    if (m_ModelContextPtr)
    {
        auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
        if (modelOptions)
        {
            isFastMathEnabled = modelOptions->IsFastMathEnabled();
        }
    }
    return std::make_unique<NeonConvolution2dWorkload>(descriptor,
                                                       info,
                                                       m_MemoryManager->GetIntraLayerManager(),
                                                       isFastMathEnabled);
}

std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateConvolution3d(
    const Convolution3dQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    bool isFastMathEnabled = false;
    if (m_ModelContextPtr)
    {
        auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
        if (modelOptions)
        {
            isFastMathEnabled = modelOptions->IsFastMathEnabled();
        }
    }
    return std::make_unique<NeonConvolution3dWorkload>(descriptor,
                                                       info,
                                                       m_MemoryManager->GetIntraLayerManager(),
                                                       isFastMathEnabled);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
{
    return std::make_unique<NeonDepthToSpaceWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDepthwiseConvolution2d(
    const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonDepthwiseConvolutionWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDequantize(const DequantizeQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& info) const
{
    return std::make_unique<NeonDequantizeWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDetectionPostProcess(
    const armnn::DetectionPostProcessQueueDescriptor& descriptor, const armnn::WorkloadInfo& info) const
{
    return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
}

std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateDivision(
    const DivisionQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonDivisionWorkload>(descriptor, info);
}

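// Same repackaging as in CreateWorkload above: Abs and Rsqrt are routed to
// their dedicated workloads via purpose-built descriptors.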
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateElementwiseUnary(
    const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    switch(descriptor.m_Parameters.m_Operation)
    {
        case UnaryOperation::Abs:
        {
            AbsQueueDescriptor absQueueDescriptor;
            absQueueDescriptor.m_Inputs  = descriptor.m_Inputs;
            absQueueDescriptor.m_Outputs = descriptor.m_Outputs;

            return std::make_unique<NeonAbsWorkload>(absQueueDescriptor, info);
        }
        case UnaryOperation::Exp:
            return std::make_unique<NeonExpWorkload>(descriptor, info);
        case UnaryOperation::LogicalNot:
            return std::make_unique<NeonLogicalNotWorkload>(descriptor, info);
        case UnaryOperation::Log:
            return std::make_unique<NeonLogWorkload>(descriptor, info);
        case UnaryOperation::Neg:
            return std::make_unique<NeonNegWorkload>(descriptor, info);
        case UnaryOperation::Rsqrt:
        {
            RsqrtQueueDescriptor rsqrtQueueDescriptor;
            rsqrtQueueDescriptor.m_Inputs  = descriptor.m_Inputs;
            rsqrtQueueDescriptor.m_Outputs = descriptor.m_Outputs;

            return std::make_unique<NeonRsqrtWorkload>(rsqrtQueueDescriptor, info);
        }
        case UnaryOperation::Sin:
            return std::make_unique<NeonSinWorkload>(descriptor, info);
        default:
            return nullptr;
    }
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFill(const FillQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return std::make_unique<NeonFillWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return MakeWorkloadHelper<NeonFloorFloatWorkload, NullWorkload>(descriptor, info);
}

std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateFullyConnected(
    const FullyConnectedQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonFullyConnectedWorkload>(descriptor, info, m_MemoryManager->GetIntraLayerManager());
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateGather(const armnn::GatherQueueDescriptor& descriptor,
                                                             const armnn::WorkloadInfo& info) const
{
    return std::make_unique<NeonGatherWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateInstanceNormalization(
    const InstanceNormalizationQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<NeonInstanceNormalizationWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
                                                                      const WorkloadInfo& info) const
{
    return MakeWorkloadHelper<NeonL2NormalizationFloatWorkload, NullWorkload>(descriptor, info,
                                                                              m_MemoryManager->GetIntraLayerManager());
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& info) const
{
    return std::make_unique<NeonLogSoftmaxWorkload>(descriptor, info, m_MemoryManager->GetIntraLayerManager());
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateLogicalBinary(const LogicalBinaryQueueDescriptor& descriptor,
                                                                    const WorkloadInfo& info) const
{
    switch(descriptor.m_Parameters.m_Operation)
    {
        case LogicalBinaryOperation::LogicalAnd:
            return std::make_unique<NeonLogicalAndWorkload>(descriptor, info);
        case LogicalBinaryOperation::LogicalOr:
            return std::make_unique<NeonLogicalOrWorkload>(descriptor, info);
        default:
            return nullptr;
    }
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateLstm(const LstmQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return MakeWorkloadHelper<NeonLstmFloatWorkload, NullWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    return std::make_unique<NeonMaximumWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMean(const MeanQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return std::make_unique<NeonMeanWorkload>(descriptor, info);
}

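// MemCopy and MemImport validate their inputs up front and throw
// InvalidArgumentException on a missing or null input handle.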
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
                                                                     const WorkloadInfo& info) const
{
    if (descriptor.m_Inputs.empty() || !descriptor.m_Inputs[0])
    {
        throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemCopy workload");
    }

    return MakeWorkloadHelper<CopyMemGenericWorkload, CopyMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& descriptor,
                                                                       const WorkloadInfo& info) const
{
    if (descriptor.m_Inputs.empty() || !descriptor.m_Inputs[0])
    {
        throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemImport workload");
    }

    return std::make_unique<ImportMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    return std::make_unique<NeonMinimumWorkload>(descriptor, info);
}

std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMultiplication(
    const MultiplicationQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonMultiplicationWorkload>(descriptor, info);
}

std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateNormalization(
    const NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return MakeWorkloadHelper<NeonNormalizationFloatWorkload, NullWorkload>(descriptor, info,
                                                                            m_MemoryManager->GetIntraLayerManager());
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return std::make_unique<NeonPadWorkload>(descriptor, info);
}

std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
                                                                     const WorkloadInfo& info) const
{
    return std::make_unique<NeonPermuteWorkload>(descriptor, info);
}

std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
                                                                       const WorkloadInfo& info) const
{
    return std::make_unique<NeonPooling2dWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
                                                                  const WorkloadInfo& info) const
{
    return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
}

std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreatePrelu(const armnn::PreluQueueDescriptor& descriptor,
                                                                   const armnn::WorkloadInfo& info) const
{
    return std::make_unique<NeonPreluWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateQLstm(const QLstmQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return std::make_unique<NeonQLstmWorkload>(descriptor, info);
}

std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& descriptor,
                                                                      const WorkloadInfo& info) const
{
    return std::make_unique<NeonQuantizeWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& descriptor,
                                                                    const WorkloadInfo& info) const
{
    return std::make_unique<NeonQuantizedLstmWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateRank(const RankQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return std::make_unique<NeonRankWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateReduce(const ReduceQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    return std::make_unique<NeonReduceWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    return std::make_unique<NeonReshapeWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    return std::make_unique<NeonResizeWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return std::make_unique<NeonSliceWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    return std::make_unique<NeonSoftmaxWorkload>(descriptor, info, m_MemoryManager->GetIntraLayerManager());
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
                                                                     const WorkloadInfo& info) const
{
    return std::make_unique<NeonSpaceToBatchNdWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
{
    return std::make_unique<NeonSpaceToDepthWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    return std::make_unique<NeonSplitterWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateStack(const StackQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return std::make_unique<NeonStackWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
{
    return std::make_unique<NeonStridedSliceWorkload>(descriptor, info);
}

std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateSubtraction(
    const SubtractionQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonSubtractionWorkload>(descriptor, info);
}

std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& descriptor,
                                                                       const WorkloadInfo& info) const
{
    return std::make_unique<NeonTransposeWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateTransposeConvolution2d(
    const TransposeConvolution2dQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<NeonTransposeConvolution2dWorkload>(descriptor, info,
                                                                m_MemoryManager->GetIntraLayerManager());
}

} // namespace armnn