Arm NN 23.05 — source listing of NeonWorkloadFactory.cpp.
1 //
2 // Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "NeonBackendId.hpp"
8 #include "NeonTensorHandle.hpp"
10 
11 #include <Layer.hpp>
12 
13 #include <armnn/Utils.hpp>
17 
22 
25 
26 namespace armnn
27 {
28 
29 namespace
30 {
31 static const BackendId s_Id{NeonBackendId()};
32 }
33 
35  Optional<DataType> dataType,
36  std::string& outReasonIfUnsupported)
37 {
38  return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
39 }
40 
42  Optional<DataType> dataType,
43  std::string& outReasonIfUnsupported,
44  const ModelOptions& modelOptions)
45 {
46  return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported, modelOptions);
47 }
48 
50 {
51  return s_Id;
52 }
53 
54 void NeonWorkloadFactory::SetNumberOfThreads()
55 {
56  if (m_ModelContextPtr)
57  {
58  const unsigned int MIN_THREADS = 1;
59  const unsigned int MAX_THREADS = 64;
60 
61  // Set the number of threads to be used if the user has set NumberOfThreads param
62  // Only set if within limit or valid input
63  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
64  auto numberOfThreads = modelOptions->GetNumberOfThreads();
65 
66  if (numberOfThreads != 0 && numberOfThreads >= MIN_THREADS && numberOfThreads <= MAX_THREADS)
67  {
68  arm_compute::Scheduler::get().set_num_threads(numberOfThreads);
69  }
70  }
71 }
72 
73 NeonWorkloadFactory::NeonWorkloadFactory(const std::shared_ptr<NeonMemoryManager>& memoryManager)
74  : m_MemoryManager(memoryManager), m_ModelContextPtr(IBackendInternal::IBackendSpecificModelContextPtr{})
75 {
76  SetNumberOfThreads();
77 }
78 
79 NeonWorkloadFactory::NeonWorkloadFactory(const std::shared_ptr<NeonMemoryManager>& memoryManager,
81  : m_MemoryManager(memoryManager), m_ModelContextPtr(modelContextPtr)
82 {
83  SetNumberOfThreads();
84 }
85 
86 std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateSubTensorHandle(ITensorHandle& parent,
87  TensorShape const& subTensorShape,
88  unsigned int const* subTensorOrigin) const
89 {
90  const arm_compute::TensorShape shape = armcomputetensorutils::BuildArmComputeTensorShape(subTensorShape);
91 
93  coords.set_num_dimensions(subTensorShape.GetNumDimensions());
94  for (unsigned int i = 0; i < subTensorShape.GetNumDimensions(); i++)
95  {
96  // Arm compute indexes tensor coords in reverse order.
97  unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
98  coords.set(i, armnn::numeric_cast<int>(subTensorOrigin[revertedIndex]));
99  }
100 
101  const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
102  if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
103  {
104  return nullptr;
105  }
106 
107  return std::make_unique<NeonSubTensorHandle>(
108  PolymorphicDowncast<IAclTensorHandle*>(&parent), shape, coords);
109 }
110 
111 std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
112  const bool IsMemoryManaged) const
113 {
114  auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo);
115  if (IsMemoryManaged)
116  {
117  tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
118  }
119  return tensorHandle;
120 }
121 
122 std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
123  DataLayout dataLayout,
124  const bool IsMemoryManaged) const
125 {
126  auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo, dataLayout);
127  if (IsMemoryManaged)
128  {
129  tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
130  }
131  return tensorHandle;
132 }
133 
134 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateWorkload(LayerType type,
135  const QueueDescriptor& descriptor,
136  const WorkloadInfo& info) const
137 {
138  switch(type)
139  {
140  case LayerType::Activation :
141  {
142  auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
143  return std::make_unique<NeonActivationWorkload>(*activationQueueDescriptor, info);
144  }
145  case LayerType::Addition :
146  {
147  auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
148  return std::make_unique<NeonAdditionWorkload>(*additionQueueDescriptor, info);
149  }
150  case LayerType::ArgMinMax :
151  {
152  auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
153  return std::make_unique<NeonArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info);
154  }
156  {
157  auto batchMatMulQueueDescriptor = PolymorphicDowncast<const BatchMatMulQueueDescriptor*>(&descriptor);
158  bool isFastMathEnabled = false;
159  if (m_ModelContextPtr)
160  {
161  if (m_ModelContextPtr.get() != nullptr)
162  {
163  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
164  if (modelOptions)
165  {
166  isFastMathEnabled = modelOptions->IsFastMathEnabled();
167  }
168  }
169  }
170  return std::make_unique<NeonBatchMatMulWorkload>(*batchMatMulQueueDescriptor, info, isFastMathEnabled);
171  }
173  {
174  auto batchNormalizationQueueDescriptor
175  = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
176  return std::make_unique<NeonBatchNormalizationWorkload>(*batchNormalizationQueueDescriptor, info);
177  }
179  {
180  auto batchToSpaceNdQueueDescriptor
181  = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
182  return std::make_unique<NeonBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info);
183  }
184  case LayerType::Cast :
185  {
186  auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
187  return std::make_unique<NeonCastWorkload>(*castQueueDescriptor, info);
188  }
190  {
191  auto channelShuffleQueueDescriptor = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
192  return std::make_unique<NeonChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info);
193  }
194  case LayerType::Comparison :
195  {
196  auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
197  return std::make_unique<NeonComparisonWorkload>(*comparisonQueueDescriptor, info);
198  }
199  case LayerType::Concat :
200  {
201  auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
202  return std::make_unique<NeonConcatWorkload>(*concatQueueDescriptor, info);
203  }
204  case LayerType::Constant :
205  {
206  auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
207  return std::make_unique<NeonConstantWorkload>(*constantQueueDescriptor, info);
208  }
210  {
211  auto convertFp16ToFp32QueueDescriptor
212  = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
213  return std::make_unique<NeonConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info);
214  }
216  {
217  auto convertFp32ToFp16QueueDescriptor
218  = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
219  return std::make_unique<NeonConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor, info);
220  }
222  {
223  auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
224 
225  bool isFastMathEnabled = false;
226  if (m_ModelContextPtr)
227  {
228  if (m_ModelContextPtr.get() != nullptr)
229  {
230  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
231  if (modelOptions)
232  {
233  isFastMathEnabled = modelOptions->IsFastMathEnabled();
234  }
235  }
236  }
237  return std::make_unique<NeonConvolution2dWorkload>(*convolution2dQueueDescriptor,
238  info,
239  m_MemoryManager->GetIntraLayerManager(),
240  isFastMathEnabled);
241  }
243  {
244  auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
245 
246  bool isFastMathEnabled = false;
247  if (m_ModelContextPtr)
248  {
249  if (m_ModelContextPtr.get() != nullptr)
250  {
251  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
252  if (modelOptions)
253  {
254  isFastMathEnabled = modelOptions->IsFastMathEnabled();
255  }
256  }
257  }
258  return std::make_unique<NeonConvolution3dWorkload>(*convolution3dQueueDescriptor,
259  info,
260  m_MemoryManager->GetIntraLayerManager(),
261  isFastMathEnabled);
262  }
263  case LayerType::Debug :
264  {
265  auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
266  return MakeWorkloadHelper<NullWorkload, NullWorkload>(*debugQueueDescriptor, info);
267  }
269  {
270  auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
271  return std::make_unique<NeonDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info);
272  }
274  {
275  auto depthwiseConvolution2dQueueDescriptor
276  = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
277  return std::make_unique<NeonDepthwiseConvolutionWorkload>(*depthwiseConvolution2dQueueDescriptor, info);
278  }
279  case LayerType::Dequantize :
280  {
281  auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
282  return std::make_unique<NeonDequantizeWorkload>(*dequantizeQueueDescriptor, info);
283  }
285  {
286  auto detectionPostProcessQueueDescriptor
287  = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
288  return MakeWorkloadHelper<NullWorkload, NullWorkload>(*detectionPostProcessQueueDescriptor, info);
289  }
290  case LayerType::Division :
291  {
292  auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
293  return std::make_unique<NeonDivisionWorkload>(*divisionQueueDescriptor, info);
294  }
296  {
297  auto elementwiseBinaryQueueDescriptor
298  = PolymorphicDowncast<const ElementwiseBinaryQueueDescriptor*>(&descriptor);
299 
300  switch (elementwiseBinaryQueueDescriptor->m_Parameters.m_Operation)
301  {
303  {
304  AdditionQueueDescriptor additionQueueDescriptor;
305  additionQueueDescriptor.m_Inputs = descriptor.m_Inputs;
306  additionQueueDescriptor.m_Outputs = descriptor.m_Outputs;
307  return std::make_unique<NeonAdditionWorkload>(additionQueueDescriptor, info);
308  }
310  {
311  DivisionQueueDescriptor divisionQueueDescriptor;
312  divisionQueueDescriptor.m_Inputs = descriptor.m_Inputs;
313  divisionQueueDescriptor.m_Outputs = descriptor.m_Outputs;
314  return std::make_unique<NeonDivisionWorkload>(divisionQueueDescriptor, info);
315  }
317  {
318  MaximumQueueDescriptor maximumQueueDescriptor;
319  maximumQueueDescriptor.m_Inputs = descriptor.m_Inputs;
320  maximumQueueDescriptor.m_Outputs = descriptor.m_Outputs;
321  return std::make_unique<NeonMaximumWorkload>(maximumQueueDescriptor, info);
322  }
324  {
325  MinimumQueueDescriptor minimumQueueDescriptor;
326  minimumQueueDescriptor.m_Inputs = descriptor.m_Inputs;
327  minimumQueueDescriptor.m_Outputs = descriptor.m_Outputs;
328  return std::make_unique<NeonMinimumWorkload>(minimumQueueDescriptor, info);
329  }
331  {
332  MultiplicationQueueDescriptor multiplicationQueueDescriptor;
333  multiplicationQueueDescriptor.m_Inputs = descriptor.m_Inputs;
334  multiplicationQueueDescriptor.m_Outputs = descriptor.m_Outputs;
335  return std::make_unique<NeonMultiplicationWorkload>(multiplicationQueueDescriptor, info);
336  }
338  {
339  SubtractionQueueDescriptor subtractionQueueDescriptor;
340  subtractionQueueDescriptor.m_Inputs = descriptor.m_Inputs;
341  subtractionQueueDescriptor.m_Outputs = descriptor.m_Outputs;
342  return std::make_unique<NeonSubtractionWorkload>(subtractionQueueDescriptor, info);
343  }
344  default:
345  return nullptr;
346  }
347  }
349  {
350  auto elementwiseUnaryQueueDescriptor
351  = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
352 
353  switch(elementwiseUnaryQueueDescriptor->m_Parameters.m_Operation)
354  {
355  case UnaryOperation::Abs:
356  {
357  AbsQueueDescriptor absQueueDescriptor;
358  absQueueDescriptor.m_Inputs = elementwiseUnaryQueueDescriptor->m_Inputs;
359  absQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
360 
361  return std::make_unique<NeonAbsWorkload>(absQueueDescriptor, info);
362  }
363  case UnaryOperation::Exp:
364  return std::make_unique<NeonExpWorkload>(*elementwiseUnaryQueueDescriptor, info);
366  return std::make_unique<NeonLogicalNotWorkload>(*elementwiseUnaryQueueDescriptor, info);
367  case UnaryOperation::Log:
368  return std::make_unique<NeonLogWorkload>(*elementwiseUnaryQueueDescriptor, info);
369  case UnaryOperation::Neg:
370  return std::make_unique<NeonNegWorkload>(*elementwiseUnaryQueueDescriptor, info);
372  {
373  RsqrtQueueDescriptor rsqrtQueueDescriptor;
374  rsqrtQueueDescriptor.m_Inputs = elementwiseUnaryQueueDescriptor->m_Inputs;
375  rsqrtQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
376 
377  return std::make_unique<NeonRsqrtWorkload>(rsqrtQueueDescriptor, info);
378  }
379  case UnaryOperation::Sin:
380  return std::make_unique<NeonSinWorkload>(*elementwiseUnaryQueueDescriptor, info);
382  return std::make_unique<NeonSqrtWorkload>(*elementwiseUnaryQueueDescriptor, info);
383  default:
384  return nullptr;
385  }
386  }
387  case LayerType::Fill :
388  {
389  auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
390  return std::make_unique<NeonFillWorkload>(*fillQueueDescriptor, info);
391  }
392  case LayerType::Floor :
393  {
394  auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
395  return MakeWorkloadHelper<NeonFloorFloatWorkload, NullWorkload>(*floorQueueDescriptor, info);
396  }
398  {
399  auto fullyConnectedQueueDescriptor = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
400  return std::make_unique<NeonFullyConnectedWorkload>(*fullyConnectedQueueDescriptor,
401  info,
402  m_MemoryManager->GetIntraLayerManager());
403  }
404  case LayerType::Gather :
405  {
406  auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
407  return std::make_unique<NeonGatherWorkload>(*gatherQueueDescriptor, info);
408  }
409  case LayerType::GatherNd :
410  {
411  auto gatherNdQueueDescriptor = PolymorphicDowncast<const GatherNdQueueDescriptor*>(&descriptor);
412  return std::make_unique<NeonGatherNdWorkload>(*gatherNdQueueDescriptor, info);
413  }
414  case LayerType::Input :
415  {
416  auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
417  return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
418  }
420  {
421  auto instanceNormalizationQueueDescriptor
422  = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
423  return std::make_unique<NeonInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor, info);
424  }
426  {
427  auto l2NormalizationQueueDescriptor
428  = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
429  return MakeWorkloadHelper<NeonL2NormalizationFloatWorkload, NullWorkload>
430  (*l2NormalizationQueueDescriptor, info, m_MemoryManager->GetIntraLayerManager());
431  }
432  case LayerType::LogSoftmax :
433  {
434  auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
435  return std::make_unique<NeonLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor,
436  info,
437  m_MemoryManager->GetIntraLayerManager());
438  }
440  {
441  auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
442 
443  switch(logicalBinaryQueueDescriptor->m_Parameters.m_Operation)
444  {
446  return std::make_unique<NeonLogicalAndWorkload>(*logicalBinaryQueueDescriptor, info);
448  return std::make_unique<NeonLogicalOrWorkload>(*logicalBinaryQueueDescriptor, info);
449  default:
450  return nullptr;
451  }
452  }
453  case LayerType::Lstm :
454  {
455  auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
456  return MakeWorkloadHelper<NeonLstmFloatWorkload, NullWorkload>(*lstmQueueDescriptor, info);
457  }
458  case LayerType::Maximum :
459  {
460  auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
461  return std::make_unique<NeonMaximumWorkload>(*maximumQueueDescriptor, info);
462  }
463  case LayerType::Mean :
464  {
465  auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
466  return std::make_unique<NeonMeanWorkload>(*meanQueueDescriptor, info);
467  }
468  case LayerType::MemCopy :
469  {
470  auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
471  if (memCopyQueueDescriptor->m_Inputs.empty() || !memCopyQueueDescriptor->m_Inputs[0])
472  {
473  throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemCopy workload");
474  }
475  return MakeWorkloadHelper<CopyMemGenericWorkload, CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
476  }
477  case LayerType::MemImport :
478  {
479  auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
480  if (memImportQueueDescriptor->m_Inputs.empty() || !memImportQueueDescriptor->m_Inputs[0])
481  {
482  throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemImport workload");
483  }
484  return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
485  }
486  case LayerType::Minimum :
487  {
488  auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
489  return std::make_unique<NeonMinimumWorkload>(*minimumQueueDescriptor, info);
490  }
492  {
493  auto multiplicationQueueDescriptor = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
494  return std::make_unique<NeonMultiplicationWorkload>(*multiplicationQueueDescriptor, info);
495  }
497  {
498  auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
499  return MakeWorkloadHelper<NeonNormalizationFloatWorkload, NullWorkload>
500  (*normalizationQueueDescriptor, info, m_MemoryManager->GetIntraLayerManager());
501  }
502  case LayerType::Output :
503  {
504  auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
505  return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
506  }
507  case LayerType::Pad :
508  {
509  auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
510  return std::make_unique<NeonPadWorkload>(*padQueueDescriptor, info);
511  }
512  case LayerType::Permute :
513  {
514  auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
515  return std::make_unique<NeonPermuteWorkload>(*permuteQueueDescriptor, info);
516  }
517  case LayerType::Pooling2d :
518  {
519  auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
520  return std::make_unique<NeonPooling2dWorkload>(*pooling2dQueueDescriptor, info);
521  }
522  case LayerType::Pooling3d :
523  {
524  auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
525  return std::make_unique<NeonPooling3dWorkload>(*pooling3dQueueDescriptor, info);
526  }
528  {
529  auto preCompiledQueueDescriptor = PolymorphicDowncast<const PreCompiledQueueDescriptor*>(&descriptor);
530  return MakeWorkloadHelper<NullWorkload, NullWorkload>(*preCompiledQueueDescriptor, info);
531  }
532  case LayerType::Prelu :
533  {
534  auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
535  return std::make_unique<NeonPreluWorkload>(*preluQueueDescriptor, info);
536  }
537  case LayerType::QLstm :
538  {
539  auto qLstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
540  return std::make_unique<NeonQLstmWorkload>(*qLstmQueueDescriptor, info);
541  }
542  case LayerType::Quantize :
543  {
544  auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
545  return std::make_unique<NeonQuantizeWorkload>(*quantizeQueueDescriptor, info);
546  }
548  {
549  auto quantizedLstmQueueDescriptor = PolymorphicDowncast<const QuantizedLstmQueueDescriptor*>(&descriptor);
550  return std::make_unique<NeonQuantizedLstmWorkload>(*quantizedLstmQueueDescriptor, info);
551  }
552  case LayerType::Rank :
553  {
554  auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
555  return std::make_unique<NeonRankWorkload>(*rankQueueDescriptor, info);
556  }
557  case LayerType::Reduce :
558  {
559  auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
560  return std::make_unique<NeonReduceWorkload>(*reduceQueueDescriptor, info);
561  }
562  case LayerType::Reshape :
563  {
564  auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
565  return std::make_unique<NeonReshapeWorkload>(*reshapeQueueDescriptor, info);
566  }
567  case LayerType::Resize :
568  {
569  auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
570  return std::make_unique<NeonResizeWorkload>(*resizeQueueDescriptor, info);
571  }
572  case LayerType::Slice :
573  {
574  auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
575  return std::make_unique<NeonSliceWorkload>(*sliceQueueDescriptor, info);
576  }
577  case LayerType::Softmax :
578  {
579  auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
580  return std::make_unique<NeonSoftmaxWorkload>(*softmaxQueueDescriptor,
581  info,
582  m_MemoryManager->GetIntraLayerManager());
583  }
585  {
586  auto spaceToBatchNdQueueDescriptor
587  = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
588  return std::make_unique<NeonSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info);
589  }
591  {
592  auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
593  return std::make_unique<NeonSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info);
594  }
595  case LayerType::Splitter :
596  {
597  auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
598  return std::make_unique<NeonSplitterWorkload>(*splitterQueueDescriptor, info);
599  }
600  case LayerType::Stack :
601  {
602  auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
603  return std::make_unique<NeonStackWorkload>(*stackQueueDescriptor, info);
604  }
606  {
607  auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
608  return std::make_unique<NeonStridedSliceWorkload>(*stridedSliceQueueDescriptor, info);
609  }
611  {
612  auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
613  return std::make_unique<NeonSubtractionWorkload>(*subtractionQueueDescriptor, info);
614  }
615  case LayerType::Transpose :
616  {
617  auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
618  return std::make_unique<NeonTransposeWorkload>(*transposeQueueDescriptor, info);
619  }
621  {
622  auto transposeConvolution2dQueueDescriptor
623  = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
624  return std::make_unique<NeonTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor,
625  info,
626  m_MemoryManager->GetIntraLayerManager());
627  }
629  {
630  auto desc = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
631 
632  if ((info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Float32) &&
633  (info.m_InputTensorInfos[1].GetDataType() == armnn::DataType::Float32) &&
634  (info.m_InputTensorInfos[2].GetDataType() == armnn::DataType::Float32) &&
635  (info.m_OutputTensorInfos[0].GetDataType() == armnn::DataType::Float32) &&
636  (info.m_OutputTensorInfos[1].GetDataType() == armnn::DataType::Float32) &&
637  (info.m_OutputTensorInfos[2].GetDataType() == armnn::DataType::Float32))
638  {
639  return std::make_unique<NeonUnidirectionalSequenceLstmFloatWorkload>(*desc, info);
640  }
641  else
642  {
643  return std::make_unique<NeonUnidirectionalSequenceLstmWorkload>(*desc, info);
644  }
645  }
646  default:
647  return nullptr;
648  }
649 }
650 
651 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
652  const WorkloadInfo& info) const
653 {
654  return std::make_unique<NeonActivationWorkload>(descriptor, info);
655 }
656 
657 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
658  const WorkloadInfo& info) const
659 {
660  return std::make_unique<NeonAdditionWorkload>(descriptor, info);
661 }
662 
663 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
664  const WorkloadInfo& info) const
665 {
666  return std::make_unique<NeonArgMinMaxWorkload>(descriptor, info);
667 }
668 
669 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateBatchNormalization(
670  const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
671 {
672  return std::make_unique<NeonBatchNormalizationWorkload>(descriptor, info);
673 }
674 
675 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
676  const WorkloadInfo& info) const
677 {
678  return std::make_unique<NeonBatchToSpaceNdWorkload>(descriptor, info);
679 }
680 
681 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateCast(const CastQueueDescriptor& descriptor,
682  const WorkloadInfo& info) const
683 {
684  return std::make_unique<NeonCastWorkload>(descriptor, info);
685 }
686 
687 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateChannelShuffle(const ChannelShuffleQueueDescriptor& descriptor,
688  const WorkloadInfo& info) const
689 {
690  return std::make_unique<NeonChannelShuffleWorkload>(descriptor, info);
691 }
692 
693 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& descriptor,
694  const WorkloadInfo& info) const
695 {
696  return std::make_unique<NeonComparisonWorkload>(descriptor, info);
697 }
698 
699 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor,
700  const WorkloadInfo& info) const
701 {
702  return std::make_unique<NeonConcatWorkload>(descriptor, info);
703 }
704 
705 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor,
706  const WorkloadInfo& info) const
707 {
708  return std::make_unique<NeonConstantWorkload>(descriptor, info);
709 }
710 
711 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertFp16ToFp32(
712  const ConvertFp16ToFp32QueueDescriptor& descriptor,
713  const WorkloadInfo& info) const
714 {
715  return std::make_unique<NeonConvertFp16ToFp32Workload>(descriptor, info);
716 }
717 
718 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertFp32ToFp16(
719  const ConvertFp32ToFp16QueueDescriptor& descriptor,
720  const WorkloadInfo& info) const
721 {
722  return std::make_unique<NeonConvertFp32ToFp16Workload>(descriptor, info);
723 }
724 
725 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateConvolution2d(
726  const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const
727 {
728  bool isFastMathEnabled = false;
729  if (m_ModelContextPtr)
730  {
731  if (m_ModelContextPtr.get() != nullptr)
732  {
733  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
734  if (modelOptions)
735  {
736  isFastMathEnabled = modelOptions->IsFastMathEnabled();
737  }
738  }
739  }
740  return std::make_unique<NeonConvolution2dWorkload>(descriptor,
741  info,
742  m_MemoryManager->GetIntraLayerManager(),
743  isFastMathEnabled);
744 }
745 
746 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateConvolution3d(
747  const Convolution3dQueueDescriptor& descriptor, const WorkloadInfo& info) const
748 {
749  bool isFastMathEnabled = false;
750  if (m_ModelContextPtr)
751  {
752  if (m_ModelContextPtr.get() != nullptr)
753  {
754  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
755  if (modelOptions)
756  {
757  isFastMathEnabled = modelOptions->IsFastMathEnabled();
758  }
759  }
760  }
761  return std::make_unique<NeonConvolution3dWorkload>(descriptor,
762  info,
763  m_MemoryManager->GetIntraLayerManager(),
764  isFastMathEnabled);
765 }
766 
767 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
768  const WorkloadInfo& info) const
769 {
770  return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
771 }
772 
773 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& descriptor,
774  const WorkloadInfo& info) const
775 {
776  return std::make_unique<NeonDepthToSpaceWorkload>(descriptor, info);
777 }
778 
779 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDepthwiseConvolution2d(
780  const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const
781 {
782  return std::make_unique<NeonDepthwiseConvolutionWorkload>(descriptor, info);
783 }
784 
785 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDequantize(const DequantizeQueueDescriptor& descriptor,
786  const WorkloadInfo& info) const
787 {
788  return std::make_unique<NeonDequantizeWorkload>(descriptor, info);
789 }
790 
791 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDetectionPostProcess(
792  const armnn::DetectionPostProcessQueueDescriptor& descriptor, const armnn::WorkloadInfo& info) const
793 {
794  return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
795 }
796 
797 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateDivision(
798  const DivisionQueueDescriptor& descriptor, const WorkloadInfo& info) const
799 {
800  return std::make_unique<NeonDivisionWorkload>(descriptor, info);
801 }
802 
803 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateElementwiseUnary(
804  const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info) const
805 {
806  switch(descriptor.m_Parameters.m_Operation)
807  {
808  case UnaryOperation::Abs:
809  {
810  AbsQueueDescriptor absQueueDescriptor;
811  absQueueDescriptor.m_Inputs = descriptor.m_Inputs;
812  absQueueDescriptor.m_Outputs = descriptor.m_Outputs;
813 
814  return std::make_unique<NeonAbsWorkload>(absQueueDescriptor, info);
815  }
816  case UnaryOperation::Exp:
817  return std::make_unique<NeonExpWorkload>(descriptor, info);
819  return std::make_unique<NeonLogicalNotWorkload>(descriptor, info);
820  case UnaryOperation::Log:
821  return std::make_unique<NeonLogWorkload>(descriptor, info);
822  case UnaryOperation::Neg:
823  return std::make_unique<NeonNegWorkload>(descriptor, info);
825  {
826  RsqrtQueueDescriptor rsqrtQueueDescriptor;
827  rsqrtQueueDescriptor.m_Inputs = descriptor.m_Inputs;
828  rsqrtQueueDescriptor.m_Outputs = descriptor.m_Outputs;
829 
830  return std::make_unique<NeonRsqrtWorkload>(rsqrtQueueDescriptor, info);
831  }
832  case UnaryOperation::Sin:
833  return std::make_unique<NeonSinWorkload>(descriptor, info);
834  default:
835  return nullptr;
836  }
837 }
838 
839 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFill(const FillQueueDescriptor& descriptor,
840  const WorkloadInfo& info) const
841 {
842  return std::make_unique<NeonFillWorkload>(descriptor, info);
843 }
844 
845 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
846  const WorkloadInfo& info) const
847 {
848  return MakeWorkloadHelper<NeonFloorFloatWorkload, NullWorkload>(descriptor, info);
849 }
850 
851 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateFullyConnected(
852  const FullyConnectedQueueDescriptor& descriptor, const WorkloadInfo& info) const
853 {
854  return std::make_unique<NeonFullyConnectedWorkload>(descriptor, info, m_MemoryManager->GetIntraLayerManager());
855 }
856 
857 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateGather(const armnn::GatherQueueDescriptor& descriptor,
858  const armnn::WorkloadInfo& info) const
859 {
860  return std::make_unique<NeonGatherWorkload>(descriptor, info);
861 }
862 
863 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor,
864  const WorkloadInfo& info) const
865 {
866  return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
867 }
868 
869 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateInstanceNormalization(
870  const InstanceNormalizationQueueDescriptor& descriptor,
871  const WorkloadInfo& info) const
872 {
873  return std::make_unique<NeonInstanceNormalizationWorkload>(descriptor, info);
874 }
875 
876 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
877  const WorkloadInfo& info) const
878 {
879  return MakeWorkloadHelper<NeonL2NormalizationFloatWorkload, NullWorkload>(descriptor, info,
880  m_MemoryManager->GetIntraLayerManager());
881 }
882 
883 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
884  const WorkloadInfo& info) const
885 {
886  return std::make_unique<NeonLogSoftmaxWorkload>(descriptor, info, m_MemoryManager->GetIntraLayerManager());
887 }
888 
889 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateLogicalBinary(const LogicalBinaryQueueDescriptor& descriptor,
890  const WorkloadInfo& info) const
891 {
892  switch(descriptor.m_Parameters.m_Operation)
893  {
895  return std::make_unique<NeonLogicalAndWorkload>(descriptor, info);
897  return std::make_unique<NeonLogicalOrWorkload>(descriptor, info);
898  default:
899  return nullptr;
900  }
901 }
902 
903 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateLstm(const LstmQueueDescriptor& descriptor,
904  const WorkloadInfo& info) const
905 {
906  return MakeWorkloadHelper<NeonLstmFloatWorkload, NullWorkload>(descriptor, info);
907 }
908 
909 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& descriptor,
910  const WorkloadInfo& info) const
911 {
912  return std::make_unique<NeonMaximumWorkload>(descriptor, info);
913 }
914 
915 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMean(const MeanQueueDescriptor& descriptor,
916  const WorkloadInfo& info) const
917 {
918  return std::make_unique<NeonMeanWorkload>(descriptor, info);
919 }
920 
921 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
922  const WorkloadInfo& info) const
923 {
924  if (descriptor.m_Inputs.empty() || !descriptor.m_Inputs[0])
925  {
926  throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemCopy workload");
927  }
928 
929  return MakeWorkloadHelper<CopyMemGenericWorkload, CopyMemGenericWorkload>(descriptor, info);
930 }
931 
932 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& descriptor,
933  const WorkloadInfo& info) const
934 {
935  if (descriptor.m_Inputs.empty() || !descriptor.m_Inputs[0])
936  {
937  throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemImport workload");
938  }
939 
940  return std::make_unique<ImportMemGenericWorkload>(descriptor, info);
941 }
942 
943 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor,
944  const WorkloadInfo& info) const
945 {
946  return std::make_unique<NeonMinimumWorkload>(descriptor, info);
947 }
948 
949 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMultiplication(
950  const MultiplicationQueueDescriptor& descriptor, const WorkloadInfo& info) const
951 {
952  return std::make_unique<NeonMultiplicationWorkload>(descriptor, info);
953 }
954 
955 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateNormalization(
956  const NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
957 {
958  return MakeWorkloadHelper<NeonNormalizationFloatWorkload, NullWorkload>(descriptor, info,
959  m_MemoryManager->GetIntraLayerManager());
960 }
961 
962 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor,
963  const WorkloadInfo& info) const
964 {
965  return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
966 }
967 
968 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
969  const WorkloadInfo& info) const
970 {
971  return std::make_unique<NeonPadWorkload>(descriptor, info);
972 }
973 
974 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
975  const WorkloadInfo& info) const
976 {
977  return std::make_unique<NeonPermuteWorkload>(descriptor, info);
978 }
979 
980 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
981  const WorkloadInfo& info) const
982 {
983  return std::make_unique<NeonPooling2dWorkload>(descriptor, info);
984 }
985 
986 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
987  const WorkloadInfo& info) const
988 {
989  return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
990 }
991 
992 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreatePrelu(const armnn::PreluQueueDescriptor &descriptor,
993  const armnn::WorkloadInfo &info) const
994 {
995  return std::make_unique<NeonPreluWorkload>(descriptor, info);
996 }
997 
998 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateQLstm(const QLstmQueueDescriptor& descriptor,
999  const WorkloadInfo& info) const
1000 {
1001  return std::make_unique<NeonQLstmWorkload>(descriptor, info);
1002 }
1003 
1004 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& descriptor,
1005  const WorkloadInfo& info) const
1006 {
1007  return std::make_unique<NeonQuantizeWorkload>(descriptor, info);
1008 }
1009 
1010 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& descriptor,
1011  const WorkloadInfo& info) const
1012 {
1013  return std::make_unique<NeonQuantizedLstmWorkload>(descriptor, info);
1014 }
1015 
1016 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateRank(const RankQueueDescriptor& descriptor,
1017  const WorkloadInfo& info) const
1018 {
1019  return std::make_unique<NeonRankWorkload>(descriptor, info);
1020 }
1021 
1022 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateReduce(const ReduceQueueDescriptor& descriptor,
1023  const WorkloadInfo& info) const
1024 {
1025  return std::make_unique<NeonReduceWorkload>(descriptor, info);
1026 }
1027 
1028 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
1029  const WorkloadInfo& info) const
1030 {
1031  return std::make_unique<NeonReshapeWorkload>(descriptor, info);
1032 }
1033 
1034 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
1035  const WorkloadInfo& info) const
1036 {
1037  return std::make_unique<NeonResizeWorkload>(descriptor, info);
1038 }
1039 
1040 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor,
1041  const WorkloadInfo& info) const
1042 {
1043  return std::make_unique<NeonSliceWorkload>(descriptor, info);
1044 }
1045 
1046 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
1047  const WorkloadInfo& info) const
1048 {
1049  return std::make_unique<NeonSoftmaxWorkload>(descriptor, info, m_MemoryManager->GetIntraLayerManager());
1050 }
1051 
1052 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
1053  const WorkloadInfo& info) const
1054 {
1055  return std::make_unique<NeonSpaceToBatchNdWorkload>(descriptor, info);
1056 }
1057 
1058 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
1059  const WorkloadInfo& info) const
1060 {
1061  return std::make_unique<NeonSpaceToDepthWorkload>(descriptor, info);
1062 }
1063 
1064 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor,
1065  const WorkloadInfo& info) const
1066 {
1067  return std::make_unique<NeonSplitterWorkload>(descriptor, info);
1068 }
1069 
1070 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateStack(const StackQueueDescriptor& descriptor,
1071  const WorkloadInfo& info) const
1072 {
1073  return std::make_unique<NeonStackWorkload>(descriptor, info);
1074 }
1075 
1076 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
1077  const WorkloadInfo& info) const
1078 {
1079  return std::make_unique<NeonStridedSliceWorkload>(descriptor, info);
1080 }
1081 
1082 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateSubtraction(
1083  const SubtractionQueueDescriptor& descriptor, const WorkloadInfo& info) const
1084 {
1085  return std::make_unique<NeonSubtractionWorkload>(descriptor, info);
1086 }
1087 
1088 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& descriptor,
1089  const WorkloadInfo& info) const
1090 {
1091  return std::make_unique<NeonTransposeWorkload>(descriptor, info);
1092 }
1093 
1094 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateTransposeConvolution2d(
1095  const TransposeConvolution2dQueueDescriptor &descriptor,
1096  const WorkloadInfo &info) const
1097 {
1098  return std::make_unique<NeonTransposeConvolution2dWorkload>(descriptor, info,
1099  m_MemoryManager->GetIntraLayerManager());
1100 }
1101 
1102 } // namespace armnn
armnn::MinimumQueueDescriptor
Definition: WorkloadData.hpp:468
armnn::LayerType::Floor
@ Floor
armnn::WorkloadFactoryBase::CreateMinimum
std::unique_ptr< IWorkload > CreateMinimum(const MinimumQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:175
armnn::LayerType::MemCopy
@ MemCopy
armnn::WorkloadFactoryBase::CreatePad
std::unique_ptr< IWorkload > CreatePad(const PadQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:191
armnn::BackendId
Definition: BackendId.hpp:75
armnn::LayerType::Softmax
@ Softmax
armnn::LayerType::Pooling3d
@ Pooling3d
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::WorkloadFactoryBase::CreateConcat
std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:66
armnn::LayerType::Transpose
@ Transpose
armnn::LayerType::ChannelShuffle
@ ChannelShuffle
armnn::NeonBackendId
constexpr const char * NeonBackendId()
Definition: NeonBackendId.hpp:10
armnn::DataLayout
DataLayout
Definition: Types.hpp:62
Utils.hpp
armnn::DivisionQueueDescriptor
Definition: WorkloadData.hpp:265
armnn::WorkloadFactoryBase::CreatePreCompiled
std::unique_ptr< IWorkload > CreatePreCompiled(const PreCompiledQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:207
armnn::LayerType::ConvertFp32ToFp16
@ ConvertFp32ToFp16
armnn::LayerType::L2Normalization
@ L2Normalization
armnn::LogicalBinaryOperation::LogicalAnd
@ LogicalAnd
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
armnn::WorkloadFactoryBase::CreateBatchNormalization
std::unique_ptr< IWorkload > CreateBatchNormalization(const BatchNormalizationQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:54
armnn::NeonBackendModelContext
The NeonBackendModelContext is used to pass in Neon specific backend ModelOptions.
Definition: NeonBackendModelContext.hpp:19
armnn::LayerType::Input
@ Input
armnn::WorkloadFactoryBase::CreateLogSoftmax
std::unique_ptr< IWorkload > CreateLogSoftmax(const LogSoftmaxQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:147
NeonBackendModelContext.hpp
armnn::LayerType::Slice
@ Slice
armnn::IConnectableLayer
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:68
armnn::WorkloadFactoryBase::CreateMean
std::unique_ptr< IWorkload > CreateMean(const MeanQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:159
armnn::LayerType::ElementwiseBinary
@ ElementwiseBinary
armnn::LayerType::Maximum
@ Maximum
armnn::GatherQueueDescriptor
Definition: WorkloadData.hpp:502
armnn::ITensorHandle::GetShape
virtual TensorShape GetShape() const =0
Get the number of elements for each dimension ordered from slowest iterating dimension to fastest ite...
armnn::LayerType::Quantize
@ Quantize
armnn::DetectionPostProcessQueueDescriptor
Definition: WorkloadData.hpp:234
armnn::IBackendInternal
Definition: IBackendInternal.hpp:77
armnn::AdditionQueueDescriptor
Definition: WorkloadData.hpp:253
armnn::LayerType::ArgMinMax
@ ArgMinMax
armnn::LayerType::Subtraction
@ Subtraction
armnn::WorkloadFactoryBase::CreateMemImport
std::unique_ptr< IWorkload > CreateMemImport(const MemImportQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:167
armnn::WorkloadFactoryBase::CreateQuantize
std::unique_ptr< IWorkload > CreateQuantize(const QuantizeQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:215
armnn::LayerType::SpaceToBatchNd
@ SpaceToBatchNd
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::UnaryOperation::Exp
@ Exp
armnn::RsqrtQueueDescriptor
Definition: WorkloadData.hpp:492
PolymorphicDowncast.hpp
armnn::Layer
Definition: Layer.hpp:217
armnn::WorkloadFactoryBase::CreateQuantizedLstm
std::unique_ptr< IWorkload > CreateQuantizedLstm(const QuantizedLstmQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:219
armnn::ModelOptions
std::vector< BackendOptions > ModelOptions
Definition: BackendOptions.hpp:18
armnn::WorkloadFactoryBase::CreateSpaceToDepth
std::unique_ptr< IWorkload > CreateSpaceToDepth(const SpaceToDepthQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:247
armnn::UnaryOperation::Sqrt
@ Sqrt
armnn::WorkloadFactoryBase::CreateStridedSlice
std::unique_ptr< IWorkload > CreateStridedSlice(const StridedSliceQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:263
armnn::UnaryOperation::Neg
@ Neg
armnn::WorkloadFactoryBase::CreateBatchToSpaceNd
std::unique_ptr< IWorkload > CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:58
armnn::WorkloadFactoryBase::CreateDetectionPostProcess
std::unique_ptr< IWorkload > CreateDetectionPostProcess(const DetectionPostProcessQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:102
TensorHandle.hpp
armnn::LayerType::Permute
@ Permute
armnn::WorkloadFactoryBase::CreateAddition
std::unique_ptr< IWorkload > CreateAddition(const AdditionQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:46
armnn::LayerType::ConvertFp16ToFp32
@ ConvertFp16ToFp32
armnn::NeonBackendModelContext::GetNumberOfThreads
unsigned int GetNumberOfThreads() const
Definition: NeonBackendModelContext.cpp:58
armnn::WorkloadFactoryBase::CreateConvertFp16ToFp32
std::unique_ptr< IWorkload > CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:74
armnn::LayerType::QLstm
@ QLstm
armnn::LayerType::Pad
@ Pad
armnn::LayerType::Addition
@ Addition
armnn::WorkloadFactoryBase::CreateDepthwiseConvolution2d
std::unique_ptr< IWorkload > CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:94
armnn::LayerType::QuantizedLstm
@ QuantizedLstm
armnn::LayerType::BatchNormalization
@ BatchNormalization
NeonWorkloads.hpp
armnn::LayerType::Reduce
@ Reduce
armnn::WorkloadFactoryBase::CreateStack
std::unique_ptr< IWorkload > CreateStack(const StackQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:259
armnn::WorkloadFactoryBase::CreateFullyConnected
std::unique_ptr< IWorkload > CreateFullyConnected(const FullyConnectedQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:131
armnn::WorkloadFactoryBase::CreateGather
std::unique_ptr< IWorkload > CreateGather(const GatherQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:135
armnn::WorkloadFactoryBase::CreateDepthToSpace
std::unique_ptr< IWorkload > CreateDepthToSpace(const DepthToSpaceQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:90
armnn::LayerType::Division
@ Division
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::LayerType::Debug
@ Debug
armnn::LayerType::InstanceNormalization
@ InstanceNormalization
NeonWorkloadFactory.hpp
armnn::WorkloadFactoryBase::CreateReshape
std::unique_ptr< IWorkload > CreateReshape(const ReshapeQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:227
armnn::ITensorHandle
Definition: ITensorHandle.hpp:15
armnn::WorkloadFactoryBase::CreateMaximum
std::unique_ptr< IWorkload > CreateMaximum(const MaximumQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:155
armnn::LayerType::Activation
@ Activation
armnn::BinaryOperation::Sub
@ Sub
armnn::LayerType::Normalization
@ Normalization
armnn::NeonBackendModelContext::IsFastMathEnabled
bool IsFastMathEnabled() const
Definition: NeonBackendModelContext.cpp:53
armnn::WorkloadFactoryBase::CreateTranspose
std::unique_ptr< IWorkload > CreateTranspose(const TransposeQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:271
armnn::LayerType::Comparison
@ Comparison
armnn::LayerType::Stack
@ Stack
armnn::TensorShape
Definition: Tensor.hpp:20
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:479
armnn::LayerType::Reshape
@ Reshape
armnn::IBackendInternal::IBackendSpecificModelContextPtr
std::shared_ptr< IBackendModelContext > IBackendSpecificModelContextPtr
Definition: IBackendInternal.hpp:96
armnn::LayerType::Gather
@ Gather
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::LogicalBinaryOperation::LogicalOr
@ LogicalOr
armnn::WorkloadFactoryBase::CreateSubtraction
std::unique_ptr< IWorkload > CreateSubtraction(const SubtractionQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:251
armnn::LayerType::Fill
@ Fill
armnn::LayerType::Resize
@ Resize
armnn::MultiplicationQueueDescriptor
Definition: WorkloadData.hpp:259
armnn::WorkloadFactoryBase::CreateMultiplication
std::unique_ptr< IWorkload > CreateMultiplication(const MultiplicationQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:179
armnn::NeonWorkloadFactory::CreateTensorHandle
std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const override
Definition: NeonWorkloadFactory.cpp:111
armnn::WorkloadFactoryBase::CreateInput
std::unique_ptr< IWorkload > CreateInput(const InputQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:24
armnn::LayerType::Rank
@ Rank
armnn::BinaryOperation::Mul
@ Mul
armnn::PreluQueueDescriptor
Definition: WorkloadData.hpp:534
NeonBackendId.hpp
armnn::LayerType::LogicalBinary
@ LogicalBinary
armnn::LayerType::UnidirectionalSequenceLstm
@ UnidirectionalSequenceLstm
armnn::InputQueueDescriptor
MemCopyQueueDescriptor InputQueueDescriptor
Definition: WorkloadData.hpp:91
armnn::WorkloadFactoryBase::CreatePrelu
std::unique_ptr< IWorkload > CreatePrelu(const PreluQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:211
armnn::LayerType::Pooling2d
@ Pooling2d
armnn::DataType::Float32
@ Float32
armnn::IWorkloadFactory::IsLayerSupported
static bool IsLayerSupported(const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
Definition: WorkloadFactory.cpp:1548
armnn::WorkloadFactoryBase::CreateActivation
std::unique_ptr< IWorkload > CreateActivation(const ActivationQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:42
armnn::LayerType::GatherNd
@ GatherNd
armnn::QueueDescriptor
Definition: WorkloadData.hpp:24
armnn::NeonWorkloadFactory::IsLayerSupported
static bool IsLayerSupported(const Layer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
Definition: NeonWorkloadFactory.cpp:34
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::LayerType::Minimum
@ Minimum
armnn::LayerType::Constant
@ Constant
Layer.hpp
armnn::LayerType::Lstm
@ Lstm
NeonWorkloadUtils.hpp
armnn::WorkloadFactoryBase::CreateTransposeConvolution2d
std::unique_ptr< IWorkload > CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:275
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
armnn::WorkloadFactoryBase::CreatePooling2d
std::unique_ptr< IWorkload > CreatePooling2d(const Pooling2dQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:199
armnn::WorkloadFactoryBase::CreateFloor
std::unique_ptr< IWorkload > CreateFloor(const FloorQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:127
armnn::LayerType::SpaceToDepth
@ SpaceToDepth
armnn::WorkloadFactoryBase::CreateDivision
std::unique_ptr< IWorkload > CreateDivision(const DivisionQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:106
armnn::WorkloadFactoryBase::CreateSpaceToBatchNd
std::unique_ptr< IWorkload > CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:243
armnn::BinaryOperation::Minimum
@ Minimum
armnn::WorkloadFactoryBase::CreateComparison
std::unique_ptr< IWorkload > CreateComparison(const ComparisonQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:62
armnn::WorkloadFactoryBase::CreateNormalization
std::unique_ptr< IWorkload > CreateNormalization(const NormalizationQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:183
armnn::NeonWorkloadFactory::NeonWorkloadFactory
NeonWorkloadFactory(const std::shared_ptr< NeonMemoryManager > &memoryManager)
Definition: NeonWorkloadFactory.cpp:73
armnn::NeonWorkloadFactory::CreateSubTensorHandle
std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const override
Definition: NeonWorkloadFactory.cpp:86
armnn::WorkloadFactoryBase::CreateInstanceNormalization
std::unique_ptr< IWorkload > CreateInstanceNormalization(const InstanceNormalizationQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:139
armnn::WorkloadFactoryBase::CreateSplitter
std::unique_ptr< IWorkload > CreateSplitter(const SplitterQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:255
armnn::BinaryOperation::Maximum
@ Maximum
armnn::UnaryOperation::Abs
@ Abs
armnn::WorkloadInfo
Contains information about TensorInfos of a layer.
Definition: WorkloadInfo.hpp:16
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::WorkloadFactoryBase::CreateDebug
std::unique_ptr< IWorkload > CreateDebug(const DebugQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:86
armnn::WorkloadFactoryBase::CreateSlice
std::unique_ptr< IWorkload > CreateSlice(const SliceQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:235
armnn::LayerType::DetectionPostProcess
@ DetectionPostProcess
armnn::LayerType::Mean
@ Mean
armnn::WorkloadFactoryBase::CreateLstm
std::unique_ptr< IWorkload > CreateLstm(const LstmQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:151
armnn::WorkloadFactoryBase::CreateConvertFp32ToFp16
std::unique_ptr< IWorkload > CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:78
MakeWorkloadHelper.hpp
armnn::BinaryOperation::Add
@ Add
armnn::TensorShape::GetNumDimensions
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
armnn::LayerType::BatchToSpaceNd
@ BatchToSpaceNd
armnn::WorkloadFactoryBase::CreateOutput
std::unique_ptr< IWorkload > CreateOutput(const OutputQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:187
armnn::WorkloadFactoryBase::CreateResize
std::unique_ptr< IWorkload > CreateResize(const ResizeQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:231
armnn::LayerType::DepthToSpace
@ DepthToSpace
armnn::AbsQueueDescriptor
Definition: WorkloadData.hpp:646
armnn::UnaryOperation::LogicalNot
@ LogicalNot
NeonTensorHandle.hpp
armnn::UnaryOperation::Sin
@ Sin
armnn::Optional
Definition: Optional.hpp:270
armnn::NeonWorkloadFactory::GetBackendId
const BackendId & GetBackendId() const override
Definition: NeonWorkloadFactory.cpp:49
armnn::SubtractionQueueDescriptor
Definition: WorkloadData.hpp:271
armnn::ActivationQueueDescriptor
Definition: WorkloadData.hpp:158
armnn::LayerType::Concat
@ Concat
armnn::QueueDescriptor::m_Outputs
std::vector< ITensorHandle * > m_Outputs
Definition: WorkloadData.hpp:27
NumericCast.hpp
armnn::WorkloadFactoryBase::CreatePermute
std::unique_ptr< IWorkload > CreatePermute(const PermuteQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:195
armnn::NeonWorkloadFactory::CreateWorkload
std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const override
Definition: NeonWorkloadFactory.cpp:134
armnn::UnaryOperation::Rsqrt
@ Rsqrt
armnn::WorkloadFactoryBase::CreateElementwiseUnary
std::unique_ptr< IWorkload > CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor &descriptor, const WorkloadInfo &info) const override
Definition: WorkloadFactoryBase.hpp:110
armnn::WorkloadFactoryBase::CreateL2Normalization
std::unique_ptr< IWorkload > CreateL2Normalization(const L2NormalizationQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:143
armnn::LayerType::Cast
@ Cast
IgnoreUnused.hpp
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::MaximumQueueDescriptor
Definition: WorkloadData.hpp:277
armnn::LayerType::Splitter
@ Splitter
armnn::LayerType::LogSoftmax
@ LogSoftmax
armnn::WorkloadFactoryBase::CreateMemCopy
std::unique_ptr< IWorkload > CreateMemCopy(const MemCopyQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:163
MemImportWorkload.hpp
armnn::LayerType::Output
@ Output
armnn::WorkloadFactoryBase::CreateDequantize
std::unique_ptr< IWorkload > CreateDequantize(const DequantizeQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:98
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn::WorkloadFactoryBase::CreateConvolution2d
std::unique_ptr< IWorkload > CreateConvolution2d(const Convolution2dQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:82
armnn::LayerType::Multiplication
@ Multiplication
armnn::LayerType::MemImport
@ MemImport
armnn::BinaryOperation::Div
@ Div
armnn::LayerType::Prelu
@ Prelu
armnn::WorkloadFactoryBase::CreateSoftmax
std::unique_ptr< IWorkload > CreateSoftmax(const SoftmaxQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:239
armnn::WorkloadFactoryBase::CreateRank
std::unique_ptr< IWorkload > CreateRank(const RankQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:223
armnn::LayerType::Dequantize
@ Dequantize
armnn::Coordinates
std::array< unsigned int, MaxNumOfTensorDimensions > Coordinates
Definition: InternalTypes.hpp:15
armnn::OutputQueueDescriptor
MemCopyQueueDescriptor OutputQueueDescriptor
Definition: WorkloadData.hpp:92
armnn::UnaryOperation::Log
@ Log
armnn::QueueDescriptor::m_Inputs
std::vector< ITensorHandle * > m_Inputs
Definition: WorkloadData.hpp:26
armnn::BoostLogSeverityMapping::info
@ info
armnn::WorkloadFactoryBase::CreateArgMinMax
std::unique_ptr< IWorkload > CreateArgMinMax(const ArgMinMaxQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:50
MemCopyWorkload.hpp
armnn::WorkloadFactoryBase::CreateConstant
std::unique_ptr< IWorkload > CreateConstant(const ConstantQueueDescriptor &, const WorkloadInfo &) const override
Definition: WorkloadFactoryBase.hpp:70
armnn::LayerType::PreCompiled
@ PreCompiled