// ArmNN 22.11 — NeonWorkloadFactory.cpp (source listing; see the ArmNN documentation for this file).
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "NeonBackendId.hpp"
8 #include "NeonTensorHandle.hpp"
10 
11 #include <Layer.hpp>
12 
13 #include <armnn/Utils.hpp>
17 
22 
25 
26 namespace armnn
27 {
28 
namespace
{
// File-local Neon backend identifier, shared by the IsLayerSupported overloads
// and GetBackendId below.
static const BackendId s_Id{NeonBackendId()};
}
33 
35  Optional<DataType> dataType,
36  std::string& outReasonIfUnsupported)
37 {
38  return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
39 }
40 
42  Optional<DataType> dataType,
43  std::string& outReasonIfUnsupported,
44  const ModelOptions& modelOptions)
45 {
46  return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported, modelOptions);
47 }
48 
50 {
51  return s_Id;
52 }
53 
54 void NeonWorkloadFactory::SetNumberOfThreads()
55 {
56  if (m_ModelContextPtr)
57  {
58  const unsigned int MIN_THREADS = 1;
59  const unsigned int MAX_THREADS = 64;
60 
61  // Set the number of threads to be used if the user has set NumberOfThreads param
62  // Only set if within limit or valid input
63  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
64  auto numberOfThreads = modelOptions->GetNumberOfThreads();
65 
66  if (numberOfThreads != 0 && numberOfThreads >= MIN_THREADS && numberOfThreads <= MAX_THREADS)
67  {
68  arm_compute::Scheduler::get().set_num_threads(numberOfThreads);
69  }
70  }
71 }
72 
// Constructs a factory backed by the given memory manager with no
// backend-specific model options; applies any scheduler thread settings.
NeonWorkloadFactory::NeonWorkloadFactory(const std::shared_ptr<NeonMemoryManager>& memoryManager)
    : m_MemoryManager(memoryManager), m_ModelContextPtr(IBackendInternal::IBackendSpecificModelContextPtr{})
{
    SetNumberOfThreads();
}
78 
79 NeonWorkloadFactory::NeonWorkloadFactory(const std::shared_ptr<NeonMemoryManager>& memoryManager,
81  : m_MemoryManager(memoryManager), m_ModelContextPtr(modelContextPtr)
82 {
83  SetNumberOfThreads();
84 }
85 
86 std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateSubTensorHandle(ITensorHandle& parent,
87  TensorShape const& subTensorShape,
88  unsigned int const* subTensorOrigin) const
89 {
90  const arm_compute::TensorShape shape = armcomputetensorutils::BuildArmComputeTensorShape(subTensorShape);
91 
93  coords.set_num_dimensions(subTensorShape.GetNumDimensions());
94  for (unsigned int i = 0; i < subTensorShape.GetNumDimensions(); i++)
95  {
96  // Arm compute indexes tensor coords in reverse order.
97  unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
98  coords.set(i, armnn::numeric_cast<int>(subTensorOrigin[revertedIndex]));
99  }
100 
101  const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
102  if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
103  {
104  return nullptr;
105  }
106 
107  return std::make_unique<NeonSubTensorHandle>(
108  PolymorphicDowncast<IAclTensorHandle*>(&parent), shape, coords);
109 }
110 
111 std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
112  const bool IsMemoryManaged) const
113 {
114  auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo);
115  if (IsMemoryManaged)
116  {
117  tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
118  }
119  return tensorHandle;
120 }
121 
122 std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
123  DataLayout dataLayout,
124  const bool IsMemoryManaged) const
125 {
126  auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo, dataLayout);
127  if (IsMemoryManaged)
128  {
129  tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
130  }
131  return tensorHandle;
132 }
133 
134 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateWorkload(LayerType type,
135  const QueueDescriptor& descriptor,
136  const WorkloadInfo& info) const
137 {
138  switch(type)
139  {
140  case LayerType::Activation :
141  {
142  auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
143  return std::make_unique<NeonActivationWorkload>(*activationQueueDescriptor, info);
144  }
145  case LayerType::Addition :
146  {
147  auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
148  return std::make_unique<NeonAdditionWorkload>(*additionQueueDescriptor, info);
149  }
150  case LayerType::ArgMinMax :
151  {
152  auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
153  return std::make_unique<NeonArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info);
154  }
156  {
157  auto batchMatMulQueueDescriptor = PolymorphicDowncast<const BatchMatMulQueueDescriptor*>(&descriptor);
158  return std::make_unique<NeonBatchMatMulWorkload>(*batchMatMulQueueDescriptor, info);
159  }
161  {
162  auto batchNormalizationQueueDescriptor
163  = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
164  return std::make_unique<NeonBatchNormalizationWorkload>(*batchNormalizationQueueDescriptor, info);
165  }
167  {
168  auto batchToSpaceNdQueueDescriptor
169  = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
170  return std::make_unique<NeonBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info);
171  }
172  case LayerType::Cast :
173  {
174  auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
175  return std::make_unique<NeonCastWorkload>(*castQueueDescriptor, info);
176  }
178  {
179  auto channelShuffleQueueDescriptor = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
180  return std::make_unique<NeonChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info);
181  }
182  case LayerType::Comparison :
183  {
184  auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
185  return std::make_unique<NeonComparisonWorkload>(*comparisonQueueDescriptor, info);
186  }
187  case LayerType::Concat :
188  {
189  auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
190  return std::make_unique<NeonConcatWorkload>(*concatQueueDescriptor, info);
191  }
192  case LayerType::Constant :
193  {
194  auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
195  return std::make_unique<NeonConstantWorkload>(*constantQueueDescriptor, info);
196  }
198  {
199  auto convertBf16ToFp32QueueDescriptor
200  = PolymorphicDowncast<const ConvertBf16ToFp32QueueDescriptor*>(&descriptor);
201  return std::make_unique<NeonConvertBf16ToFp32Workload>(*convertBf16ToFp32QueueDescriptor, info);
202  }
204  {
205  auto convertFp16ToFp32QueueDescriptor
206  = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
207  return std::make_unique<NeonConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info);
208  }
210  {
211  auto convertFp32ToBf16QueueDescriptor
212  = PolymorphicDowncast<const ConvertFp32ToBf16QueueDescriptor*>(&descriptor);
213  return std::make_unique<NeonConvertFp32ToBf16Workload>(*convertFp32ToBf16QueueDescriptor, info);
214  }
216  {
217  auto convertFp32ToFp16QueueDescriptor
218  = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
219  return std::make_unique<NeonConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor, info);
220  }
222  {
223  auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
224 
225  bool isFastMathEnabled = false;
226  if (m_ModelContextPtr)
227  {
228  if (m_ModelContextPtr.get() != nullptr)
229  {
230  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
231  if (modelOptions)
232  {
233  isFastMathEnabled = modelOptions->IsFastMathEnabled();
234  }
235  }
236  }
237  return std::make_unique<NeonConvolution2dWorkload>(*convolution2dQueueDescriptor,
238  info,
239  m_MemoryManager->GetIntraLayerManager(),
240  isFastMathEnabled);
241  }
243  {
244  auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
245 
246  bool isFastMathEnabled = false;
247  if (m_ModelContextPtr)
248  {
249  if (m_ModelContextPtr.get() != nullptr)
250  {
251  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
252  if (modelOptions)
253  {
254  isFastMathEnabled = modelOptions->IsFastMathEnabled();
255  }
256  }
257  }
258  return std::make_unique<NeonConvolution3dWorkload>(*convolution3dQueueDescriptor,
259  info,
260  m_MemoryManager->GetIntraLayerManager(),
261  isFastMathEnabled);
262  }
263  case LayerType::Debug :
264  {
265  auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
266  return MakeWorkloadHelper<NullWorkload, NullWorkload>(*debugQueueDescriptor, info);
267  }
269  {
270  auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
271  return std::make_unique<NeonDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info);
272  }
274  {
275  auto depthwiseConvolution2dQueueDescriptor
276  = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
277  return std::make_unique<NeonDepthwiseConvolutionWorkload>(*depthwiseConvolution2dQueueDescriptor, info);
278  }
279  case LayerType::Dequantize :
280  {
281  auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
282  return std::make_unique<NeonDequantizeWorkload>(*dequantizeQueueDescriptor, info);
283  }
285  {
286  auto detectionPostProcessQueueDescriptor
287  = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
288  return MakeWorkloadHelper<NullWorkload, NullWorkload>(*detectionPostProcessQueueDescriptor, info);
289  }
290  case LayerType::Division :
291  {
292  auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
293  return std::make_unique<NeonDivisionWorkload>(*divisionQueueDescriptor, info);
294  }
296  {
297  auto elementwiseUnaryQueueDescriptor
298  = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
299 
300  switch(elementwiseUnaryQueueDescriptor->m_Parameters.m_Operation)
301  {
302  case UnaryOperation::Abs:
303  {
304  AbsQueueDescriptor absQueueDescriptor;
305  absQueueDescriptor.m_Inputs = elementwiseUnaryQueueDescriptor->m_Inputs;
306  absQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
307 
308  return std::make_unique<NeonAbsWorkload>(absQueueDescriptor, info);
309  }
310  case UnaryOperation::Exp:
311  return std::make_unique<NeonExpWorkload>(*elementwiseUnaryQueueDescriptor, info);
313  return std::make_unique<NeonLogicalNotWorkload>(*elementwiseUnaryQueueDescriptor, info);
314  case UnaryOperation::Log:
315  return std::make_unique<NeonLogWorkload>(*elementwiseUnaryQueueDescriptor, info);
316  case UnaryOperation::Neg:
317  return std::make_unique<NeonNegWorkload>(*elementwiseUnaryQueueDescriptor, info);
319  {
320  RsqrtQueueDescriptor rsqrtQueueDescriptor;
321  rsqrtQueueDescriptor.m_Inputs = elementwiseUnaryQueueDescriptor->m_Inputs;
322  rsqrtQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
323 
324  return std::make_unique<NeonRsqrtWorkload>(rsqrtQueueDescriptor, info);
325  }
326  case UnaryOperation::Sin:
327  return std::make_unique<NeonSinWorkload>(*elementwiseUnaryQueueDescriptor, info);
329  return std::make_unique<NeonSqrtWorkload>(*elementwiseUnaryQueueDescriptor, info);
330  default:
331  return nullptr;
332  }
333  }
334  case LayerType::Fill :
335  {
336  auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
337  return std::make_unique<NeonFillWorkload>(*fillQueueDescriptor, info);
338  }
339  case LayerType::Floor :
340  {
341  auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
342  return MakeWorkloadHelper<NeonFloorFloatWorkload, NullWorkload>(*floorQueueDescriptor, info);
343  }
345  {
346  auto fullyConnectedQueueDescriptor = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
347  return std::make_unique<NeonFullyConnectedWorkload>(*fullyConnectedQueueDescriptor,
348  info,
349  m_MemoryManager->GetIntraLayerManager());
350  }
351  case LayerType::Gather :
352  {
353  auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
354  return std::make_unique<NeonGatherWorkload>(*gatherQueueDescriptor, info);
355  }
356  case LayerType::GatherNd :
357  {
358  auto gatherNdQueueDescriptor = PolymorphicDowncast<const GatherNdQueueDescriptor*>(&descriptor);
359  return std::make_unique<NeonGatherNdWorkload>(*gatherNdQueueDescriptor, info);
360  }
361  case LayerType::Input :
362  {
363  auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
364  return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
365  }
367  {
368  auto instanceNormalizationQueueDescriptor
369  = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
370  return std::make_unique<NeonInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor, info);
371  }
373  {
374  auto l2NormalizationQueueDescriptor
375  = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
376  return MakeWorkloadHelper<NeonL2NormalizationFloatWorkload, NullWorkload>
377  (*l2NormalizationQueueDescriptor, info, m_MemoryManager->GetIntraLayerManager());
378  }
379  case LayerType::LogSoftmax :
380  {
381  auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
382  return std::make_unique<NeonLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor,
383  info,
384  m_MemoryManager->GetIntraLayerManager());
385  }
387  {
388  auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
389 
390  switch(logicalBinaryQueueDescriptor->m_Parameters.m_Operation)
391  {
393  return std::make_unique<NeonLogicalAndWorkload>(*logicalBinaryQueueDescriptor, info);
395  return std::make_unique<NeonLogicalOrWorkload>(*logicalBinaryQueueDescriptor, info);
396  default:
397  return nullptr;
398  }
399  }
400  case LayerType::Lstm :
401  {
402  auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
403  return MakeWorkloadHelper<NeonLstmFloatWorkload, NullWorkload>(*lstmQueueDescriptor, info);
404  }
405  case LayerType::Maximum :
406  {
407  auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
408  return std::make_unique<NeonMaximumWorkload>(*maximumQueueDescriptor, info);
409  }
410  case LayerType::Mean :
411  {
412  auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
413  return std::make_unique<NeonMeanWorkload>(*meanQueueDescriptor, info);
414  }
415  case LayerType::MemCopy :
416  {
417  auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
418  if (memCopyQueueDescriptor->m_Inputs.empty() || !memCopyQueueDescriptor->m_Inputs[0])
419  {
420  throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemCopy workload");
421  }
422  return MakeWorkloadHelper<CopyMemGenericWorkload, CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
423  }
424  case LayerType::MemImport :
425  {
426  auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
427  if (memImportQueueDescriptor->m_Inputs.empty() || !memImportQueueDescriptor->m_Inputs[0])
428  {
429  throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemImport workload");
430  }
431  return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
432  }
433  case LayerType::Minimum :
434  {
435  auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
436  return std::make_unique<NeonMinimumWorkload>(*minimumQueueDescriptor, info);
437  }
439  {
440  auto multiplicationQueueDescriptor = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
441  return std::make_unique<NeonMultiplicationWorkload>(*multiplicationQueueDescriptor, info);
442  }
444  {
445  auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
446  return MakeWorkloadHelper<NeonNormalizationFloatWorkload, NullWorkload>
447  (*normalizationQueueDescriptor, info, m_MemoryManager->GetIntraLayerManager());
448  }
449  case LayerType::Output :
450  {
451  auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
452  return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
453  }
454  case LayerType::Pad :
455  {
456  auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
457  return std::make_unique<NeonPadWorkload>(*padQueueDescriptor, info);
458  }
459  case LayerType::Permute :
460  {
461  auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
462  return std::make_unique<NeonPermuteWorkload>(*permuteQueueDescriptor, info);
463  }
464  case LayerType::Pooling2d :
465  {
466  auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
467  return std::make_unique<NeonPooling2dWorkload>(*pooling2dQueueDescriptor, info);
468  }
469  case LayerType::Pooling3d :
470  {
471  auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
472  return std::make_unique<NeonPooling3dWorkload>(*pooling3dQueueDescriptor, info);
473  }
475  {
476  auto preCompiledQueueDescriptor = PolymorphicDowncast<const PreCompiledQueueDescriptor*>(&descriptor);
477  return MakeWorkloadHelper<NullWorkload, NullWorkload>(*preCompiledQueueDescriptor, info);
478  }
479  case LayerType::Prelu :
480  {
481  auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
482  return std::make_unique<NeonPreluWorkload>(*preluQueueDescriptor, info);
483  }
484  case LayerType::QLstm :
485  {
486  auto qLstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
487  return std::make_unique<NeonQLstmWorkload>(*qLstmQueueDescriptor, info);
488  }
489  case LayerType::Quantize :
490  {
491  auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
492  return std::make_unique<NeonQuantizeWorkload>(*quantizeQueueDescriptor, info);
493  }
495  {
496  auto quantizedLstmQueueDescriptor = PolymorphicDowncast<const QuantizedLstmQueueDescriptor*>(&descriptor);
497  return std::make_unique<NeonQuantizedLstmWorkload>(*quantizedLstmQueueDescriptor, info);
498  }
499  case LayerType::Rank :
500  {
501  auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
502  return std::make_unique<NeonRankWorkload>(*rankQueueDescriptor, info);
503  }
504  case LayerType::Reduce :
505  {
506  auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
507  return std::make_unique<NeonReduceWorkload>(*reduceQueueDescriptor, info);
508  }
509  case LayerType::Reshape :
510  {
511  auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
512  return std::make_unique<NeonReshapeWorkload>(*reshapeQueueDescriptor, info);
513  }
514  case LayerType::Resize :
515  {
516  auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
517  return std::make_unique<NeonResizeWorkload>(*resizeQueueDescriptor, info);
518  }
519  case LayerType::Slice :
520  {
521  auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
522  return std::make_unique<NeonSliceWorkload>(*sliceQueueDescriptor, info);
523  }
524  case LayerType::Softmax :
525  {
526  auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
527  return std::make_unique<NeonSoftmaxWorkload>(*softmaxQueueDescriptor,
528  info,
529  m_MemoryManager->GetIntraLayerManager());
530  }
532  {
533  auto spaceToBatchNdQueueDescriptor
534  = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
535  return std::make_unique<NeonSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info);
536  }
538  {
539  auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
540  return std::make_unique<NeonSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info);
541  }
542  case LayerType::Splitter :
543  {
544  auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
545  return std::make_unique<NeonSplitterWorkload>(*splitterQueueDescriptor, info);
546  }
547  case LayerType::Stack :
548  {
549  auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
550  return std::make_unique<NeonStackWorkload>(*stackQueueDescriptor, info);
551  }
553  {
554  auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
555  return std::make_unique<NeonStridedSliceWorkload>(*stridedSliceQueueDescriptor, info);
556  }
558  {
559  auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
560  return std::make_unique<NeonSubtractionWorkload>(*subtractionQueueDescriptor, info);
561  }
562  case LayerType::Transpose :
563  {
564  auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
565  return std::make_unique<NeonTransposeWorkload>(*transposeQueueDescriptor, info);
566  }
568  {
569  auto transposeConvolution2dQueueDescriptor
570  = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
571  return std::make_unique<NeonTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor,
572  info,
573  m_MemoryManager->GetIntraLayerManager());
574  }
576  {
577  auto desc = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
578 
579  if ((info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Float32) &&
580  (info.m_InputTensorInfos[1].GetDataType() == armnn::DataType::Float32) &&
581  (info.m_InputTensorInfos[2].GetDataType() == armnn::DataType::Float32) &&
582  (info.m_OutputTensorInfos[0].GetDataType() == armnn::DataType::Float32) &&
583  (info.m_OutputTensorInfos[1].GetDataType() == armnn::DataType::Float32) &&
584  (info.m_OutputTensorInfos[2].GetDataType() == armnn::DataType::Float32))
585  {
586  return std::make_unique<NeonUnidirectionalSequenceLstmFloatWorkload>(*desc, info);
587  }
588  else
589  {
590  return std::make_unique<NeonUnidirectionalSequenceLstmWorkload>(*desc, info);
591  }
592  }
593  default:
594  return nullptr;
595  }
596 }
597 
598 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
599  const WorkloadInfo& info) const
600 {
601  return std::make_unique<NeonActivationWorkload>(descriptor, info);
602 }
603 
604 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
605  const WorkloadInfo& info) const
606 {
607  return std::make_unique<NeonAdditionWorkload>(descriptor, info);
608 }
609 
610 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
611  const WorkloadInfo& info) const
612 {
613  return std::make_unique<NeonArgMinMaxWorkload>(descriptor, info);
614 }
615 
616 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateBatchNormalization(
617  const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
618 {
619  return std::make_unique<NeonBatchNormalizationWorkload>(descriptor, info);
620 }
621 
622 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
623  const WorkloadInfo& info) const
624 {
625  return std::make_unique<NeonBatchToSpaceNdWorkload>(descriptor, info);
626 }
627 
628 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateCast(const CastQueueDescriptor& descriptor,
629  const WorkloadInfo& info) const
630 {
631  return std::make_unique<NeonCastWorkload>(descriptor, info);
632 }
633 
634 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateChannelShuffle(const ChannelShuffleQueueDescriptor& descriptor,
635  const WorkloadInfo& info) const
636 {
637  return std::make_unique<NeonChannelShuffleWorkload>(descriptor, info);
638 }
639 
640 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& descriptor,
641  const WorkloadInfo& info) const
642 {
643  return std::make_unique<NeonComparisonWorkload>(descriptor, info);
644 }
645 
646 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor,
647  const WorkloadInfo& info) const
648 {
649  return std::make_unique<NeonConcatWorkload>(descriptor, info);
650 }
651 
652 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor,
653  const WorkloadInfo& info) const
654 {
655  return std::make_unique<NeonConstantWorkload>(descriptor, info);
656 }
657 
658 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertBf16ToFp32(
659  const ConvertBf16ToFp32QueueDescriptor& descriptor,
660  const WorkloadInfo& info) const
661 {
662  return std::make_unique<NeonConvertBf16ToFp32Workload>(descriptor, info);
663 }
664 
665 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertFp16ToFp32(
666  const ConvertFp16ToFp32QueueDescriptor& descriptor,
667  const WorkloadInfo& info) const
668 {
669  return std::make_unique<NeonConvertFp16ToFp32Workload>(descriptor, info);
670 }
671 
672 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertFp32ToBf16(
673  const ConvertFp32ToBf16QueueDescriptor& descriptor,
674  const WorkloadInfo& info) const
675 {
676  return std::make_unique<NeonConvertFp32ToBf16Workload>(descriptor, info);
677 }
678 
679 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertFp32ToFp16(
680  const ConvertFp32ToFp16QueueDescriptor& descriptor,
681  const WorkloadInfo& info) const
682 {
683  return std::make_unique<NeonConvertFp32ToFp16Workload>(descriptor, info);
684 }
685 
686 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateConvolution2d(
687  const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const
688 {
689  bool isFastMathEnabled = false;
690  if (m_ModelContextPtr)
691  {
692  if (m_ModelContextPtr.get() != nullptr)
693  {
694  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
695  if (modelOptions)
696  {
697  isFastMathEnabled = modelOptions->IsFastMathEnabled();
698  }
699  }
700  }
701  return std::make_unique<NeonConvolution2dWorkload>(descriptor,
702  info,
703  m_MemoryManager->GetIntraLayerManager(),
704  isFastMathEnabled);
705 }
706 
707 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateConvolution3d(
708  const Convolution3dQueueDescriptor& descriptor, const WorkloadInfo& info) const
709 {
710  bool isFastMathEnabled = false;
711  if (m_ModelContextPtr)
712  {
713  if (m_ModelContextPtr.get() != nullptr)
714  {
715  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
716  if (modelOptions)
717  {
718  isFastMathEnabled = modelOptions->IsFastMathEnabled();
719  }
720  }
721  }
722  return std::make_unique<NeonConvolution3dWorkload>(descriptor,
723  info,
724  m_MemoryManager->GetIntraLayerManager(),
725  isFastMathEnabled);
726 }
727 
728 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
729  const WorkloadInfo& info) const
730 {
731  return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
732 }
733 
734 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& descriptor,
735  const WorkloadInfo& info) const
736 {
737  return std::make_unique<NeonDepthToSpaceWorkload>(descriptor, info);
738 }
739 
740 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDepthwiseConvolution2d(
741  const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const
742 {
743  return std::make_unique<NeonDepthwiseConvolutionWorkload>(descriptor, info);
744 }
745 
746 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDequantize(const DequantizeQueueDescriptor& descriptor,
747  const WorkloadInfo& info) const
748 {
749  return std::make_unique<NeonDequantizeWorkload>(descriptor, info);
750 }
751 
752 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDetectionPostProcess(
753  const armnn::DetectionPostProcessQueueDescriptor& descriptor, const armnn::WorkloadInfo& info) const
754 {
755  return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
756 }
757 
758 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateDivision(
759  const DivisionQueueDescriptor& descriptor, const WorkloadInfo& info) const
760 {
761  return std::make_unique<NeonDivisionWorkload>(descriptor, info);
762 }
763 
764 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateElementwiseUnary(
765  const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info) const
766 {
767  switch(descriptor.m_Parameters.m_Operation)
768  {
769  case UnaryOperation::Abs:
770  {
771  AbsQueueDescriptor absQueueDescriptor;
772  absQueueDescriptor.m_Inputs = descriptor.m_Inputs;
773  absQueueDescriptor.m_Outputs = descriptor.m_Outputs;
774 
775  return std::make_unique<NeonAbsWorkload>(absQueueDescriptor, info);
776  }
777  case UnaryOperation::Exp:
778  return std::make_unique<NeonExpWorkload>(descriptor, info);
780  return std::make_unique<NeonLogicalNotWorkload>(descriptor, info);
781  case UnaryOperation::Log:
782  return std::make_unique<NeonLogWorkload>(descriptor, info);
783  case UnaryOperation::Neg:
784  return std::make_unique<NeonNegWorkload>(descriptor, info);
786  {
787  RsqrtQueueDescriptor rsqrtQueueDescriptor;
788  rsqrtQueueDescriptor.m_Inputs = descriptor.m_Inputs;
789  rsqrtQueueDescriptor.m_Outputs = descriptor.m_Outputs;
790 
791  return std::make_unique<NeonRsqrtWorkload>(rsqrtQueueDescriptor, info);
792  }
793  case UnaryOperation::Sin:
794  return std::make_unique<NeonSinWorkload>(descriptor, info);
795  default:
796  return nullptr;
797  }
798 }
799 
800 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFill(const FillQueueDescriptor& descriptor,
801  const WorkloadInfo& info) const
802 {
803  return std::make_unique<NeonFillWorkload>(descriptor, info);
804 }
805 
806 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
807  const WorkloadInfo& info) const
808 {
809  return MakeWorkloadHelper<NeonFloorFloatWorkload, NullWorkload>(descriptor, info);
810 }
811 
812 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateFullyConnected(
813  const FullyConnectedQueueDescriptor& descriptor, const WorkloadInfo& info) const
814 {
815  return std::make_unique<NeonFullyConnectedWorkload>(descriptor, info, m_MemoryManager->GetIntraLayerManager());
816 }
817 
818 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateGather(const armnn::GatherQueueDescriptor& descriptor,
819  const armnn::WorkloadInfo& info) const
820 {
821  return std::make_unique<NeonGatherWorkload>(descriptor, info);
822 }
823 
824 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor,
825  const WorkloadInfo& info) const
826 {
827  return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
828 }
829 
830 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateInstanceNormalization(
831  const InstanceNormalizationQueueDescriptor& descriptor,
832  const WorkloadInfo& info) const
833 {
834  return std::make_unique<NeonInstanceNormalizationWorkload>(descriptor, info);
835 }
836 
837 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
838  const WorkloadInfo& info) const
839 {
840  return MakeWorkloadHelper<NeonL2NormalizationFloatWorkload, NullWorkload>(descriptor, info,
841  m_MemoryManager->GetIntraLayerManager());
842 }
843 
844 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
845  const WorkloadInfo& info) const
846 {
847  return std::make_unique<NeonLogSoftmaxWorkload>(descriptor, info, m_MemoryManager->GetIntraLayerManager());
848 }
849 
850 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateLogicalBinary(const LogicalBinaryQueueDescriptor& descriptor,
851  const WorkloadInfo& info) const
852 {
853  switch(descriptor.m_Parameters.m_Operation)
854  {
856  return std::make_unique<NeonLogicalAndWorkload>(descriptor, info);
858  return std::make_unique<NeonLogicalOrWorkload>(descriptor, info);
859  default:
860  return nullptr;
861  }
862 }
863 
864 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateLstm(const LstmQueueDescriptor& descriptor,
865  const WorkloadInfo& info) const
866 {
867  return MakeWorkloadHelper<NeonLstmFloatWorkload, NullWorkload>(descriptor, info);
868 }
869 
870 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& descriptor,
871  const WorkloadInfo& info) const
872 {
873  return std::make_unique<NeonMaximumWorkload>(descriptor, info);
874 }
875 
876 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMean(const MeanQueueDescriptor& descriptor,
877  const WorkloadInfo& info) const
878 {
879  return std::make_unique<NeonMeanWorkload>(descriptor, info);
880 }
881 
882 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
883  const WorkloadInfo& info) const
884 {
885  if (descriptor.m_Inputs.empty() || !descriptor.m_Inputs[0])
886  {
887  throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemCopy workload");
888  }
889 
890  return MakeWorkloadHelper<CopyMemGenericWorkload, CopyMemGenericWorkload>(descriptor, info);
891 }
892 
893 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& descriptor,
894  const WorkloadInfo& info) const
895 {
896  if (descriptor.m_Inputs.empty() || !descriptor.m_Inputs[0])
897  {
898  throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemImport workload");
899  }
900 
901  return std::make_unique<ImportMemGenericWorkload>(descriptor, info);
902 }
903 
904 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor,
905  const WorkloadInfo& info) const
906 {
907  return std::make_unique<NeonMinimumWorkload>(descriptor, info);
908 }
909 
910 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMultiplication(
911  const MultiplicationQueueDescriptor& descriptor, const WorkloadInfo& info) const
912 {
913  return std::make_unique<NeonMultiplicationWorkload>(descriptor, info);
914 }
915 
916 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateNormalization(
917  const NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
918 {
919  return MakeWorkloadHelper<NeonNormalizationFloatWorkload, NullWorkload>(descriptor, info,
920  m_MemoryManager->GetIntraLayerManager());
921 }
922 
923 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor,
924  const WorkloadInfo& info) const
925 {
926  return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
927 }
928 
929 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
930  const WorkloadInfo& info) const
931 {
932  return std::make_unique<NeonPadWorkload>(descriptor, info);
933 }
934 
935 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
936  const WorkloadInfo& info) const
937 {
938  return std::make_unique<NeonPermuteWorkload>(descriptor, info);
939 }
940 
941 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
942  const WorkloadInfo& info) const
943 {
944  return std::make_unique<NeonPooling2dWorkload>(descriptor, info);
945 }
946 
947 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
948  const WorkloadInfo& info) const
949 {
950  return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
951 }
952 
953 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreatePrelu(const armnn::PreluQueueDescriptor &descriptor,
954  const armnn::WorkloadInfo &info) const
955 {
956  return std::make_unique<NeonPreluWorkload>(descriptor, info);
957 }
958 
959 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateQLstm(const QLstmQueueDescriptor& descriptor,
960  const WorkloadInfo& info) const
961 {
962  return std::make_unique<NeonQLstmWorkload>(descriptor, info);
963 }
964 
965 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& descriptor,
966  const WorkloadInfo& info) const
967 {
968  return std::make_unique<NeonQuantizeWorkload>(descriptor, info);
969 }
970 
971 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& descriptor,
972  const WorkloadInfo& info) const
973 {
974  return std::make_unique<NeonQuantizedLstmWorkload>(descriptor, info);
975 }
976 
977 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateRank(const RankQueueDescriptor& descriptor,
978  const WorkloadInfo& info) const
979 {
980  return std::make_unique<NeonRankWorkload>(descriptor, info);
981 }
982 
983 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateReduce(const ReduceQueueDescriptor& descriptor,
984  const WorkloadInfo& info) const
985 {
986  return std::make_unique<NeonReduceWorkload>(descriptor, info);
987 }
988 
989 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
990  const WorkloadInfo& info) const
991 {
992  return std::make_unique<NeonReshapeWorkload>(descriptor, info);
993 }
994 
995 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
996  const WorkloadInfo& info) const
997 {
998  return std::make_unique<NeonResizeWorkload>(descriptor, info);
999 }
1000 
1001 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor,
1002  const WorkloadInfo& info) const
1003 {
1004  return std::make_unique<NeonSliceWorkload>(descriptor, info);
1005 }
1006 
1007 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
1008  const WorkloadInfo& info) const
1009 {
1010  return std::make_unique<NeonSoftmaxWorkload>(descriptor, info, m_MemoryManager->GetIntraLayerManager());
1011 }
1012 
1013 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
1014  const WorkloadInfo& info) const
1015 {
1016  return std::make_unique<NeonSpaceToBatchNdWorkload>(descriptor, info);
1017 }
1018 
1019 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
1020  const WorkloadInfo& info) const
1021 {
1022  return std::make_unique<NeonSpaceToDepthWorkload>(descriptor, info);
1023 }
1024 
1025 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor,
1026  const WorkloadInfo& info) const
1027 {
1028  return std::make_unique<NeonSplitterWorkload>(descriptor, info);
1029 }
1030 
1031 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateStack(const StackQueueDescriptor& descriptor,
1032  const WorkloadInfo& info) const
1033 {
1034  return std::make_unique<NeonStackWorkload>(descriptor, info);
1035 }
1036 
1037 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
1038  const WorkloadInfo& info) const
1039 {
1040  return std::make_unique<NeonStridedSliceWorkload>(descriptor, info);
1041 }
1042 
1043 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateSubtraction(
1044  const SubtractionQueueDescriptor& descriptor, const WorkloadInfo& info) const
1045 {
1046  return std::make_unique<NeonSubtractionWorkload>(descriptor, info);
1047 }
1048 
1049 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& descriptor,
1050  const WorkloadInfo& info) const
1051 {
1052  return std::make_unique<NeonTransposeWorkload>(descriptor, info);
1053 }
1054 
1055 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateTransposeConvolution2d(
1056  const TransposeConvolution2dQueueDescriptor &descriptor,
1057  const WorkloadInfo &info) const
1058 {
1059  return std::make_unique<NeonTransposeConvolution2dWorkload>(descriptor, info,
1060  m_MemoryManager->GetIntraLayerManager());
1061 }
1062 
1063 } // namespace armnn
std::unique_ptr< IWorkload > CreateSubtraction(const SubtractionQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor &descriptor, const WorkloadInfo &info) const override
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
std::unique_ptr< IWorkload > CreateStridedSlice(const StridedSliceQueueDescriptor &, const WorkloadInfo &) const override
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:68
DataLayout
Definition: Types.hpp:62
std::unique_ptr< IWorkload > CreateInput(const InputQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreateL2Normalization(const L2NormalizationQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreateBatchNormalization(const BatchNormalizationQueueDescriptor &, const WorkloadInfo &) const override
std::vector< BackendOptions > ModelOptions
std::unique_ptr< IWorkload > CreateSoftmax(const SoftmaxQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreatePooling2d(const Pooling2dQueueDescriptor &, const WorkloadInfo &) const override
std::array< unsigned int, MaxNumOfTensorDimensions > Coordinates
constexpr const char * NeonBackendId()
std::unique_ptr< IWorkload > CreateMean(const MeanQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreatePad(const PadQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreateLstm(const LstmQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreateDepthToSpace(const DepthToSpaceQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreateLogSoftmax(const LogSoftmaxQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreateComparison(const ComparisonQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreateTranspose(const TransposeQueueDescriptor &, const WorkloadInfo &) const override
Copyright (c) 2021 ARM Limited and Contributors.
std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const override
std::unique_ptr< IWorkload > CreateDequantize(const DequantizeQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreateSplitter(const SplitterQueueDescriptor &, const WorkloadInfo &) const override
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
std::unique_ptr< IWorkload > CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreateNormalization(const NormalizationQueueDescriptor &, const WorkloadInfo &) const override
The NeonBackendModelContext is used to pass in Neon specific backend ModelOptions.
std::unique_ptr< IWorkload > CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreateInstanceNormalization(const InstanceNormalizationQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor &, const WorkloadInfo &) const override
NeonWorkloadFactory(const std::shared_ptr< NeonMemoryManager > &memoryManager)
static bool IsLayerSupported(const Layer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
std::unique_ptr< IWorkload > CreateSpaceToDepth(const SpaceToDepthQueueDescriptor &, const WorkloadInfo &) const override
std::vector< TensorInfo > m_InputTensorInfos
std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreatePrelu(const PreluQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreateArgMinMax(const ArgMinMaxQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreatePermute(const PermuteQueueDescriptor &, const WorkloadInfo &) const override
std::shared_ptr< IBackendModelContext > IBackendSpecificModelContextPtr
std::unique_ptr< IWorkload > CreateDebug(const DebugQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreateConstant(const ConstantQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreateConvolution2d(const Convolution2dQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreateFullyConnected(const FullyConnectedQueueDescriptor &, const WorkloadInfo &) const override
const BackendId & GetBackendId() const override
std::unique_ptr< IWorkload > CreateMinimum(const MinimumQueueDescriptor &, const WorkloadInfo &) const override
std::vector< TensorInfo > m_OutputTensorInfos
std::unique_ptr< IWorkload > CreateReshape(const ReshapeQueueDescriptor &, const WorkloadInfo &) const override
static bool IsLayerSupported(const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
std::unique_ptr< IWorkload > CreateFloor(const FloorQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreateRank(const RankQueueDescriptor &, const WorkloadInfo &) const override
virtual TensorShape GetShape() const =0
Get the number of elements for each dimension ordered from slowest iterating dimension to fastest ite...
std::unique_ptr< IWorkload > CreateDetectionPostProcess(const DetectionPostProcessQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreateQuantize(const QuantizeQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreateAddition(const AdditionQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreateGather(const GatherQueueDescriptor &, const WorkloadInfo &) const override
std::vector< ITensorHandle * > m_Outputs
std::unique_ptr< IWorkload > CreateDivision(const DivisionQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreatePreCompiled(const PreCompiledQueueDescriptor &, const WorkloadInfo &) const override
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const override
std::unique_ptr< IWorkload > CreateStack(const StackQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreateOutput(const OutputQueueDescriptor &, const WorkloadInfo &) const override
Contains information about TensorInfos of a layer.
std::unique_ptr< IWorkload > CreateResize(const ResizeQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreateSlice(const SliceQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor &, const WorkloadInfo &) const override
std::vector< ITensorHandle * > m_Inputs
std::unique_ptr< IWorkload > CreateMultiplication(const MultiplicationQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreateMaximum(const MaximumQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreateActivation(const ActivationQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreateMemImport(const MemImportQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< IWorkload > CreateMemCopy(const MemCopyQueueDescriptor &, const WorkloadInfo &) const override
Depthwise Convolution 2D layer workload data.
std::unique_ptr< IWorkload > CreateQuantizedLstm(const QuantizedLstmQueueDescriptor &, const WorkloadInfo &) const override
std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const override
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below...
Definition: Types.hpp:468