ArmNN 23.11
RefWorkloadFactory.cpp
//
// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <Layer.hpp>

#include <armnn/backends/MemCopyWorkload.hpp>
#include <armnn/backends/MemImportWorkload.hpp>
#include <armnn/backends/TensorHandle.hpp>
#include <backendsCommon/MakeWorkloadHelper.hpp>

#include "RefWorkloadFactory.hpp"
#include "RefBackendId.hpp"
#include "RefTensorHandle.hpp"
#include "workloads/RefWorkloads.hpp"

namespace armnn
{

namespace
{
static const BackendId s_Id{RefBackendId()};
}
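// MakeWorkload resolves the tensor data types recorded in WorkloadInfo through MakeWorkloadHelper:
// only the Float32 and QAsymmU8 slots are given concrete workload types here, so those data types
// produce a workload and everything else falls through to NullWorkload (i.e. a nullptr result).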
template <typename F32Workload, typename U8Workload, typename QueueDescriptorType>
std::unique_ptr<IWorkload> RefWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
                                                            const WorkloadInfo& info) const
{
    return MakeWorkloadHelper<NullWorkload, F32Workload, U8Workload, NullWorkload, NullWorkload, NullWorkload>
           (descriptor, info);
}

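// Returns true if any input or output tensor described by the WorkloadInfo has the requested data type.
// The Is<DataType> helpers below wrap this template so CreateWorkload can branch per data type.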
template <DataType ArmnnType>
bool IsDataType(const WorkloadInfo& info)
{
    auto checkType = [](const TensorInfo& tensorInfo) {return tensorInfo.GetDataType() == ArmnnType;};
    auto it = std::find_if(std::begin(info.m_InputTensorInfos), std::end(info.m_InputTensorInfos), checkType);
    if (it != std::end(info.m_InputTensorInfos))
    {
        return true;
    }
    it = std::find_if(std::begin(info.m_OutputTensorInfos), std::end(info.m_OutputTensorInfos), checkType);
    if (it != std::end(info.m_OutputTensorInfos))
    {
        return true;
    }
    return false;
}
bool IsSigned64(const WorkloadInfo& info)
{
    return IsDataType<DataType::Signed64>(info);
}
bool IsSigned32(const WorkloadInfo& info)
{
    return IsDataType<DataType::Signed32>(info);
}
bool IsBFloat16(const WorkloadInfo& info)
{
    return IsDataType<DataType::BFloat16>(info);
}
bool IsFloat16(const WorkloadInfo& info)
{
    return IsDataType<DataType::Float16>(info);
}
bool IsQSymmS16(const WorkloadInfo& info)
{
    return IsDataType<DataType::QSymmS16>(info);
}
bool IsQSymmS8(const WorkloadInfo& info)
{
    return IsDataType<DataType::QSymmS8>(info);
}
bool IsQAsymmS8(const WorkloadInfo& info)
{
    return IsDataType<DataType::QAsymmS8>(info);
}
bool IsQAsymmU8(const WorkloadInfo& info)
{
    return IsDataType<DataType::QAsymmU8>(info);
}

RefWorkloadFactory::RefWorkloadFactory(const std::shared_ptr<RefMemoryManager>& memoryManager)
    : m_MemoryManager(memoryManager)
{
}

RefWorkloadFactory::RefWorkloadFactory()
    : m_MemoryManager(new RefMemoryManager())
{
}

const BackendId& RefWorkloadFactory::GetBackendId() const
{
    return s_Id;
}

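// Both IsLayerSupported overloads simply delegate to the generic IWorkloadFactory query, pinned to the
// reference backend id (s_Id) defined above.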
bool RefWorkloadFactory::IsLayerSupported(const Layer& layer,
                                          Optional<DataType> dataType,
                                          std::string& outReasonIfUnsupported)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
}

bool RefWorkloadFactory::IsLayerSupported(const IConnectableLayer& layer,
                                          Optional<DataType> dataType,
                                          std::string& outReasonIfUnsupported,
                                          const ModelOptions& modelOptions)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported, modelOptions);
}

std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                      const bool isMemoryManaged) const
{
    if (isMemoryManaged)
    {
        return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
    }
    else
    {
        return std::make_unique<RefTensorHandle>(tensorInfo);
    }
}

std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                      DataLayout dataLayout,
                                                                      const bool isMemoryManaged) const
{
    // For Ref it is okay to make the TensorHandle memory managed as it can also store a pointer
    // to unmanaged memory. This also ensures memory alignment.
    IgnoreUnused(isMemoryManaged, dataLayout);

    if (isMemoryManaged)
    {
        return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
    }
    else
    {
        return std::make_unique<RefTensorHandle>(tensorInfo);
    }
}
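// Illustrative use of the CreateTensorHandle overloads above (not part of this file): a memory-managed
// handle draws its storage from the shared RefMemoryManager, while passing isMemoryManaged = false yields
// a handle that allocates its own buffer.
//
//     RefWorkloadFactory factory;
//     TensorInfo tensorInfo({1, 2, 2, 3}, DataType::Float32);
//     auto pooled     = factory.CreateTensorHandle(tensorInfo);        // managed by RefMemoryManager
//     auto standalone = factory.CreateTensorHandle(tensorInfo, false); // owns its own storage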

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateWorkload(LayerType type,
                                                              const QueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
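    // Each case downcasts the generic QueueDescriptor to its layer-specific descriptor and returns the
    // matching reference workload; layer types without a case fall through to the default branch and
    // yield nullptr.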
    switch(type)
    {
        case LayerType::Activation :
        {
            auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefActivationWorkload>(*activationQueueDescriptor, info);
        }
        case LayerType::Addition :
        {
            auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefAdditionWorkload<int32_t>>(*additionQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefAdditionWorkload<float>>(*additionQueueDescriptor, info);
            }
        }
        case LayerType::ArgMinMax :
        {
            auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
            return std::make_unique<RefArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info);
        }
        case LayerType::BatchMatMul :
        {
            auto batchMatMulQueueDescriptor = PolymorphicDowncast<const BatchMatMulQueueDescriptor*>(&descriptor);
            return std::make_unique<RefBatchMatMulWorkload>(*batchMatMulQueueDescriptor, info);
        }
        case LayerType::BatchNormalization :
        {
            auto batchNormQueueDescriptor = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefBatchNormalizationWorkload>(*batchNormQueueDescriptor, info);
        }
        case LayerType::BatchToSpaceNd :
        {
            auto batchToSpaceNdQueueDescriptor
                = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
            return std::make_unique<RefBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info);
        }
        case LayerType::BroadcastTo :
        {
            auto broadcastToQueueDescriptor = PolymorphicDowncast<const BroadcastToQueueDescriptor*>(&descriptor);
            return std::make_unique<RefBroadcastToWorkload>(*broadcastToQueueDescriptor, info);
        }
        case LayerType::Cast :
        {
            auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
            return std::make_unique<RefCastWorkload>(*castQueueDescriptor, info);
        }
        case LayerType::ChannelShuffle :
        {
            auto channelShuffleQueueDescriptor
                = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
            return std::make_unique<RefChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info);
        }
        case LayerType::Comparison :
        {
            auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
            return std::make_unique<RefComparisonWorkload>(*comparisonQueueDescriptor, info);
        }
        case LayerType::Concat :
        {
            auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConcatWorkload>(*concatQueueDescriptor, info);
        }
        case LayerType::Constant :
        {
            auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConstantWorkload>(*constantQueueDescriptor, info);
        }
        case LayerType::ConvertFp16ToFp32 :
        {
            auto convertFp16ToFp32QueueDescriptor
                = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info);
        }
        case LayerType::ConvertFp32ToFp16 :
        {
            auto convertFp32ToFp16QueueDescriptor
                = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor, info);
        }
        case LayerType::Convolution2d :
        {
            auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvolution2dWorkload>(*convolution2dQueueDescriptor, info);
        }
        case LayerType::Convolution3d :
        {
            auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvolution3dWorkload>(*convolution3dQueueDescriptor, info);
        }
        case LayerType::Debug:
        {
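            // Debug has a dedicated workload per data type, so the WorkloadInfo is probed explicitly here;
            // MakeWorkload below only covers the remaining Float32/QAsymmU8 cases.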
            auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
            if (IsBFloat16(info))
            {
                return std::make_unique<RefDebugBFloat16Workload>(*debugQueueDescriptor, info);
            }
            if (IsFloat16(info))
            {
                return std::make_unique<RefDebugFloat16Workload>(*debugQueueDescriptor, info);
            }
            if (IsQSymmS16(info))
            {
                return std::make_unique<RefDebugQSymmS16Workload>(*debugQueueDescriptor, info);
            }
            if (IsQSymmS8(info))
            {
                return std::make_unique<RefDebugQSymmS8Workload>(*debugQueueDescriptor, info);
            }
            if (IsQAsymmU8(info))
            {
                return std::make_unique<RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
            }
            if (IsQAsymmS8(info))
            {
                return std::make_unique<RefDebugQAsymmS8Workload>(*debugQueueDescriptor, info);
            }
            if (IsSigned32(info))
            {
                return std::make_unique<RefDebugSigned32Workload>(*debugQueueDescriptor, info);
            }
            if (IsSigned64(info))
            {
                return std::make_unique<RefDebugSigned64Workload>(*debugQueueDescriptor, info);
            }
            return MakeWorkload<RefDebugFloat32Workload, RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
        }
        case LayerType::DepthToSpace:
        {
            auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info);
        }
        case LayerType::DepthwiseConvolution2d:
        {
            auto depthwiseConvolution2DQueueDescriptor
                = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDepthwiseConvolution2dWorkload>(*depthwiseConvolution2DQueueDescriptor, info);
        }
        case LayerType::Dequantize:
        {
            auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDequantizeWorkload>(*dequantizeQueueDescriptor, info);
        }
        case LayerType::DetectionPostProcess:
        {
            auto detectionPostProcessQueueDescriptor
                = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDetectionPostProcessWorkload>(*detectionPostProcessQueueDescriptor, info);
        }
        case LayerType::Division:
        {
            auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefDivisionWorkload<int32_t>>(*divisionQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefDivisionWorkload<float>>(*divisionQueueDescriptor, info);
            }
        }
        case LayerType::ElementwiseBinary:
        {
            auto elementwiseBinaryQueueDescriptor
                = PolymorphicDowncast<const ElementwiseBinaryQueueDescriptor*>(&descriptor);
            return std::make_unique<RefElementwiseBinaryWorkload>(*elementwiseBinaryQueueDescriptor, info);
        }
        case LayerType::ElementwiseUnary:
        {
            auto elementwiseUnaryQueueDescriptor
                = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
            if ((*elementwiseUnaryQueueDescriptor).m_Parameters.m_Operation == UnaryOperation::LogicalNot)
            {
                return std::make_unique<RefLogicalUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
            }
            return std::make_unique<RefElementwiseUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
        }
        case LayerType::FakeQuantization:
        {
            auto fakeQuantizationQueueDescriptor
                = PolymorphicDowncast<const FakeQuantizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefFakeQuantizationFloat32Workload>(*fakeQuantizationQueueDescriptor, info);
        }
        case LayerType::Fill:
        {
            auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
            return std::make_unique<RefFillWorkload>(*fillQueueDescriptor, info);
        }
        case LayerType::Floor:
        {
            auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
            if(IsQuantizedType(info.m_InputTensorInfos[0].GetDataType()))
            {
                return nullptr;
            }
            else
            {
                return std::make_unique<RefFloorWorkload>(*floorQueueDescriptor, info);
            }
        }
        case LayerType::FullyConnected:
        {
            auto fullyConnectedQueueDescriptor
                = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
            return std::make_unique<RefFullyConnectedWorkload>(*fullyConnectedQueueDescriptor, info);
        }
        case LayerType::Gather:
        {
            auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
            return std::make_unique<RefGatherWorkload>(*gatherQueueDescriptor, info);
        }
        case LayerType::GatherNd:
        {
            auto gatherNdQueueDescriptor = PolymorphicDowncast<const GatherNdQueueDescriptor*>(&descriptor);
            return std::make_unique<RefGatherNdWorkload>(*gatherNdQueueDescriptor, info);
        }
        case LayerType::Input:
        {
            auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Input cannot be zero length");
            }
            if (info.m_OutputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Output cannot be zero length");
            }
            if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: "
                                               "data input and output differ in byte count.");
            }
            return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
        }
        case LayerType::InstanceNormalization:
        {
            auto instanceNormalizationQueueDescriptor
                = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor, info);
        }
        case LayerType::L2Normalization:
        {
            auto l2NormalizationQueueDescriptor
                = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefL2NormalizationWorkload>(*l2NormalizationQueueDescriptor, info);
        }
        case LayerType::LogicalBinary:
        {
            auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
            return std::make_unique<RefLogicalBinaryWorkload>(*logicalBinaryQueueDescriptor, info);
        }
        case LayerType::LogSoftmax:
        {
            auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<RefLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor, info);
        }
        case LayerType::Lstm:
        {
            auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
            return std::make_unique<RefLstmWorkload>(*lstmQueueDescriptor, info);
        }
        case LayerType::Maximum:
        {
            auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefMaximumWorkload<int32_t>>(*maximumQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefMaximumWorkload<float>>(*maximumQueueDescriptor, info);
            }
        }
        case LayerType::Mean:
        {
            auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
            return std::make_unique<RefMeanWorkload>(*meanQueueDescriptor, info);
        }
        case LayerType::MemCopy:
        {
            auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
            if (descriptor.m_Inputs.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory: CreateMemCopy() expected an input tensor.");
            }
            return std::make_unique<CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
        }
        case LayerType::MemImport:
        {
            auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
            if (descriptor.m_Inputs.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory: CreateMemImport() expected an input tensor.");
            }
            return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
        }
        case LayerType::Minimum:
        {
            auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefMinimumWorkload<int32_t>>(*minimumQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefMinimumWorkload<float>>(*minimumQueueDescriptor, info);
            }
        }
        case LayerType::Multiplication:
        {
            auto multiplicationQueueDescriptor
                = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefMultiplicationWorkload<int32_t>>(*multiplicationQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefMultiplicationWorkload<float>>(*multiplicationQueueDescriptor, info);
            }
        }
        case LayerType::Normalization:
        {
            auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefNormalizationWorkload>(*normalizationQueueDescriptor, info);
        }
        case LayerType::Output:
        {
            auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Input cannot be zero length");
            }
            if (info.m_OutputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Output cannot be zero length");
            }
            if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: data input and output "
                                               "differ in byte count.");
            }
            return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
        }
        case LayerType::Pad:
        {
            auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPadWorkload>(*padQueueDescriptor, info);
        }
        case LayerType::Permute:
        {
            auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
            if (IsQSymmS16(info))
            {
                return std::make_unique<RefPermuteQSymm16Workload>(*permuteQueueDescriptor, info);
            }
            else if (IsBFloat16(info))
            {
                return std::make_unique<RefPermuteBFloat16Workload>(*permuteQueueDescriptor, info);
            }
            else if (IsQAsymmS8(info))
            {
                return std::make_unique<RefPermuteQAsymmS8Workload>(*permuteQueueDescriptor, info);
            }
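            // Remaining data types (Float16, Float32, QAsymmU8) are resolved by MakeWorkloadHelper below;
            // anything else maps to NullWorkload and therefore nullptr.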
            return MakeWorkloadHelper<RefPermuteFloat16Workload, RefPermuteFloat32Workload, RefPermuteQAsymm8Workload,
                NullWorkload, NullWorkload, NullWorkload>(*permuteQueueDescriptor, info);
        }
        case LayerType::Pooling2d:
        {
            auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPooling2dWorkload>(*pooling2dQueueDescriptor, info);
        }
        case LayerType::Pooling3d:
        {
            auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPooling3dWorkload>(*pooling3dQueueDescriptor, info);
        }
        case LayerType::PreCompiled:
        {
            return nullptr;
        }
        case LayerType::Prelu:
        {
            auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPreluWorkload>(*preluQueueDescriptor, info);
        }
        case LayerType::QLstm:
        {
            auto qlstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<RefQLstmWorkload>(*qlstmQueueDescriptor, info);
        }
        case LayerType::Quantize:
        {
            auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefQuantizeWorkload>(*quantizeQueueDescriptor, info);
        }
        case LayerType::Rank:
        {
            auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
            return std::make_unique<RefRankWorkload>(*rankQueueDescriptor, info);
        }
        case LayerType::Reduce:
        {
            auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefReduceWorkload>(*reduceQueueDescriptor, info);
        }
        case LayerType::Reshape:
        {
            auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefReshapeWorkload>(*reshapeQueueDescriptor, info);
        }
        case LayerType::Resize:
        {
            auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefResizeWorkload>(*resizeQueueDescriptor, info);
        }
        case LayerType::ReverseV2:
        {
            auto reverseV2QueueDescriptor = PolymorphicDowncast<const ReverseV2QueueDescriptor*>(&descriptor);
            return std::make_unique<RefReverseV2Workload>(*reverseV2QueueDescriptor, info);
        }
        case LayerType::Shape:
        {
            auto shapeQueueDescriptor = PolymorphicDowncast<const ShapeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefShapeWorkload>(*shapeQueueDescriptor, info);
        }
        case LayerType::Slice:
        {
            auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSliceWorkload>(*sliceQueueDescriptor, info);
        }
        case LayerType::Softmax:
        {
            auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSoftmaxWorkload>(*softmaxQueueDescriptor, info);
        }
        case LayerType::SpaceToBatchNd:
        {
            auto spaceToBatchNdQueueDescriptor
                = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info);
        }
        case LayerType::SpaceToDepth:
        {
            auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info);
        }
        case LayerType::Splitter:
        {
            auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSplitterWorkload>(*splitterQueueDescriptor, info);
        }
        case LayerType::Stack:
        {
            auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
            return std::make_unique<RefStackWorkload>(*stackQueueDescriptor, info);
        }
        case LayerType::StridedSlice:
        {
            auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefStridedSliceWorkload>(*stridedSliceQueueDescriptor, info);
        }
        case LayerType::Subtraction:
        {
            auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefSubtractionWorkload<int32_t>>(*subtractionQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefSubtractionWorkload<float>>(*subtractionQueueDescriptor, info);
            }
        }
        case LayerType::Tile:
        {
            auto tileQueueDescriptor = PolymorphicDowncast<const TileQueueDescriptor*>(&descriptor);
            return std::make_unique<RefTileWorkload>(*tileQueueDescriptor, info);
        }
        case LayerType::Transpose:
        {
            auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
            if (IsQSymmS16(info))
            {
                return std::make_unique<RefTransposeQSymm16Workload>(*transposeQueueDescriptor, info);
            }
            else if (IsBFloat16(info))
            {
                return std::make_unique<RefTransposeBFloat16Workload>(*transposeQueueDescriptor, info);
            }
            else if (IsQAsymmS8(info))
            {
                return std::make_unique<RefTransposeQAsymmS8Workload>(*transposeQueueDescriptor, info);
            }
            return MakeWorkloadHelper<RefTransposeFloat16Workload, RefTransposeFloat32Workload,
                RefTransposeQAsymm8Workload, NullWorkload, NullWorkload, NullWorkload>
                (*transposeQueueDescriptor, info);
        }
        case LayerType::TransposeConvolution2d:
        {
            auto transposeConvolution2dQueueDescriptor
                = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor, info);
        }
        case LayerType::UnidirectionalSequenceLstm:
        {
            auto unidirectionalSequenceLstmQueueDescriptor
                = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<RefUnidirectionalSequenceLstmWorkload>(*unidirectionalSequenceLstmQueueDescriptor,
                                                                           info);
        }
        default:
            return nullptr;
    }
}

} // namespace armnn
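
A minimal sketch of how this factory is typically driven (illustrative only, not part of RefWorkloadFactory.cpp; it assumes the usual ArmNN headers are available and that tensor data is bound before execution):

    // Build a reference Activation workload by hand.
    armnn::RefWorkloadFactory factory;

    armnn::TensorInfo tensorInfo({1, 10}, armnn::DataType::Float32);
    std::unique_ptr<armnn::ITensorHandle> input  = factory.CreateTensorHandle(tensorInfo);
    std::unique_ptr<armnn::ITensorHandle> output = factory.CreateTensorHandle(tensorInfo);

    armnn::ActivationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Function = armnn::ActivationFunction::ReLu;
    descriptor.m_Inputs  = { input.get() };
    descriptor.m_Outputs = { output.get() };

    armnn::WorkloadInfo workloadInfo;
    workloadInfo.m_InputTensorInfos  = { tensorInfo };
    workloadInfo.m_OutputTensorInfos = { tensorInfo };

    // Dispatches to the LayerType::Activation case above and returns a RefActivationWorkload.
    auto workload = factory.CreateWorkload(armnn::LayerType::Activation, descriptor, workloadInfo);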