ArmNN 22.02
RefWorkloadFactory.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <Layer.hpp>
// Note: the documentation listing collapsed several #include lines here; the
// headers below are a reconstruction inferred from the symbols this file uses.
#include <armnn/backends/MemCopyWorkload.hpp>
#include <backendsCommon/MemImportWorkload.hpp>
#include <backendsCommon/MakeWorkloadHelper.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include "RefWorkloadFactory.hpp"
#include "RefBackendId.hpp"
#include "workloads/RefWorkloads.hpp"
#include "RefTensorHandle.hpp"

namespace armnn
{

namespace
{
static const BackendId s_Id{RefBackendId()};
}
template <typename F32Workload, typename U8Workload, typename QueueDescriptorType>
std::unique_ptr<IWorkload> RefWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
                                                            const WorkloadInfo& info) const
{
    return MakeWorkloadHelper<NullWorkload, F32Workload, U8Workload, NullWorkload, NullWorkload, NullWorkload>
           (descriptor, info);
}

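// Illustrative note (not in the original file): MakeWorkloadHelper selects one
// of its six workload template arguments from the tensor DataType recorded in
// the WorkloadInfo, in the slot order used throughout this file:
// <Float16, Float32, QAsymmU8, Signed32, Boolean, BFloat16>. A NullWorkload
// slot yields nullptr. So for the two-type wrapper above:
//
// \code
//     // CreateFakeQuantization (below) calls:
//     //     MakeWorkload<RefFakeQuantizationFloat32Workload, NullWorkload>(descriptor, info);
//     // Float32 tensors  -> RefFakeQuantizationFloat32Workload
//     // QAsymmU8 tensors -> nullptr (the U8 slot is NullWorkload)
// \endcode
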
template <DataType ArmnnType>
bool IsDataType(const WorkloadInfo& info)
{
    auto checkType = [](const TensorInfo& tensorInfo) {return tensorInfo.GetDataType() == ArmnnType;};
    auto it = std::find_if(std::begin(info.m_InputTensorInfos), std::end(info.m_InputTensorInfos), checkType);
    if (it != std::end(info.m_InputTensorInfos))
    {
        return true;
    }
    it = std::find_if(std::begin(info.m_OutputTensorInfos), std::end(info.m_OutputTensorInfos), checkType);
    if (it != std::end(info.m_OutputTensorInfos))
    {
        return true;
    }
    return false;
}

bool IsSigned32(const WorkloadInfo& info)
{
    return IsDataType<DataType::Signed32>(info);
}

bool IsBFloat16(const WorkloadInfo& info)
{
    return IsDataType<DataType::BFloat16>(info);
}

bool IsFloat16(const WorkloadInfo& info)
{
    return IsDataType<DataType::Float16>(info);
}

bool IsQSymmS16(const WorkloadInfo& info)
{
    return IsDataType<DataType::QSymmS16>(info);
}

bool IsQSymmS8(const WorkloadInfo& info)
{
    return IsDataType<DataType::QSymmS8>(info);
}

bool IsQAsymmS8(const WorkloadInfo& info)
{
    return IsDataType<DataType::QAsymmS8>(info);
}

bool IsQAsymmU8(const WorkloadInfo& info)
{
    return IsDataType<DataType::QAsymmU8>(info);
}

RefWorkloadFactory::RefWorkloadFactory(const std::shared_ptr<RefMemoryManager>& memoryManager)
    : m_MemoryManager(memoryManager)
{
}

RefWorkloadFactory::RefWorkloadFactory()
    : m_MemoryManager(new RefMemoryManager())
{
}

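// Illustrative usage (an assumption, not part of the original file): one
// RefMemoryManager can be shared between factories, or each factory can fall
// back to a private manager via the default constructor.
//
// \code
//     auto memoryManager = std::make_shared<armnn::RefMemoryManager>();
//     armnn::RefWorkloadFactory sharedFactory(memoryManager); // shared memory pool
//     armnn::RefWorkloadFactory ownFactory;                   // owns its own RefMemoryManager
// \endcode
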
const BackendId& RefWorkloadFactory::GetBackendId() const
{
    return s_Id;
}

bool RefWorkloadFactory::IsLayerSupported(const Layer& layer,
                                          Optional<DataType> dataType,
                                          std::string& outReasonIfUnsupported)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
}

bool RefWorkloadFactory::IsLayerSupported(const IConnectableLayer& layer,
                                          Optional<DataType> dataType,
                                          std::string& outReasonIfUnsupported,
                                          const ModelOptions& modelOptions)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported, modelOptions);
}

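// Both overloads simply forward to the BackendId-based check with this
// backend's s_Id. A hedged sketch of a caller-side query ('layer' is a
// hypothetical Layer reference obtained from a graph):
//
// \code
//     std::string reason;
//     bool supported = armnn::RefWorkloadFactory::IsLayerSupported(
//         layer, armnn::DataType::Float32, reason);
// \endcode
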
std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                      const bool isMemoryManaged) const
{
    if (isMemoryManaged)
    {
        return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
    }
    else
    {
        return std::make_unique<RefTensorHandle>(tensorInfo, static_cast<unsigned int>(MemorySource::Malloc));
    }
}

std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                      DataLayout dataLayout,
                                                                      const bool isMemoryManaged) const
{
    // For Ref it is okay to make the TensorHandle memory managed as it can also store a pointer
    // to unmanaged memory. This also ensures memory alignment.
    IgnoreUnused(isMemoryManaged, dataLayout);

    if (isMemoryManaged)
    {
        return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
    }
    else
    {
        return std::make_unique<RefTensorHandle>(tensorInfo, static_cast<unsigned int>(MemorySource::Malloc));
    }
}

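// Illustrative usage (an assumption): a managed handle takes its storage from
// the factory's RefMemoryManager when memory is later allocated, while an
// unmanaged one is backed by MemorySource::Malloc. 'factory' is hypothetical.
//
// \code
//     armnn::TensorInfo tensorInfo({1, 2, 2, 3}, armnn::DataType::Float32);
//     auto managed   = factory.CreateTensorHandle(tensorInfo, true);  // pooled
//     auto unmanaged = factory.CreateTensorHandle(tensorInfo, false); // malloc-backed
// \endcode
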
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateWorkload(LayerType type,
                                                              const QueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    switch(type)
    {
        case LayerType::Activation :
        {
            auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefActivationWorkload>(*activationQueueDescriptor, info);
        }
        case LayerType::Addition :
        {
            auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);

            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefAdditionWorkload<int32_t>>(*additionQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefAdditionWorkload<float>>(*additionQueueDescriptor, info);
            }
        }
        case LayerType::ArgMinMax :
        {
            auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
            return std::make_unique<RefArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info);
        }
        case LayerType::BatchNormalization :
        {
            auto batchNormQueueDescriptor = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefBatchNormalizationWorkload>(*batchNormQueueDescriptor, info);
        }
        case LayerType::BatchToSpaceNd :
        {
            auto batchToSpaceNdQueueDescriptor
                = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
            return std::make_unique<RefBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info);
        }
        case LayerType::Cast :
        {
            auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
            return std::make_unique<RefCastWorkload>(*castQueueDescriptor, info);
        }
        case LayerType::ChannelShuffle :
        {
            auto channelShuffleQueueDescriptor
                = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
            return std::make_unique<RefChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info);
        }
        case LayerType::Comparison :
        {
            auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
            return std::make_unique<RefComparisonWorkload>(*comparisonQueueDescriptor, info);
        }
        case LayerType::Concat :
        {
            auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConcatWorkload>(*concatQueueDescriptor, info);
        }
        case LayerType::Constant :
        {
            auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConstantWorkload>(*constantQueueDescriptor, info);
        }
        case LayerType::ConvertBf16ToFp32 :
        {
            auto convertBf16ToFp32QueueDescriptor
                = PolymorphicDowncast<const ConvertBf16ToFp32QueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvertBf16ToFp32Workload>(*convertBf16ToFp32QueueDescriptor, info);
        }
        case LayerType::ConvertFp16ToFp32:
        {
            auto convertFp16ToFp32QueueDescriptor
                = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info);
        }
        case LayerType::ConvertFp32ToBf16:
        {
            auto convertFp32ToBf16QueueDescriptor
                = PolymorphicDowncast<const ConvertFp32ToBf16QueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvertFp32ToBf16Workload>(*convertFp32ToBf16QueueDescriptor, info);
        }
        case LayerType::ConvertFp32ToFp16:
        {
            auto convertFp32ToFp16QueueDescriptor
                = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor, info);
        }
        case LayerType::Convolution2d:
        {
            auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvolution2dWorkload>(*convolution2dQueueDescriptor, info);
        }
        case LayerType::Convolution3d:
        {
            auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvolution3dWorkload>(*convolution3dQueueDescriptor, info);
        }
        case LayerType::Debug:
        {
            auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
            if (IsBFloat16(info))
            {
                return std::make_unique<RefDebugBFloat16Workload>(*debugQueueDescriptor, info);
            }
            if (IsFloat16(info))
            {
                return std::make_unique<RefDebugFloat16Workload>(*debugQueueDescriptor, info);
            }
            if (IsQSymmS16(info))
            {
                return std::make_unique<RefDebugQSymmS16Workload>(*debugQueueDescriptor, info);
            }
            if (IsQSymmS8(info))
            {
                return std::make_unique<RefDebugQSymmS8Workload>(*debugQueueDescriptor, info);
            }
            if (IsQAsymmU8(info))
            {
                return std::make_unique<RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
            }
            if (IsQAsymmS8(info))
            {
                return std::make_unique<RefDebugQAsymmS8Workload>(*debugQueueDescriptor, info);
            }
            if (IsSigned32(info))
            {
                return std::make_unique<RefDebugSigned32Workload>(*debugQueueDescriptor, info);
            }

            return MakeWorkload<RefDebugFloat32Workload, RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
        }
        case LayerType::DepthToSpace:
        {
            auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info);
        }
        case LayerType::DepthwiseConvolution2d:
        {
            auto depthwiseConvolution2DQueueDescriptor
                = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDepthwiseConvolution2dWorkload>(*depthwiseConvolution2DQueueDescriptor, info);
        }
        case LayerType::Dequantize:
        {
            auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDequantizeWorkload>(*dequantizeQueueDescriptor, info);
        }
        case LayerType::DetectionPostProcess:
        {
            auto detectionPostProcessQueueDescriptor
                = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDetectionPostProcessWorkload>(*detectionPostProcessQueueDescriptor, info);
        }
        case LayerType::Division:
        {
            auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefDivisionWorkload<int32_t>>(*divisionQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefDivisionWorkload<float>>(*divisionQueueDescriptor, info);
            }
        }
        case LayerType::ElementwiseUnary:
        {
            auto elementwiseUnaryQueueDescriptor
                = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
            if ((*elementwiseUnaryQueueDescriptor).m_Parameters.m_Operation == UnaryOperation::LogicalNot)
            {
                return std::make_unique<RefLogicalUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
            }
            return std::make_unique<RefElementwiseUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
        }
        case LayerType::FakeQuantization:
        {
            auto fakeQuantizationQueueDescriptor
                = PolymorphicDowncast<const FakeQuantizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefFakeQuantizationFloat32Workload>(*fakeQuantizationQueueDescriptor, info);
        }
        case LayerType::Fill:
        {
            auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
            return std::make_unique<RefFillWorkload>(*fillQueueDescriptor, info);
        }
        case LayerType::Floor:
        {
            auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
            if (IsQuantizedType(info.m_InputTensorInfos[0].GetDataType()))
            {
                return nullptr;
            }
            else
            {
                return std::make_unique<RefFloorWorkload>(*floorQueueDescriptor, info);
            }
        }
        case LayerType::FullyConnected:
        {
            auto fullyConnectedQueueDescriptor
                = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
            return std::make_unique<RefFullyConnectedWorkload>(*fullyConnectedQueueDescriptor, info);
        }
        case LayerType::Gather:
        {
            auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
            return std::make_unique<RefGatherWorkload>(*gatherQueueDescriptor, info);
        }
        case LayerType::Input:
        {
            auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Input cannot be zero length");
            }
            if (info.m_OutputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Output cannot be zero length");
            }

            if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: "
                                               "data input and output differ in byte count.");
            }

            return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
        }
        case LayerType::InstanceNormalization:
        {
            auto instanceNormalizationQueueDescriptor
                = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor, info);
        }
        case LayerType::L2Normalization:
        {
            auto l2NormalizationQueueDescriptor
                = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefL2NormalizationWorkload>(*l2NormalizationQueueDescriptor, info);
        }
        case LayerType::LogicalBinary:
        {
            auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
            return std::make_unique<RefLogicalBinaryWorkload>(*logicalBinaryQueueDescriptor, info);
        }
        case LayerType::LogSoftmax:
        {
            auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<RefLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor, info);
        }
        case LayerType::Lstm:
        {
            auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
            return std::make_unique<RefLstmWorkload>(*lstmQueueDescriptor, info);
        }
        case LayerType::Maximum:
        {
            auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefMaximumWorkload<int32_t>>(*maximumQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefMaximumWorkload<float>>(*maximumQueueDescriptor, info);
            }
        }
        case LayerType::Mean:
        {
            auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
            return std::make_unique<RefMeanWorkload>(*meanQueueDescriptor, info);
        }
        case LayerType::MemCopy:
        {
            auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
            if (descriptor.m_Inputs.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory: CreateMemCopy() expected an input tensor.");
            }
            return std::make_unique<CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
        }
        case LayerType::MemImport:
        {
            auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
            if (descriptor.m_Inputs.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory: CreateMemImport() expected an input tensor.");
            }
            return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
        }
        case LayerType::Minimum:
        {
            auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefMinimumWorkload<int32_t>>(*minimumQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefMinimumWorkload<float>>(*minimumQueueDescriptor, info);
            }
        }
        case LayerType::Multiplication:
        {
            auto multiplicationQueueDescriptor
                = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefMultiplicationWorkload<int32_t>>(*multiplicationQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefMultiplicationWorkload<float>>(*multiplicationQueueDescriptor, info);
            }
        }
        case LayerType::Normalization:
        {
            auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefNormalizationWorkload>(*normalizationQueueDescriptor, info);
        }
        case LayerType::Output:
        {
            auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Input cannot be zero length");
            }
            if (info.m_OutputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Output cannot be zero length");
            }
            if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: data input and output "
                                               "differ in byte count.");
            }

            return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
        }
        case LayerType::Pad:
        {
            auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPadWorkload>(*padQueueDescriptor, info);
        }
        case LayerType::Permute:
        {
            auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
            if (IsQSymmS16(info))
            {
                return std::make_unique<RefPermuteQSymm16Workload>(*permuteQueueDescriptor, info);
            }
            else if (IsBFloat16(info))
            {
                return std::make_unique<RefPermuteBFloat16Workload>(*permuteQueueDescriptor, info);
            }
            else if (IsQAsymmS8(info))
            {
                return std::make_unique<RefPermuteQAsymmS8Workload>(*permuteQueueDescriptor, info);
            }
            return MakeWorkloadHelper<RefPermuteFloat16Workload, RefPermuteFloat32Workload, RefPermuteQAsymm8Workload,
                                      NullWorkload, NullWorkload, NullWorkload>(*permuteQueueDescriptor, info);
        }
        case LayerType::Pooling2d:
        {
            auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPooling2dWorkload>(*pooling2dQueueDescriptor, info);
        }
        case LayerType::Pooling3d:
        {
            auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPooling3dWorkload>(*pooling3dQueueDescriptor, info);
        }
        case LayerType::PreCompiled:
        {
            return nullptr;
        }
        case LayerType::Prelu:
        {
            auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPreluWorkload>(*preluQueueDescriptor, info);
        }
        case LayerType::QLstm:
        {
            auto qlstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<RefQLstmWorkload>(*qlstmQueueDescriptor, info);
        }
        case LayerType::Quantize:
        {
            auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefQuantizeWorkload>(*quantizeQueueDescriptor, info);
        }
        case LayerType::Rank:
        {
            auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
            return std::make_unique<RefRankWorkload>(*rankQueueDescriptor, info);
        }
        case LayerType::Reduce:
        {
            auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefReduceWorkload>(*reduceQueueDescriptor, info);
        }
        case LayerType::Reshape:
        {
            auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefReshapeWorkload>(*reshapeQueueDescriptor, info);
        }
        case LayerType::Resize:
        {
            auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefResizeWorkload>(*resizeQueueDescriptor, info);
        }
        case LayerType::Shape:
        {
            auto shapeQueueDescriptor = PolymorphicDowncast<const ShapeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefShapeWorkload>(*shapeQueueDescriptor, info);
        }
        case LayerType::Slice:
        {
            auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSliceWorkload>(*sliceQueueDescriptor, info);
        }
        case LayerType::Softmax:
        {
            auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSoftmaxWorkload>(*softmaxQueueDescriptor, info);
        }
        case LayerType::SpaceToBatchNd:
        {
            auto spaceToBatchNdQueueDescriptor
                = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info);
        }
        case LayerType::SpaceToDepth:
        {
            auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info);
        }
        case LayerType::Splitter:
        {
            auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSplitterWorkload>(*splitterQueueDescriptor, info);
        }
        case LayerType::Stack:
        {
            auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
            return std::make_unique<RefStackWorkload>(*stackQueueDescriptor, info);
        }
        case LayerType::StridedSlice:
        {
            auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefStridedSliceWorkload>(*stridedSliceQueueDescriptor, info);
        }
        case LayerType::Subtraction:
        {
            auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefSubtractionWorkload<int32_t>>(*subtractionQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefSubtractionWorkload<float>>(*subtractionQueueDescriptor, info);
            }
        }
        case LayerType::Transpose:
        {
            auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
            if (IsQSymmS16(info))
            {
                return std::make_unique<RefTransposeQSymm16Workload>(*transposeQueueDescriptor, info);
            }
            else if (IsBFloat16(info))
            {
                return std::make_unique<RefTransposeBFloat16Workload>(*transposeQueueDescriptor, info);
            }
            else if (IsQAsymmS8(info))
            {
                return std::make_unique<RefTransposeQAsymmS8Workload>(*transposeQueueDescriptor, info);
            }
            return MakeWorkloadHelper<RefTransposeFloat16Workload, RefTransposeFloat32Workload,
                                      RefTransposeQAsymm8Workload, NullWorkload, NullWorkload, NullWorkload>
                                      (*transposeQueueDescriptor, info);
        }
        case LayerType::TransposeConvolution2d:
        {
            auto transposeConvolution2dQueueDescriptor
                = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor, info);
        }
        case LayerType::UnidirectionalSequenceLstm:
        {
            auto unidirectionalSequenceLstmQueueDescriptor
                = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<RefUnidirectionalSequenceLstmWorkload>(*unidirectionalSequenceLstmQueueDescriptor,
                                                                           info);
        }
        default:
            return nullptr;
    }
}

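// Hedged usage sketch (not part of the original file): callers reach the switch
// above through the LayerType-based API. For example, for the Addition case
// ('factory' and 'floatInfo' are hypothetical, floatInfo being a Float32 TensorInfo):
//
// \code
//     armnn::AdditionQueueDescriptor additionDescriptor;
//     armnn::WorkloadInfo workloadInfo;
//     workloadInfo.m_InputTensorInfos  = { floatInfo, floatInfo };
//     workloadInfo.m_OutputTensorInfos = { floatInfo };
//     auto workload = factory.CreateWorkload(armnn::LayerType::Addition,
//                                            additionDescriptor, workloadInfo);
//     // Signed32 inputs would select RefAdditionWorkload<int32_t> instead of <float>.
// \endcode
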
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const
{
    return std::make_unique<RefActivationWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
    {
        return std::make_unique<RefAdditionWorkload<int32_t>>(descriptor, info);
    }
    else
    {
        return std::make_unique<RefAdditionWorkload<float>>(descriptor, info);
    }
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    return std::make_unique<RefArgMinMaxWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateBatchNormalization(
    const BatchNormalizationQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefBatchNormalizationWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
                                                                    const WorkloadInfo& info) const
{
    return std::make_unique<RefBatchToSpaceNdWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateCast(const CastQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return std::make_unique<RefCastWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateChannelShuffle(const ChannelShuffleQueueDescriptor& descriptor,
                                                                    const WorkloadInfo& info) const
{
    return std::make_unique<RefChannelShuffleWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const
{
    return std::make_unique<RefComparisonWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return std::make_unique<RefConcatWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    return std::make_unique<RefConstantWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvertBf16ToFp32(
    const ConvertBf16ToFp32QueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefConvertBf16ToFp32Workload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvertFp16ToFp32(
    const ConvertFp16ToFp32QueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefConvertFp16ToFp32Workload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvertFp32ToBf16(
    const ConvertFp32ToBf16QueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefConvertFp32ToBf16Workload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvertFp32ToFp16(
    const ConvertFp32ToFp16QueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefConvertFp32ToFp16Workload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
{
    return std::make_unique<RefConvolution2dWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvolution3d(const Convolution3dQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
{
    return std::make_unique<RefConvolution3dWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    if (IsBFloat16(info))
    {
        return std::make_unique<RefDebugBFloat16Workload>(descriptor, info);
    }
    if (IsFloat16(info))
    {
        return std::make_unique<RefDebugFloat16Workload>(descriptor, info);
    }
    if (IsQSymmS16(info))
    {
        return std::make_unique<RefDebugQSymmS16Workload>(descriptor, info);
    }
    if (IsQSymmS8(info))
    {
        return std::make_unique<RefDebugQSymmS8Workload>(descriptor, info);
    }
    if (IsQAsymmU8(info))
    {
        return std::make_unique<RefDebugQAsymmU8Workload>(descriptor, info);
    }
    if (IsQAsymmS8(info))
    {
        return std::make_unique<RefDebugQAsymmS8Workload>(descriptor, info);
    }
    if (IsSigned32(info))
    {
        return std::make_unique<RefDebugSigned32Workload>(descriptor, info);
    }

    return MakeWorkload<RefDebugFloat32Workload, RefDebugQAsymmU8Workload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& descriptor,
                                                                  const WorkloadInfo& info) const
{
    return std::make_unique<RefDepthToSpaceWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDepthwiseConvolution2d(
    const DepthwiseConvolution2dQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefDepthwiseConvolution2dWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDequantize(const DequantizeQueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const
{
    return std::make_unique<RefDequantizeWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDetectionPostProcess(
    const DetectionPostProcessQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefDetectionPostProcessWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
    {
        return std::make_unique<RefDivisionWorkload<int32_t>>(descriptor, info);
    }
    else
    {
        return std::make_unique<RefDivisionWorkload<float>>(descriptor, info);
    }
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
                                                                      const WorkloadInfo& info) const
{
    if (descriptor.m_Parameters.m_Operation == UnaryOperation::LogicalNot)
    {
        return std::make_unique<RefLogicalUnaryWorkload>(descriptor, info);
    }
    return std::make_unique<RefElementwiseUnaryWorkload>(descriptor, info);
}

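// Note (an inference, not original text): LogicalNot is routed to the dedicated
// RefLogicalUnaryWorkload, presumably because it operates on Boolean tensors
// that the generic RefElementwiseUnaryWorkload path does not handle; the same
// branch appears in the LayerType::ElementwiseUnary case above.
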
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
                                                                      const WorkloadInfo& info) const
{
    return MakeWorkload<RefFakeQuantizationFloat32Workload, NullWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFill(const FillQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return std::make_unique<RefFillWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    if (IsQuantizedType(info.m_InputTensorInfos[0].GetDataType()))
    {
        return nullptr;
    }
    else
    {
        return std::make_unique<RefFloorWorkload>(descriptor, info);
    }
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFullyConnected(
    const FullyConnectedQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefFullyConnectedWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateGather(const GatherQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return std::make_unique<RefGatherWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    if (info.m_InputTensorInfos.empty())
    {
        throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Input cannot be zero length");
    }
    if (info.m_OutputTensorInfos.empty())
    {
        throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Output cannot be zero length");
    }

    if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
    {
        throw InvalidArgumentException("RefWorkloadFactory::CreateInput: data input and output differ in byte count.");
    }

    return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateInstanceNormalization(
    const InstanceNormalizationQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefInstanceNormalizationWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
                                                                     const WorkloadInfo& info) const
{
    return std::make_unique<RefL2NormalizationWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateLogicalBinary(const LogicalBinaryQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
{
    return std::make_unique<RefLogicalBinaryWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const
{
    return std::make_unique<RefLogSoftmaxWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateLstm(const LstmQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return std::make_unique<RefLstmWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
    {
        return std::make_unique<RefMaximumWorkload<int32_t>>(descriptor, info);
    }
    else
    {
        return std::make_unique<RefMaximumWorkload<float>>(descriptor, info);
    }
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMean(const MeanQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return std::make_unique<RefMeanWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    if (descriptor.m_Inputs.empty())
    {
        throw InvalidArgumentException("RefWorkloadFactory: CreateMemCopy() expected an input tensor.");
    }
    return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    if (descriptor.m_Inputs.empty())
    {
        throw InvalidArgumentException("RefWorkloadFactory: CreateMemImport() expected an input tensor.");
    }
    return std::make_unique<ImportMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
    {
        return std::make_unique<RefMinimumWorkload<int32_t>>(descriptor, info);
    }
    else
    {
        return std::make_unique<RefMinimumWorkload<float>>(descriptor, info);
    }
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
                                                                    const WorkloadInfo& info) const
{
    if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
    {
        return std::make_unique<RefMultiplicationWorkload<int32_t>>(descriptor, info);
    }
    else
    {
        return std::make_unique<RefMultiplicationWorkload<float>>(descriptor, info);
    }
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
{
    return std::make_unique<RefNormalizationWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    if (info.m_InputTensorInfos.empty())
    {
        throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Input cannot be zero length");
    }
    if (info.m_OutputTensorInfos.empty())
    {
        throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Output cannot be zero length");
    }
    if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
    {
        throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: data input and output differ in byte count.");
    }

    return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
                                                         const WorkloadInfo& info) const
{
    return std::make_unique<RefPadWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    if (IsQSymmS16(info))
    {
        return std::make_unique<RefPermuteQSymm16Workload>(descriptor, info);
    }
    else if (IsBFloat16(info))
    {
        return std::make_unique<RefPermuteBFloat16Workload>(descriptor, info);
    }
    else if (IsQAsymmS8(info))
    {
        return std::make_unique<RefPermuteQAsymmS8Workload>(descriptor, info);
    }
    return MakeWorkloadHelper<RefPermuteFloat16Workload, RefPermuteFloat32Workload, RefPermuteQAsymm8Workload,
                              NullWorkload, NullWorkload, NullWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    return std::make_unique<RefPooling2dWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePooling3d(const Pooling3dQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    return std::make_unique<RefPooling3dWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& /*descriptor*/,
                                                                 const WorkloadInfo& /*info*/) const
{
    return nullptr;
}

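// Note (an inference): the reference backend has no pre-compiled object
// support, so this method and the LayerType::PreCompiled case above both
// deliberately return nullptr rather than throwing.
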
std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePrelu(const PreluQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return std::make_unique<RefPreluWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateQLstm(const QLstmQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return std::make_unique<RefQLstmWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    return std::make_unique<RefQuantizeWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateRank(const RankQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return std::make_unique<RefRankWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateReduce(const ReduceQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return std::make_unique<RefReduceWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    return std::make_unique<RefReshapeWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return std::make_unique<RefResizeWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateShape(const ShapeQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return std::make_unique<RefShapeWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return std::make_unique<RefSliceWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    return std::make_unique<RefSoftmaxWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
                                                                    const WorkloadInfo& info) const
{
    return std::make_unique<RefSpaceToBatchNdWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
                                                                  const WorkloadInfo& info) const
{
    return std::make_unique<RefSpaceToDepthWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    return std::make_unique<RefSplitterWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateStack(const StackQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return std::make_unique<RefStackWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
                                                                  const WorkloadInfo& info) const
{
    return std::make_unique<RefStridedSliceWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& info) const
{
    if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
    {
        return std::make_unique<RefSubtractionWorkload<int32_t>>(descriptor, info);
    }
    else
    {
        return std::make_unique<RefSubtractionWorkload<float>>(descriptor, info);
    }
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    if (IsQSymmS16(info))
    {
        return std::make_unique<RefTransposeQSymm16Workload>(descriptor, info);
    }
    else if (IsBFloat16(info))
    {
        return std::make_unique<RefTransposeBFloat16Workload>(descriptor, info);
    }
    else if (IsQAsymmS8(info))
    {
        return std::make_unique<RefTransposeQAsymmS8Workload>(descriptor, info);
    }
    return MakeWorkloadHelper<RefTransposeFloat16Workload, RefTransposeFloat32Workload, RefTransposeQAsymm8Workload,
                              NullWorkload, NullWorkload, NullWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateTransposeConvolution2d(
    const TransposeConvolution2dQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefTransposeConvolution2dWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateUnidirectionalSequenceLstm(
    const UnidirectionalSequenceLstmQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefUnidirectionalSequenceLstmWorkload>(descriptor, info);
}

} // namespace armnn
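
Taken together, a minimal end-to-end sketch of driving the factory directly (an
illustrative assumption: real callers go through the ArmNN runtime, and the
include paths below depend on how the backend headers are exposed in a build):

    #include <RefWorkloadFactory.hpp>
    #include <armnn/Tensor.hpp>

    #include <memory>

    int main()
    {
        // A default-constructed factory owns its own RefMemoryManager.
        armnn::RefWorkloadFactory factory;

        // Describe a small float32 tensor and ask for a memory-managed handle.
        armnn::TensorInfo tensorInfo({1, 2, 2, 3}, armnn::DataType::Float32);
        std::unique_ptr<armnn::ITensorHandle> handle =
            factory.CreateTensorHandle(tensorInfo, true);

        return handle != nullptr ? 0 : 1;
    }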