ArmNN 22.08
RefWorkloadFactory.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <Layer.hpp>
#include <armnn/backends/MemCopyWorkload.hpp>
#include <armnn/backends/TensorHandle.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include "RefWorkloadFactory.hpp"
#include "RefBackendId.hpp"
#include "workloads/RefWorkloads.hpp"
#include "RefTensorHandle.hpp"

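// RefWorkloadFactory produces the reference (CpuRef) backend's workloads: portable,
// non-accelerated C++ implementations that serve as the behavioural baseline for the
// other ArmNN backends.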
namespace armnn
{

namespace
{
static const BackendId s_Id{RefBackendId()};
}
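
// Every instance of this factory reports the same static backend id, RefBackendId()
// (the CpuRef reference backend). MakeWorkload below selects a workload class by tensor
// data type through MakeWorkloadHelper; only the Float32 and QAsymmU8 slots are
// populated here, and the NullWorkload slots disable the remaining data types.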
template <typename F32Workload, typename U8Workload, typename QueueDescriptorType>
std::unique_ptr<IWorkload> RefWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
                                                            const WorkloadInfo& info) const
{
    return MakeWorkloadHelper<NullWorkload, F32Workload, U8Workload, NullWorkload, NullWorkload, NullWorkload>
           (descriptor, info);
}

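// Returns true if any input or output tensor described by the WorkloadInfo has the
// requested data type.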
template <DataType ArmnnType>
bool IsDataType(const WorkloadInfo& info)
{
    auto checkType = [](const TensorInfo& tensorInfo) {return tensorInfo.GetDataType() == ArmnnType;};
    auto it = std::find_if(std::begin(info.m_InputTensorInfos), std::end(info.m_InputTensorInfos), checkType);
    if (it != std::end(info.m_InputTensorInfos))
    {
        return true;
    }
    it = std::find_if(std::begin(info.m_OutputTensorInfos), std::end(info.m_OutputTensorInfos), checkType);
    if (it != std::end(info.m_OutputTensorInfos))
    {
        return true;
    }
    return false;
}

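// Convenience wrappers over IsDataType<> used by the workload creation methods below.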
bool IsSigned32(const WorkloadInfo& info)
{
    return IsDataType<DataType::Signed32>(info);
}

bool IsBFloat16(const WorkloadInfo& info)
{
    return IsDataType<DataType::BFloat16>(info);
}

bool IsFloat16(const WorkloadInfo& info)
{
    return IsDataType<DataType::Float16>(info);
}

bool IsQSymmS16(const WorkloadInfo& info)
{
    return IsDataType<DataType::QSymmS16>(info);
}

bool IsQSymmS8(const WorkloadInfo& info)
{
    return IsDataType<DataType::QSymmS8>(info);
}

bool IsQAsymmS8(const WorkloadInfo& info)
{
    return IsDataType<DataType::QAsymmS8>(info);
}

bool IsQAsymmU8(const WorkloadInfo& info)
{
    return IsDataType<DataType::QAsymmU8>(info);
}

RefWorkloadFactory::RefWorkloadFactory(const std::shared_ptr<RefMemoryManager>& memoryManager)
    : m_MemoryManager(memoryManager)
{
}

RefWorkloadFactory::RefWorkloadFactory()
    : m_MemoryManager(new RefMemoryManager())
{
}

const BackendId& RefWorkloadFactory::GetBackendId() const
{
    return s_Id;
}

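// Both IsLayerSupported overloads defer to the backend-independent query on
// IWorkloadFactory, pinned to this factory's backend id.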
bool RefWorkloadFactory::IsLayerSupported(const Layer& layer,
                                          Optional<DataType> dataType,
                                          std::string& outReasonIfUnsupported)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
}

bool RefWorkloadFactory::IsLayerSupported(const IConnectableLayer& layer,
                                          Optional<DataType> dataType,
                                          std::string& outReasonIfUnsupported,
                                          const ModelOptions& modelOptions)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported, modelOptions);
}

std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                      const bool isMemoryManaged) const
{
    if (isMemoryManaged)
    {
        return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
    }
    else
    {
        return std::make_unique<RefTensorHandle>(tensorInfo, static_cast<unsigned int>(MemorySource::Malloc));
    }
}

std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                      DataLayout dataLayout,
                                                                      const bool isMemoryManaged) const
{
    // For Ref it is okay to make the TensorHandle memory managed as it can also store a pointer
    // to unmanaged memory. This also ensures memory alignment.
    IgnoreUnused(dataLayout);

    if (isMemoryManaged)
    {
        return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
    }
    else
    {
        return std::make_unique<RefTensorHandle>(tensorInfo, static_cast<unsigned int>(MemorySource::Malloc));
    }
}

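// Unified workload creation: each case downcasts the generic QueueDescriptor to the
// layer-specific descriptor type and instantiates the matching reference workload.
// The PolymorphicDowncast is safe because callers pass the descriptor type that
// corresponds to the requested LayerType.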
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateWorkload(LayerType type,
                                                              const QueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    switch(type)
    {
        case LayerType::Activation:
        {
            auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefActivationWorkload>(*activationQueueDescriptor, info);
        }
        case LayerType::Addition:
        {
            auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);

            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefAdditionWorkload<int32_t>>(*additionQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefAdditionWorkload<float>>(*additionQueueDescriptor, info);
            }
        }
        case LayerType::ArgMinMax:
        {
            auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
            return std::make_unique<RefArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info);
        }
        case LayerType::BatchMatMul:
        {
            auto batchMatMulQueueDescriptor = PolymorphicDowncast<const BatchMatMulQueueDescriptor*>(&descriptor);
            return std::make_unique<RefBatchMatMulWorkload>(*batchMatMulQueueDescriptor, info);
        }
        case LayerType::BatchNormalization:
        {
            auto batchNormQueueDescriptor = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefBatchNormalizationWorkload>(*batchNormQueueDescriptor, info);
        }
        case LayerType::BatchToSpaceNd:
        {
            auto batchToSpaceNdQueueDescriptor
                = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
            return std::make_unique<RefBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info);
        }
        case LayerType::Cast:
        {
            auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
            return std::make_unique<RefCastWorkload>(*castQueueDescriptor, info);
        }
        case LayerType::ChannelShuffle:
        {
            auto channelShuffleQueueDescriptor
                = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
            return std::make_unique<RefChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info);
        }
        case LayerType::Comparison:
        {
            auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
            return std::make_unique<RefComparisonWorkload>(*comparisonQueueDescriptor, info);
        }
        case LayerType::Concat:
        {
            auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConcatWorkload>(*concatQueueDescriptor, info);
        }
        case LayerType::Constant:
        {
            auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConstantWorkload>(*constantQueueDescriptor, info);
        }
        case LayerType::ConvertBf16ToFp32:
        {
            auto convertBf16ToFp32QueueDescriptor
                = PolymorphicDowncast<const ConvertBf16ToFp32QueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvertBf16ToFp32Workload>(*convertBf16ToFp32QueueDescriptor, info);
        }
        case LayerType::ConvertFp16ToFp32:
        {
            auto convertFp16ToFp32QueueDescriptor
                = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info);
        }
        case LayerType::ConvertFp32ToBf16:
        {
            auto convertFp32ToBf16QueueDescriptor
                = PolymorphicDowncast<const ConvertFp32ToBf16QueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvertFp32ToBf16Workload>(*convertFp32ToBf16QueueDescriptor, info);
        }
        case LayerType::ConvertFp32ToFp16:
        {
            auto convertFp32ToFp16QueueDescriptor
                = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor, info);
        }
        case LayerType::Convolution2d:
        {
            auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvolution2dWorkload>(*convolution2dQueueDescriptor, info);
        }
        case LayerType::Convolution3d:
        {
            auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvolution3dWorkload>(*convolution3dQueueDescriptor, info);
        }
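        // Debug workloads are chosen by explicit tensor-type checks; anything not
        // matched below falls through to MakeWorkload, which covers Float32 and QAsymmU8.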
        case LayerType::Debug:
        {
            auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
            if (IsBFloat16(info))
            {
                return std::make_unique<RefDebugBFloat16Workload>(*debugQueueDescriptor, info);
            }
            if (IsFloat16(info))
            {
                return std::make_unique<RefDebugFloat16Workload>(*debugQueueDescriptor, info);
            }
            if (IsQSymmS16(info))
            {
                return std::make_unique<RefDebugQSymmS16Workload>(*debugQueueDescriptor, info);
            }
            if (IsQSymmS8(info))
            {
                return std::make_unique<RefDebugQSymmS8Workload>(*debugQueueDescriptor, info);
            }
            if (IsQAsymmU8(info))
            {
                return std::make_unique<RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
            }
            if (IsQAsymmS8(info))
            {
                return std::make_unique<RefDebugQAsymmS8Workload>(*debugQueueDescriptor, info);
            }
            if (IsSigned32(info))
            {
                return std::make_unique<RefDebugSigned32Workload>(*debugQueueDescriptor, info);
            }

            return MakeWorkload<RefDebugFloat32Workload, RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
        }
        case LayerType::DepthToSpace:
        {
            auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info);
        }
        case LayerType::DepthwiseConvolution2d:
        {
            auto depthwiseConvolution2DQueueDescriptor
                = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDepthwiseConvolution2dWorkload>(*depthwiseConvolution2DQueueDescriptor, info);
        }
        case LayerType::Dequantize:
        {
            auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDequantizeWorkload>(*dequantizeQueueDescriptor, info);
        }
        case LayerType::DetectionPostProcess:
        {
            auto detectionPostProcessQueueDescriptor
                = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDetectionPostProcessWorkload>(*detectionPostProcessQueueDescriptor, info);
        }
        case LayerType::Division:
        {
            auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefDivisionWorkload<int32_t>>(*divisionQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefDivisionWorkload<float>>(*divisionQueueDescriptor, info);
            }
        }
        case LayerType::ElementwiseUnary:
        {
            auto elementwiseUnaryQueueDescriptor
                = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
            if (elementwiseUnaryQueueDescriptor->m_Parameters.m_Operation == UnaryOperation::LogicalNot)
            {
                return std::make_unique<RefLogicalUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
            }
            return std::make_unique<RefElementwiseUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
        }
        case LayerType::FakeQuantization:
        {
            auto fakeQuantizationQueueDescriptor
                = PolymorphicDowncast<const FakeQuantizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefFakeQuantizationFloat32Workload>(*fakeQuantizationQueueDescriptor, info);
        }
        case LayerType::Fill:
        {
            auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
            return std::make_unique<RefFillWorkload>(*fillQueueDescriptor, info);
        }
        case LayerType::Floor:
        {
            auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
            if (IsQuantizedType(info.m_InputTensorInfos[0].GetDataType()))
            {
                return nullptr;
            }
            else
            {
                return std::make_unique<RefFloorWorkload>(*floorQueueDescriptor, info);
            }
        }
        case LayerType::FullyConnected:
        {
            auto fullyConnectedQueueDescriptor
                = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
            return std::make_unique<RefFullyConnectedWorkload>(*fullyConnectedQueueDescriptor, info);
        }
        case LayerType::Gather:
        {
            auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
            return std::make_unique<RefGatherWorkload>(*gatherQueueDescriptor, info);
        }
        case LayerType::GatherNd:
        {
            auto gatherNdQueueDescriptor = PolymorphicDowncast<const GatherNdQueueDescriptor*>(&descriptor);
            return std::make_unique<RefGatherNdWorkload>(*gatherNdQueueDescriptor, info);
        }
        case LayerType::Input:
        {
            auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Input cannot be zero length");
            }
            if (info.m_OutputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Output cannot be zero length");
            }

            if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: "
                                               "data input and output differ in byte count.");
            }

            return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
        }
        case LayerType::InstanceNormalization:
        {
            auto instanceNormalizationQueueDescriptor
                = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor, info);
        }
        case LayerType::L2Normalization:
        {
            auto l2NormalizationQueueDescriptor
                = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefL2NormalizationWorkload>(*l2NormalizationQueueDescriptor, info);
        }
        case LayerType::LogicalBinary:
        {
            auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
            return std::make_unique<RefLogicalBinaryWorkload>(*logicalBinaryQueueDescriptor, info);
        }
        case LayerType::LogSoftmax:
        {
            auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<RefLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor, info);
        }
        case LayerType::Lstm:
        {
            auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
            return std::make_unique<RefLstmWorkload>(*lstmQueueDescriptor, info);
        }
        case LayerType::Maximum:
        {
            auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefMaximumWorkload<int32_t>>(*maximumQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefMaximumWorkload<float>>(*maximumQueueDescriptor, info);
            }
        }
        case LayerType::Mean:
        {
            auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
            return std::make_unique<RefMeanWorkload>(*meanQueueDescriptor, info);
        }
        case LayerType::MemCopy:
        {
            auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
            if (descriptor.m_Inputs.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory: CreateMemCopy() expected an input tensor.");
            }
            return std::make_unique<CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
        }
        case LayerType::MemImport:
        {
            auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
            if (descriptor.m_Inputs.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory: CreateMemImport() expected an input tensor.");
            }
            return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
        }
        case LayerType::Minimum:
        {
            auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefMinimumWorkload<int32_t>>(*minimumQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefMinimumWorkload<float>>(*minimumQueueDescriptor, info);
            }
        }
        case LayerType::Multiplication:
        {
            auto multiplicationQueueDescriptor
                = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefMultiplicationWorkload<int32_t>>(*multiplicationQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefMultiplicationWorkload<float>>(*multiplicationQueueDescriptor, info);
            }
        }
        case LayerType::Normalization:
        {
            auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefNormalizationWorkload>(*normalizationQueueDescriptor, info);
        }
        case LayerType::Output:
        {
            auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Input cannot be zero length");
            }
            if (info.m_OutputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Output cannot be zero length");
            }
            if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: data input and output "
                                               "differ in byte count.");
            }

            return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
        }
        case LayerType::Pad:
        {
            auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPadWorkload>(*padQueueDescriptor, info);
        }
        case LayerType::Permute:
        {
            auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
            if (IsQSymmS16(info))
            {
                return std::make_unique<RefPermuteQSymm16Workload>(*permuteQueueDescriptor, info);
            }
            else if (IsBFloat16(info))
            {
                return std::make_unique<RefPermuteBFloat16Workload>(*permuteQueueDescriptor, info);
            }
            else if (IsQAsymmS8(info))
            {
                return std::make_unique<RefPermuteQAsymmS8Workload>(*permuteQueueDescriptor, info);
            }
            return MakeWorkloadHelper<RefPermuteFloat16Workload, RefPermuteFloat32Workload, RefPermuteQAsymm8Workload,
                                      NullWorkload, NullWorkload, NullWorkload>(*permuteQueueDescriptor, info);
        }
        case LayerType::Pooling2d:
        {
            auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPooling2dWorkload>(*pooling2dQueueDescriptor, info);
        }
        case LayerType::Pooling3d:
        {
            auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPooling3dWorkload>(*pooling3dQueueDescriptor, info);
        }
        case LayerType::PreCompiled:
        {
            return nullptr;
        }
        case LayerType::Prelu:
        {
            auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPreluWorkload>(*preluQueueDescriptor, info);
        }
        case LayerType::QLstm:
        {
            auto qlstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<RefQLstmWorkload>(*qlstmQueueDescriptor, info);
        }
        case LayerType::Quantize:
        {
            auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefQuantizeWorkload>(*quantizeQueueDescriptor, info);
        }
        case LayerType::Rank:
        {
            auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
            return std::make_unique<RefRankWorkload>(*rankQueueDescriptor, info);
        }
        case LayerType::Reduce:
        {
            auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefReduceWorkload>(*reduceQueueDescriptor, info);
        }
        case LayerType::Reshape:
        {
            auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefReshapeWorkload>(*reshapeQueueDescriptor, info);
        }
        case LayerType::Resize:
        {
            auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefResizeWorkload>(*resizeQueueDescriptor, info);
        }
        case LayerType::Shape:
        {
            auto shapeQueueDescriptor = PolymorphicDowncast<const ShapeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefShapeWorkload>(*shapeQueueDescriptor, info);
        }
        case LayerType::Slice:
        {
            auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSliceWorkload>(*sliceQueueDescriptor, info);
        }
        case LayerType::Softmax:
        {
            auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSoftmaxWorkload>(*softmaxQueueDescriptor, info);
        }
        case LayerType::SpaceToBatchNd:
        {
            auto spaceToBatchNdQueueDescriptor
                = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info);
        }
        case LayerType::SpaceToDepth:
        {
            auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info);
        }
        case LayerType::Splitter:
        {
            auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSplitterWorkload>(*splitterQueueDescriptor, info);
        }
        case LayerType::Stack:
        {
            auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
            return std::make_unique<RefStackWorkload>(*stackQueueDescriptor, info);
        }
        case LayerType::StridedSlice:
        {
            auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefStridedSliceWorkload>(*stridedSliceQueueDescriptor, info);
        }
        case LayerType::Subtraction:
        {
            auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefSubtractionWorkload<int32_t>>(*subtractionQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefSubtractionWorkload<float>>(*subtractionQueueDescriptor, info);
            }
        }
        case LayerType::Transpose:
        {
            auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
            if (IsQSymmS16(info))
            {
                return std::make_unique<RefTransposeQSymm16Workload>(*transposeQueueDescriptor, info);
            }
            else if (IsBFloat16(info))
            {
                return std::make_unique<RefTransposeBFloat16Workload>(*transposeQueueDescriptor, info);
            }
            else if (IsQAsymmS8(info))
            {
                return std::make_unique<RefTransposeQAsymmS8Workload>(*transposeQueueDescriptor, info);
            }
            return MakeWorkloadHelper<RefTransposeFloat16Workload, RefTransposeFloat32Workload,
                                      RefTransposeQAsymm8Workload, NullWorkload, NullWorkload, NullWorkload>
                   (*transposeQueueDescriptor, info);
        }
        case LayerType::TransposeConvolution2d:
        {
            auto transposeConvolution2dQueueDescriptor
                = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor, info);
        }
        case LayerType::UnidirectionalSequenceLstm:
        {
            auto unidirectionalSequenceLstmQueueDescriptor
                = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<RefUnidirectionalSequenceLstmWorkload>(*unidirectionalSequenceLstmQueueDescriptor,
                                                                           info);
        }
        default:
            return nullptr;
    }
}

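// Usage sketch (hypothetical caller, not part of this file): the runtime resolves a
// layer into a workload roughly as follows:
//
//   RefWorkloadFactory factory;
//   ActivationQueueDescriptor activationDescriptor;  // populated from the layer
//   WorkloadInfo workloadInfo;                       // input/output TensorInfos
//   auto workload = factory.CreateWorkload(LayerType::Activation, activationDescriptor, workloadInfo);
//   if (workload) { workload->Execute(); }
//
// The per-layer Create* methods below mirror the switch above case for case; they are
// the older factory interface that the unified CreateWorkload entry point supersedes.
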
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const
{
    return std::make_unique<RefActivationWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
    {
        return std::make_unique<RefAdditionWorkload<int32_t>>(descriptor, info);
    }
    else
    {
        return std::make_unique<RefAdditionWorkload<float>>(descriptor, info);
    }
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    return std::make_unique<RefArgMinMaxWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateBatchNormalization(
    const BatchNormalizationQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefBatchNormalizationWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
                                                                    const WorkloadInfo& info) const
{
    return std::make_unique<RefBatchToSpaceNdWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateCast(const CastQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return std::make_unique<RefCastWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateChannelShuffle(const ChannelShuffleQueueDescriptor& descriptor,
                                                                    const WorkloadInfo& info) const
{
    return std::make_unique<RefChannelShuffleWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const
{
    return std::make_unique<RefComparisonWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return std::make_unique<RefConcatWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    return std::make_unique<RefConstantWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvertBf16ToFp32(
    const ConvertBf16ToFp32QueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefConvertBf16ToFp32Workload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvertFp16ToFp32(
    const ConvertFp16ToFp32QueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefConvertFp16ToFp32Workload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvertFp32ToBf16(
    const ConvertFp32ToBf16QueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefConvertFp32ToBf16Workload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvertFp32ToFp16(
    const ConvertFp32ToFp16QueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefConvertFp32ToFp16Workload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
{
    return std::make_unique<RefConvolution2dWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvolution3d(const Convolution3dQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
{
    return std::make_unique<RefConvolution3dWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    if (IsBFloat16(info))
    {
        return std::make_unique<RefDebugBFloat16Workload>(descriptor, info);
    }
    if (IsFloat16(info))
    {
        return std::make_unique<RefDebugFloat16Workload>(descriptor, info);
    }
    if (IsQSymmS16(info))
    {
        return std::make_unique<RefDebugQSymmS16Workload>(descriptor, info);
    }
    if (IsQSymmS8(info))
    {
        return std::make_unique<RefDebugQSymmS8Workload>(descriptor, info);
    }
    if (IsQAsymmU8(info))
    {
        return std::make_unique<RefDebugQAsymmU8Workload>(descriptor, info);
    }
    if (IsQAsymmS8(info))
    {
        return std::make_unique<RefDebugQAsymmS8Workload>(descriptor, info);
    }
    if (IsSigned32(info))
    {
        return std::make_unique<RefDebugSigned32Workload>(descriptor, info);
    }

    return MakeWorkload<RefDebugFloat32Workload, RefDebugQAsymmU8Workload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& descriptor,
                                                                  const WorkloadInfo& info) const
{
    return std::make_unique<RefDepthToSpaceWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDepthwiseConvolution2d(
    const DepthwiseConvolution2dQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefDepthwiseConvolution2dWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDequantize(const DequantizeQueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const
{
    return std::make_unique<RefDequantizeWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDetectionPostProcess(
    const DetectionPostProcessQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefDetectionPostProcessWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
    {
        return std::make_unique<RefDivisionWorkload<int32_t>>(descriptor, info);
    }
    else
    {
        return std::make_unique<RefDivisionWorkload<float>>(descriptor, info);
    }
}

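// LogicalNot is routed to a dedicated logical workload, while every other unary
// operation (Abs, Exp, Neg, and so on) shares RefElementwiseUnaryWorkload; the
// CreateWorkload switch above makes the same split.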
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
                                                                      const WorkloadInfo& info) const
{
    if (descriptor.m_Parameters.m_Operation == UnaryOperation::LogicalNot)
    {
        return std::make_unique<RefLogicalUnaryWorkload>(descriptor, info);
    }
    return std::make_unique<RefElementwiseUnaryWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
                                                                      const WorkloadInfo& info) const
{
    return MakeWorkload<RefFakeQuantizationFloat32Workload, NullWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFill(const FillQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return std::make_unique<RefFillWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    if (IsQuantizedType(info.m_InputTensorInfos[0].GetDataType()))
    {
        return nullptr;
    }
    else
    {
        return std::make_unique<RefFloorWorkload>(descriptor, info);
    }
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFullyConnected(
    const FullyConnectedQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefFullyConnectedWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateGather(const GatherQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return std::make_unique<RefGatherWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    if (info.m_InputTensorInfos.empty())
    {
        throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Input cannot be zero length");
    }
    if (info.m_OutputTensorInfos.empty())
    {
        throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Output cannot be zero length");
    }

    if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
    {
        throw InvalidArgumentException("RefWorkloadFactory::CreateInput: data input and output differ in byte count.");
    }

    return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateInstanceNormalization(
    const InstanceNormalizationQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefInstanceNormalizationWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
                                                                     const WorkloadInfo& info) const
{
    return std::make_unique<RefL2NormalizationWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateLogicalBinary(const LogicalBinaryQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
{
    return std::make_unique<RefLogicalBinaryWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const
{
    return std::make_unique<RefLogSoftmaxWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateLstm(const LstmQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return std::make_unique<RefLstmWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
    {
        return std::make_unique<RefMaximumWorkload<int32_t>>(descriptor, info);
    }
    else
    {
        return std::make_unique<RefMaximumWorkload<float>>(descriptor, info);
    }
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMean(const MeanQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return std::make_unique<RefMeanWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    if (descriptor.m_Inputs.empty())
    {
        throw InvalidArgumentException("RefWorkloadFactory: CreateMemCopy() expected an input tensor.");
    }
    return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    if (descriptor.m_Inputs.empty())
    {
        throw InvalidArgumentException("RefWorkloadFactory: CreateMemImport() expected an input tensor.");
    }
    return std::make_unique<ImportMemGenericWorkload>(descriptor, info);
}

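// Note the distinction above: MemCopy copies bytes between tensor handles via
// CopyMemGenericWorkload, whereas MemImport (ImportMemGenericWorkload) is intended to
// adopt memory allocated outside ArmNN rather than copy it.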
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
    {
        return std::make_unique<RefMinimumWorkload<int32_t>>(descriptor, info);
    }
    else
    {
        return std::make_unique<RefMinimumWorkload<float>>(descriptor, info);
    }
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
                                                                    const WorkloadInfo& info) const
{
    if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
    {
        return std::make_unique<RefMultiplicationWorkload<int32_t>>(descriptor, info);
    }
    else
    {
        return std::make_unique<RefMultiplicationWorkload<float>>(descriptor, info);
    }
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
{
    return std::make_unique<RefNormalizationWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    if (info.m_InputTensorInfos.empty())
    {
        throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Input cannot be zero length");
    }
    if (info.m_OutputTensorInfos.empty())
    {
        throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Output cannot be zero length");
    }
    if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
    {
        throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: data input and output differ in byte count.");
    }

    return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
                                                         const WorkloadInfo& info) const
{
    return std::make_unique<RefPadWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    if (IsQSymmS16(info))
    {
        return std::make_unique<RefPermuteQSymm16Workload>(descriptor, info);
    }
    else if (IsBFloat16(info))
    {
        return std::make_unique<RefPermuteBFloat16Workload>(descriptor, info);
    }
    else if (IsQAsymmS8(info))
    {
        return std::make_unique<RefPermuteQAsymmS8Workload>(descriptor, info);
    }
    return MakeWorkloadHelper<RefPermuteFloat16Workload, RefPermuteFloat32Workload, RefPermuteQAsymm8Workload,
                              NullWorkload, NullWorkload, NullWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    return std::make_unique<RefPooling2dWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePooling3d(const Pooling3dQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    return std::make_unique<RefPooling3dWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& /*descriptor*/,
                                                                 const WorkloadInfo& /*info*/) const
{
    return nullptr;
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePrelu(const PreluQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return std::make_unique<RefPreluWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateQLstm(const QLstmQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return std::make_unique<RefQLstmWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    return std::make_unique<RefQuantizeWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateRank(const RankQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return std::make_unique<RefRankWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateReduce(const ReduceQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return std::make_unique<RefReduceWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    return std::make_unique<RefReshapeWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return std::make_unique<RefResizeWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateShape(const ShapeQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return std::make_unique<RefShapeWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return std::make_unique<RefSliceWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    return std::make_unique<RefSoftmaxWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
                                                                    const WorkloadInfo& info) const
{
    return std::make_unique<RefSpaceToBatchNdWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
                                                                  const WorkloadInfo& info) const
{
    return std::make_unique<RefSpaceToDepthWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    return std::make_unique<RefSplitterWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateStack(const StackQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return std::make_unique<RefStackWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
                                                                  const WorkloadInfo& info) const
{
    return std::make_unique<RefStridedSliceWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& info) const
{
    if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
    {
        return std::make_unique<RefSubtractionWorkload<int32_t>>(descriptor, info);
    }
    else
    {
        return std::make_unique<RefSubtractionWorkload<float>>(descriptor, info);
    }
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    if (IsQSymmS16(info))
    {
        return std::make_unique<RefTransposeQSymm16Workload>(descriptor, info);
    }
    else if (IsBFloat16(info))
    {
        return std::make_unique<RefTransposeBFloat16Workload>(descriptor, info);
    }
    else if (IsQAsymmS8(info))
    {
        return std::make_unique<RefTransposeQAsymmS8Workload>(descriptor, info);
    }
    return MakeWorkloadHelper<RefTransposeFloat16Workload, RefTransposeFloat32Workload, RefTransposeQAsymm8Workload,
                              NullWorkload, NullWorkload, NullWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateTransposeConvolution2d(
    const TransposeConvolution2dQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefTransposeConvolution2dWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateUnidirectionalSequenceLstm(
    const UnidirectionalSequenceLstmQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefUnidirectionalSequenceLstmWorkload>(descriptor, info);
}

} // namespace armnn