ArmNN 22.08: ClWorkloadFactory.cpp

//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "ClWorkloadFactory.hpp"
#include "ClBackendId.hpp"
#include "ClBackendModelContext.hpp"
#include "ClContextDeserializer.hpp"
#include "ClContextSerializer.hpp"

#include <Layer.hpp>

#include <armnn/Exceptions.hpp>
#include <armnn/Logging.hpp>
#include <armnn/Utils.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <backendsCommon/MakeWorkloadHelper.hpp>
#include <armnn/backends/MemCopyWorkload.hpp>
#include <armnn/backends/MemImportWorkload.hpp>
#include <armnn/backends/TensorHandle.hpp>

#include <cl/ClTensorHandle.hpp>
#include <cl/workloads/ClWorkloads.hpp>
#include <cl/workloads/ClWorkloadUtils.hpp>

#include <arm_compute/core/CL/CLKernelLibrary.h>
#include <arm_compute/runtime/CL/CLBufferAllocator.h>
#include <arm_compute/runtime/CL/CLScheduler.h>

#include <armnnUtils/Filesystem.hpp>
#include <fstream>

#include <sys/stat.h>
#include <unistd.h> // write(), lseek(), pread()

namespace armnn
{

namespace
{
static const BackendId s_Id{ClBackendId()};
}

bool ClWorkloadFactory::IsLayerSupported(const Layer& layer,
                                         Optional<DataType> dataType,
                                         std::string& outReasonIfUnsupported)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
}

bool ClWorkloadFactory::IsLayerSupported(const IConnectableLayer& layer,
                                         Optional<DataType> dataType,
                                         std::string& outReasonIfUnsupported,
                                         const ModelOptions& modelOptions)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported, modelOptions);
}

const BackendId& ClWorkloadFactory::GetBackendId() const
{
    return s_Id;
}

void ClWorkloadFactory::AfterWorkloadsCreated()
{
    if (m_ModelContextPtr)
    {
        auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
        if (modelOptions->SaveCachedNetwork())
        {
            ClContextSerializer serializer;
            serializer.Serialize(m_CLCompileContext);
            auto cachedFd = modelOptions->GetCachedFileDescriptor();
            if (cachedFd != -1)
            {
                std::vector<uint8_t> compiledContextData;
                std::stringstream stream;
                bool serialized = serializer.SaveSerializedToStream(stream);
                if (serialized)
                {
                    std::string const serializedString{stream.str()};
                    std::copy(serializedString.begin(),
                              serializedString.end(),
                              std::back_inserter(compiledContextData));
                    auto success = write(cachedFd, compiledContextData.data(), compiledContextData.size());
                    if (success == -1)
                    {
                        ARMNN_LOG(info) << "ClWorkloadFactory:: Could not cache the compiled context!";
                    }
                }
            }

            // Save map to a filepath provided in ModelOptions
            auto filePath = modelOptions->GetCachedNetworkFilePath();
            if (filePath != "" && fs::exists(filePath) && fs::is_regular_file(filePath))
            {
                // Serialize ClContext to the file specified
                std::ofstream file(filePath, std::ios::out | std::ios::binary);
                serializer.SaveSerializedToStream(file);
            }
        }
    }
}
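
// Illustrative sketch (not part of the original file): the caching path above is driven
// entirely by GpuAcc backend ModelOptions. "SaveCachedNetwork", "CachedNetworkFilePath"
// and "CachedFileDescriptor" are the option names read by ClBackendModelContext; the
// file path below is a placeholder assumption. Note that the file-path branch above only
// writes if the file already exists (the fs::exists() check), so the client is expected
// to create it first.
//
//     armnn::BackendOptions gpuAccOptions("GpuAcc",
//     {
//         { "SaveCachedNetwork",     true },
//         { "CachedNetworkFilePath", "/tmp/cachedNetwork.bin" }  // pre-created file
//     });
//     armnn::OptimizerOptions optimizerOptions;
//     optimizerOptions.m_ModelOptions.push_back(gpuAccOptions);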

// Selects between the Float and Uint8 workload types based on the tensor data types
// in 'info', wrapping any OpenCL error in an armnn::RuntimeException.
template <typename FloatWorkload, typename Uint8Workload, typename QueueDescriptorType, typename... Args>
std::unique_ptr<IWorkload> ClWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
                                                           const WorkloadInfo& info,
                                                           Args&&... args)
{
    try
    {
        return MakeWorkloadHelper<FloatWorkload, Uint8Workload>(descriptor, info, std::forward<Args>(args)...);
    }
    catch (const cl::Error& clError)
    {
        throw WrapClError(clError, CHECK_LOCATION());
    }
}

// Constructs a single workload type directly, with the same OpenCL error wrapping.
template <typename Workload, typename QueueDescriptorType, typename... Args>
std::unique_ptr<IWorkload> ClWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
                                                           const WorkloadInfo& info,
                                                           Args&&... args)
{
    try
    {
        return std::make_unique<Workload>(descriptor, info, std::forward<Args>(args)...);
    }
    catch (const cl::Error& clError)
    {
        throw WrapClError(clError, CHECK_LOCATION());
    }
}

void ClWorkloadFactory::InitializeCLCompileContext()
{
    // Initialize our m_CLCompileContext using default device and context
    auto context = arm_compute::CLKernelLibrary::get().context();
    auto device = arm_compute::CLKernelLibrary::get().get_device();
    m_CLCompileContext = arm_compute::CLCompileContext(context, device);

    if (m_ModelContextPtr)
    {
        // Load saved programs if the user has set a filepath
        auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
        auto filePath = modelOptions->GetCachedNetworkFilePath();
        if (!(modelOptions->SaveCachedNetwork()))
        {
            ClContextDeserializer deserializer;
            auto cachedFd = modelOptions->GetCachedFileDescriptor();
            if (cachedFd != -1)
            {
                struct stat statBuffer;
                if (fstat(cachedFd, &statBuffer) == 0)
                {
                    long dataSize = static_cast<long>(statBuffer.st_size);
                    if (dataSize > 0)
                    {
                        auto offset = lseek(cachedFd, 0, SEEK_CUR);
                        if (offset == 0)
                        {
                            std::vector<uint8_t> compiledContextData(static_cast<unsigned int>(dataSize));
                            auto success = pread(cachedFd, compiledContextData.data(), compiledContextData.size(), 0);
                            if (success != -1)
                            {
                                deserializer.DeserializeFromBinary(m_CLCompileContext,
                                                                   context,
                                                                   device,
                                                                   compiledContextData);
                            }
                        }
                    }
                }
            }

            if (filePath != "" && fs::exists(filePath) && fs::is_regular_file(filePath))
            {
                // Deserialize binary file and load into m_CLCompileContext
                deserializer.Deserialize(m_CLCompileContext, context, device, filePath);
            }
        }
    }
}
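
// Illustrative sketch (an assumption, not part of the original file): loading the cache
// back is the mirror image of the save path above. With "SaveCachedNetwork" left false
// and "CachedNetworkFilePath" pointing at a file produced by a previous run, the factory
// constructors below deserialize the precompiled OpenCL programs instead of rebuilding them:
//
//     armnn::BackendOptions gpuAccOptions("GpuAcc",
//     {
//         { "SaveCachedNetwork",     false },
//         { "CachedNetworkFilePath", "/tmp/cachedNetwork.bin" }  // placeholder path
//     });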

ClWorkloadFactory::ClWorkloadFactory(const std::shared_ptr<ClMemoryManager>& memoryManager)
    : m_MemoryManager(memoryManager), m_ModelContextPtr(IBackendInternal::IBackendSpecificModelContextPtr{})
{
    InitializeCLCompileContext();
}

ClWorkloadFactory::ClWorkloadFactory(const std::shared_ptr<ClMemoryManager>& memoryManager,
                                     const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
    : m_MemoryManager(memoryManager), m_ModelContextPtr(modelContextPtr)
{
    InitializeCLCompileContext();
}

std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                     const bool IsMemoryManaged) const
{
    IgnoreUnused(IsMemoryManaged);
    std::unique_ptr<ClTensorHandle> tensorHandle = std::make_unique<ClTensorHandle>(tensorInfo);
    tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());

    return tensorHandle;
}

std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                     DataLayout dataLayout,
                                                                     const bool IsMemoryManaged) const
{
    IgnoreUnused(IsMemoryManaged);
    std::unique_ptr<ClTensorHandle> tensorHandle = std::make_unique<ClTensorHandle>(tensorInfo, dataLayout);
    tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());

    return tensorHandle;
}

221 
222 std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateSubTensorHandle(ITensorHandle& parent,
223  TensorShape const& subTensorShape,
224  unsigned int const* subTensorOrigin) const
225 {
227  arm_compute::TensorShape shape = armcomputetensorutils::BuildArmComputeTensorShape(subTensorShape);
228 
229  coords.set_num_dimensions(subTensorShape.GetNumDimensions());
230  for (unsigned int i = 0; i < subTensorShape.GetNumDimensions(); i++)
231  {
232  // Arm compute indexes tensor coords in reverse order.
233  unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
234  coords.set(i, armnn::numeric_cast<int>(subTensorOrigin[revertedIndex]));
235  }
236 
237  const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
238  if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
239  {
240  return nullptr;
241  }
242 
243  return std::make_unique<ClSubTensorHandle>(
244  PolymorphicDowncast<IClTensorHandle*>(&parent), shape, coords);
245 }
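
// Worked example of the index reversal above (illustrative, not part of the original
// file): for a 4-D NCHW sub-tensor origin of { N=0, C=2, H=0, W=0 }, the loop produces
// the Arm Compute coordinate (x, y, z, w) = (W, H, C, N) = (0, 0, 2, 0), because the
// Compute Library orders coordinates fastest-iterating dimension first.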

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateWorkload(LayerType type,
                                                             const QueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    switch(type)
    {
        case LayerType::Activation :
        {
            auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClActivationWorkload>(*activationQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Addition :
        {
            auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClAdditionWorkload>(*additionQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::ArgMinMax :
        {
            auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::BatchNormalization :
        {
            auto batchNormalizationQueueDescriptor
                = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClBatchNormalizationFloatWorkload, NullWorkload>
                (*batchNormalizationQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::BatchToSpaceNd :
        {
            auto batchToSpaceNdQueueDescriptor
                = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Cast :
        {
            auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClCastWorkload>(*castQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::ChannelShuffle :
        {
            auto channelShuffleQueueDescriptor
                = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Comparison :
        {
            auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClComparisonWorkload>(*comparisonQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Concat :
        {
            auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClConcatWorkload>(*concatQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Constant :
        {
            auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClConstantWorkload>(*constantQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::ConvertFp16ToFp32 :
        {
            auto convertFp16ToFp32QueueDescriptor
                = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
            return MakeWorkload<ClConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor,
                                                             info,
                                                             m_CLCompileContext);
        }
        case LayerType::ConvertFp32ToFp16 :
        {
            auto convertFp32ToFp16QueueDescriptor
                = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
            return MakeWorkload<ClConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor,
                                                             info,
                                                             m_CLCompileContext);
        }
        case LayerType::Convolution2d :
        {
            auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);

            bool isFastMathEnabled = false;
            if (m_ModelContextPtr)
            {
                if (m_ModelContextPtr.get() != nullptr)
                {
                    auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
                    if (modelOptions)
                    {
                        isFastMathEnabled = modelOptions->IsFastMathEnabled();
                    }
                }
            }
            return MakeWorkload<ClConvolution2dWorkload>(*convolution2dQueueDescriptor,
                                                         info,
                                                         m_MemoryManager->GetIntraLayerManager(),
                                                         m_CLCompileContext,
                                                         isFastMathEnabled);
        }
        case LayerType::Convolution3d :
        {
            auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);

            bool isFastMathEnabled = false;
            if (m_ModelContextPtr)
            {
                if (m_ModelContextPtr.get() != nullptr)
                {
                    auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
                    if (modelOptions)
                    {
                        isFastMathEnabled = modelOptions->IsFastMathEnabled();
                    }
                }
            }
            return MakeWorkload<ClConvolution3dWorkload>(*convolution3dQueueDescriptor,
                                                         info,
                                                         m_MemoryManager->GetIntraLayerManager(),
                                                         m_CLCompileContext,
                                                         isFastMathEnabled);
        }
        case LayerType::Debug :
        {
            auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
            return MakeWorkload<NullWorkload, NullWorkload>(*debugQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::DepthToSpace :
        {
            auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::DepthwiseConvolution2d :
        {
            auto depthwiseConvolution2dQueueDescriptor
                = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClDepthwiseConvolutionWorkload>(*depthwiseConvolution2dQueueDescriptor,
                                                                info,
                                                                m_CLCompileContext);
        }
        case LayerType::Dequantize :
        {
            auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClDequantizeWorkload>(*dequantizeQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::DetectionPostProcess :
        {
            auto detectionPostProcessQueueDescriptor
                = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
            return MakeWorkload<NullWorkload, NullWorkload>(*detectionPostProcessQueueDescriptor,
                                                            info,
                                                            m_CLCompileContext);
        }
        case LayerType::Division :
        {
            auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
            return std::make_unique<ClDivisionWorkload>(*divisionQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::ElementwiseUnary :
        {
            auto elementwiseUnaryQueueDescriptor
                = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);

            switch(elementwiseUnaryQueueDescriptor->m_Parameters.m_Operation)
            {
                case UnaryOperation::Abs:
                {
                    AbsQueueDescriptor absQueueDescriptor;
                    absQueueDescriptor.m_Inputs = elementwiseUnaryQueueDescriptor->m_Inputs;
                    absQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;

                    return std::make_unique<ClAbsWorkload>(absQueueDescriptor, info, m_CLCompileContext);
                }
                case UnaryOperation::Exp:
                    return std::make_unique<ClExpWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
                case UnaryOperation::Log:
                    return std::make_unique<ClLogWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
                case UnaryOperation::LogicalNot:
                    return std::make_unique<ClLogicalNotWorkload>(*elementwiseUnaryQueueDescriptor,
                                                                  info,
                                                                  m_CLCompileContext);
                case UnaryOperation::Neg:
                    return std::make_unique<ClNegWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
                case UnaryOperation::Rsqrt:
                {
                    RsqrtQueueDescriptor rsqrtQueueDescriptor;
                    rsqrtQueueDescriptor.m_Inputs = elementwiseUnaryQueueDescriptor->m_Inputs;
                    rsqrtQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;

                    return std::make_unique<ClRsqrtWorkload>(rsqrtQueueDescriptor, info, m_CLCompileContext);
                }
                case UnaryOperation::Sin:
                    return std::make_unique<ClSinWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
                case UnaryOperation::Sqrt:
                    return std::make_unique<ClSqrtWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
                default:
                    return nullptr;
            }
        }
        case LayerType::Fill :
        {
            auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
            return std::make_unique<ClFillWorkload>(*fillQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Floor :
        {
            auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClFloorFloatWorkload, NullWorkload>(*floorQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::FullyConnected :
        {
            auto fullyConnectedQueueDescriptor
                = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClFullyConnectedWorkload>(*fullyConnectedQueueDescriptor,
                                                          info,
                                                          m_MemoryManager->GetIntraLayerManager(),
                                                          m_CLCompileContext);
        }
        case LayerType::Gather :
        {
            auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClGatherWorkload>(*gatherQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::GatherNd :
        {
            auto gatherNdQueueDescriptor = PolymorphicDowncast<const GatherNdQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClGatherNdWorkload>(*gatherNdQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Input :
        {
            auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
            return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
        }
        case LayerType::InstanceNormalization :
        {
            auto instanceNormalizationQueueDescriptor
                = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor,
                                                                 info,
                                                                 m_CLCompileContext);
        }
        case LayerType::L2Normalization :
        {
            auto l2NormalizationQueueDescriptor
                = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClL2NormalizationFloatWorkload, NullWorkload>(*l2NormalizationQueueDescriptor,
                                                                              info,
                                                                              m_CLCompileContext);
        }
        case LayerType::LogicalBinary :
        {
            auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);

            switch(logicalBinaryQueueDescriptor->m_Parameters.m_Operation)
            {
                case LogicalBinaryOperation::LogicalAnd:
                    return std::make_unique<ClLogicalAndWorkload>(*logicalBinaryQueueDescriptor,
                                                                  info,
                                                                  m_CLCompileContext);
                case LogicalBinaryOperation::LogicalOr:
                    return std::make_unique<ClLogicalOrWorkload>(*logicalBinaryQueueDescriptor,
                                                                 info,
                                                                 m_CLCompileContext);
                default:
                    return nullptr;
            }
        }
        case LayerType::LogSoftmax :
        {
            auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);

            return MakeWorkload<ClLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor,
                                                      info,
                                                      m_MemoryManager->GetIntraLayerManager(),
                                                      m_CLCompileContext);
        }
        case LayerType::Lstm :
        {
            auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClLstmFloatWorkload, NullWorkload>(*lstmQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Maximum :
        {
            auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClMaximumWorkload>(*maximumQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Mean :
        {
            auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClMeanWorkload>(*meanQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::MemCopy :
        {
            auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
            if (memCopyQueueDescriptor->m_Inputs.empty() || !memCopyQueueDescriptor->m_Inputs[0])
            {
                throw InvalidArgumentException("ClWorkloadFactory: Invalid null input for MemCopy workload");
            }
            return MakeWorkload<CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
        }
        case LayerType::MemImport :
        {
            auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
            if (memImportQueueDescriptor->m_Inputs.empty() || !memImportQueueDescriptor->m_Inputs[0])
            {
                throw InvalidArgumentException("ClWorkloadFactory: Invalid null input for MemImport workload");
            }
            return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
        }
        case LayerType::Minimum :
        {
            auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClMinimumWorkload>(*minimumQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Multiplication :
        {
            auto multiplicationQueueDescriptor = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClMultiplicationWorkload>(*multiplicationQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Normalization :
        {
            auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClNormalizationFloatWorkload, NullWorkload>(*normalizationQueueDescriptor,
                                                                            info,
                                                                            m_CLCompileContext);
        }
        case LayerType::Output :
        {
            auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
            return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
        }
        case LayerType::Pad :
        {
            auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClPadWorkload>(*padQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Permute :
        {
            auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClPermuteWorkload>(*permuteQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Pooling2d :
        {
            auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClPooling2dWorkload>(*pooling2dQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Pooling3d :
        {
            auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClPooling3dWorkload>(*pooling3dQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::PreCompiled :
        {
            auto preCompiledQueueDescriptor = PolymorphicDowncast<const PreCompiledQueueDescriptor*>(&descriptor);
            return MakeWorkload<NullWorkload, NullWorkload>(*preCompiledQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Prelu :
        {
            auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClPreluWorkload>(*preluQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::QLstm :
        {
            auto qLstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<ClQLstmWorkload>(*qLstmQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Quantize :
        {
            auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClQuantizeWorkload>(*quantizeQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::QuantizedLstm :
        {
            auto quantizedLstmQueueDescriptor = PolymorphicDowncast<const QuantizedLstmQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClQuantizedLstmWorkload>(*quantizedLstmQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Rank :
        {
            auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
            return std::make_unique<ClRankWorkload>(*rankQueueDescriptor, info);
        }
        case LayerType::Reduce :
        {
            auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
            return std::make_unique<ClReduceWorkload>(*reduceQueueDescriptor, info);
        }
        case LayerType::Reshape :
        {
            auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClReshapeWorkload>(*reshapeQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Resize :
        {
            auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClResizeWorkload>(*resizeQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Slice :
        {
            auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClSliceWorkload>(*sliceQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Softmax :
        {
            auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<ClSoftmaxWorkload>(*softmaxQueueDescriptor,
                                                       info,
                                                       m_MemoryManager->GetIntraLayerManager(),
                                                       m_CLCompileContext);
        }
        case LayerType::SpaceToBatchNd :
        {
            auto spaceToBatchNdQueueDescriptor
                = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::SpaceToDepth :
        {
            auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Splitter :
        {
            auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClSplitterWorkload>(*splitterQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Stack :
        {
            auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClStackWorkload>(*stackQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::StridedSlice :
        {
            auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClStridedSliceWorkload>(*stridedSliceQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Subtraction :
        {
            auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClSubtractionWorkload>(*subtractionQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Transpose :
        {
            auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClTransposeWorkload>(*transposeQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::TransposeConvolution2d :
        {
            auto transposeConvolution2dQueueDescriptor
                = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor,
                                                                  info,
                                                                  m_MemoryManager->GetIntraLayerManager(),
                                                                  m_CLCompileContext);
        }
        case LayerType::UnidirectionalSequenceLstm :
        {
            auto desc = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<ClUnidirectionalSequenceLstmFloatWorkload, NullWorkload>(*desc,
                                                                                               info,
                                                                                               m_CLCompileContext);
        }
        default:
            return nullptr;
    }
}
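
// Illustrative sketch (an assumption, not part of the original file): callers drive the
// factory through the generic entry point above rather than the deprecated per-layer
// Create* methods below. For example, for an activation layer:
//
//     armnn::ActivationQueueDescriptor activationDescriptor;  // inputs/outputs filled in by the caller
//     armnn::WorkloadInfo workloadInfo;                       // matching input/output TensorInfos
//     auto workload = factory.CreateWorkload(armnn::LayerType::Activation,
//                                            activationDescriptor,
//                                            workloadInfo);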

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    return MakeWorkload<ClActivationWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    return MakeWorkload<ClAdditionWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    return std::make_unique<ClArgMinMaxWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateBatchNormalization(
    const BatchNormalizationQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return MakeWorkload<ClBatchNormalizationFloatWorkload, NullWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
{
    return MakeWorkload<ClBatchToSpaceNdWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateCast(const CastQueueDescriptor& descriptor,
                                                         const WorkloadInfo& info) const
{
    return MakeWorkload<ClCastWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateChannelShuffle(const ChannelShuffleQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
{
    return MakeWorkload<ClChannelShuffleWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    return MakeWorkload<ClComparisonWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return MakeWorkload<ClConcatWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    return MakeWorkload<ClConstantWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConvertFp16ToFp32(
    const ConvertFp16ToFp32QueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return MakeWorkload<ClConvertFp16ToFp32Workload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConvertFp32ToFp16(
    const ConvertFp32ToFp16QueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return MakeWorkload<ClConvertFp32ToFp16Workload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
                                                                  const WorkloadInfo& info) const
{
    bool isFastMathEnabled = false;
    if (m_ModelContextPtr)
    {
        if (m_ModelContextPtr.get() != nullptr)
        {
            auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
            if (modelOptions)
            {
                isFastMathEnabled = modelOptions->IsFastMathEnabled();
            }
        }
    }
    return MakeWorkload<ClConvolution2dWorkload>(descriptor,
                                                 info,
                                                 m_MemoryManager->GetIntraLayerManager(),
                                                 m_CLCompileContext,
                                                 isFastMathEnabled);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConvolution3d(const Convolution3dQueueDescriptor& descriptor,
                                                                  const WorkloadInfo& info) const
{
    bool isFastMathEnabled = false;
    if (m_ModelContextPtr)
    {
        if (m_ModelContextPtr.get() != nullptr)
        {
            auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
            if (modelOptions)
            {
                isFastMathEnabled = modelOptions->IsFastMathEnabled();
            }
        }
    }
    return MakeWorkload<ClConvolution3dWorkload>(descriptor,
                                                 info,
                                                 m_MemoryManager->GetIntraLayerManager(),
                                                 m_CLCompileContext,
                                                 isFastMathEnabled);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& info) const
{
    return MakeWorkload<ClDepthToSpaceWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDepthwiseConvolution2d(
    const DepthwiseConvolution2dQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return MakeWorkload<ClDepthwiseConvolutionWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDequantize(const DequantizeQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    return MakeWorkload<ClDequantizeWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDetectionPostProcess(
    const DetectionPostProcessQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    return std::make_unique<ClDivisionWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
                                                                     const WorkloadInfo& info) const
{
    switch(descriptor.m_Parameters.m_Operation)
    {
        case UnaryOperation::Abs:
        {
            AbsQueueDescriptor absQueueDescriptor;
            absQueueDescriptor.m_Inputs = descriptor.m_Inputs;
            absQueueDescriptor.m_Outputs = descriptor.m_Outputs;

            return std::make_unique<ClAbsWorkload>(absQueueDescriptor, info, m_CLCompileContext);
        }
        case UnaryOperation::Exp:
            return std::make_unique<ClExpWorkload>(descriptor, info, m_CLCompileContext);
        case UnaryOperation::Log:
            return std::make_unique<ClLogWorkload>(descriptor, info, m_CLCompileContext);
        case UnaryOperation::LogicalNot:
            return std::make_unique<ClLogicalNotWorkload>(descriptor, info, m_CLCompileContext);
        case UnaryOperation::Neg:
            return std::make_unique<ClNegWorkload>(descriptor, info, m_CLCompileContext);
        case UnaryOperation::Rsqrt:
        {
            RsqrtQueueDescriptor rsqrtQueueDescriptor;
            rsqrtQueueDescriptor.m_Inputs = descriptor.m_Inputs;
            rsqrtQueueDescriptor.m_Outputs = descriptor.m_Outputs;

            return std::make_unique<ClRsqrtWorkload>(rsqrtQueueDescriptor, info, m_CLCompileContext);
        }
        case UnaryOperation::Sin:
            return std::make_unique<ClSinWorkload>(descriptor, info, m_CLCompileContext);
        default:
            return nullptr;
    }
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateFill(const FillQueueDescriptor& descriptor,
                                                         const WorkloadInfo& info) const
{
    return std::make_unique<ClFillWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return MakeWorkload<ClFloorFloatWorkload, NullWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
{
    return MakeWorkload<ClFullyConnectedWorkload>(descriptor,
                                                  info,
                                                  m_MemoryManager->GetIntraLayerManager(),
                                                  m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateGather(const GatherQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return MakeWorkload<ClGatherWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateInstanceNormalization(
    const InstanceNormalizationQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return MakeWorkload<ClInstanceNormalizationWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
                                                                    const WorkloadInfo& info) const
{
    return MakeWorkload<ClL2NormalizationFloatWorkload, NullWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateLogicalBinary(const LogicalBinaryQueueDescriptor& descriptor,
                                                                  const WorkloadInfo& info) const
{
    switch(descriptor.m_Parameters.m_Operation)
    {
        case LogicalBinaryOperation::LogicalAnd:
            return std::make_unique<ClLogicalAndWorkload>(descriptor, info, m_CLCompileContext);
        case LogicalBinaryOperation::LogicalOr:
            return std::make_unique<ClLogicalOrWorkload>(descriptor, info, m_CLCompileContext);
        default:
            return nullptr;
    }
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    return MakeWorkload<ClLogSoftmaxWorkload>(descriptor,
                                              info,
                                              m_MemoryManager->GetIntraLayerManager(),
                                              m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateLstm(const LstmQueueDescriptor& descriptor,
                                                         const WorkloadInfo& info) const
{
    return MakeWorkload<ClLstmFloatWorkload, NullWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return MakeWorkload<ClMaximumWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMean(const MeanQueueDescriptor& descriptor,
                                                         const WorkloadInfo& info) const
{
    return MakeWorkload<ClMeanWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    if (descriptor.m_Inputs.empty() || !descriptor.m_Inputs[0])
    {
        throw InvalidArgumentException("ClWorkloadFactory: Invalid null input for MemCopy workload");
    }

    return MakeWorkload<CopyMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    if (descriptor.m_Inputs.empty() || !descriptor.m_Inputs[0])
    {
        throw InvalidArgumentException("ClWorkloadFactory: Invalid null input for MemImport workload");
    }

    return std::make_unique<ImportMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return MakeWorkload<ClMinimumWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
{
    return MakeWorkload<ClMultiplicationWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& descriptor,
                                                                  const WorkloadInfo& info) const
{
    return MakeWorkload<ClNormalizationFloatWorkload, NullWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
                                                        const WorkloadInfo& info) const
{
    return MakeWorkload<ClPadWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return MakeWorkload<ClPermuteWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    return MakeWorkload<ClPooling2dWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const
{
    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreatePrelu(const PreluQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return MakeWorkload<ClPreluWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateQLstm(const QLstmQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return std::make_unique<ClQLstmWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    return MakeWorkload<ClQuantizeWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& descriptor,
                                                                  const WorkloadInfo& info) const
{
    return MakeWorkload<ClQuantizedLstmWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateRank(const RankQueueDescriptor& descriptor,
                                                         const WorkloadInfo& info) const
{
    return std::make_unique<ClRankWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateReduce(const ReduceQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return std::make_unique<ClReduceWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return MakeWorkload<ClReshapeWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return MakeWorkload<ClResizeWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return MakeWorkload<ClSliceWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return std::make_unique<ClSoftmaxWorkload>(descriptor,
                                               info,
                                               m_MemoryManager->GetIntraLayerManager(),
                                               m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
{
    return MakeWorkload<ClSpaceToBatchNdWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& info) const
{
    return MakeWorkload<ClSpaceToDepthWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    return MakeWorkload<ClSplitterWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateStack(const StackQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return MakeWorkload<ClStackWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& info) const
{
    return MakeWorkload<ClStridedSliceWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const
{
    return MakeWorkload<ClSubtractionWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    return MakeWorkload<ClTransposeWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateTransposeConvolution2d(
    const TransposeConvolution2dQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return MakeWorkload<ClTransposeConvolution2dWorkload>(descriptor,
                                                          info,
                                                          m_MemoryManager->GetIntraLayerManager(),
                                                          m_CLCompileContext);
}

} // namespace armnn