ArmNN
 24.02
ClWorkloadFactory.cpp
//
// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "ClWorkloadFactory.hpp"
#include "ClBackendId.hpp"
#include "ClBackendModelContext.hpp"
#include "ClContextDeserializer.hpp"
#include "ClContextSerializer.hpp"

#include <Layer.hpp>

#include <armnn/Exceptions.hpp>
#include <armnn/Logging.hpp>
#include <armnn/Utils.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <backendsCommon/MakeWorkloadHelper.hpp>
#include <armnn/backends/MemCopyWorkload.hpp>
#include <armnn/backends/MemImportWorkload.hpp>
#include <armnn/backends/TensorHandle.hpp>

#include <cl/ClTensorHandle.hpp>
#include <cl/workloads/ClWorkloads.hpp>
#include <cl/workloads/ClWorkloadUtils.hpp>

#include <arm_compute/core/CL/CLKernelLibrary.h>
#include <arm_compute/runtime/CL/CLBufferAllocator.h>
#include <arm_compute/runtime/CL/CLScheduler.h>

#include <Filesystem.hpp>
#include <fstream>

#include <sys/stat.h>

namespace armnn
{

namespace
{
static const BackendId s_Id{ClBackendId()};
}

bool ClWorkloadFactory::IsLayerSupported(const Layer& layer,
                                         Optional<DataType> dataType,
                                         std::string& outReasonIfUnsupported)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
}

bool ClWorkloadFactory::IsLayerSupported(const IConnectableLayer& layer,
                                         Optional<DataType> dataType,
                                         std::string& outReasonIfUnsupported,
                                         const ModelOptions& modelOptions)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported, modelOptions);
}

const BackendId& ClWorkloadFactory::GetBackendId() const
{
    return s_Id;
}
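
// Invoked once all workloads have been created. When the backend ModelOptions
// requested a cached network, this serializes the compiled OpenCL programs to
// the supplied file descriptor and/or file path.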
void ClWorkloadFactory::AfterWorkloadsCreated()
{
    if(m_ModelContextPtr)
    {
        auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
        if (modelOptions->SaveCachedNetwork())
        {
            ClContextSerializer serializer;
            serializer.Serialize(m_CLCompileContext);
            auto cachedFd = modelOptions->GetCachedFileDescriptor();
            if (cachedFd != -1)
            {
                std::vector<uint8_t> compiledContextData;
                std::stringstream stream;
                bool serialized = serializer.SaveSerializedToStream(stream);
                if (serialized)
                {
                    std::string const serializedString{stream.str()};
                    std::copy(serializedString.begin(),
                              serializedString.end(),
                              std::back_inserter(compiledContextData));
                    auto success = write(cachedFd, compiledContextData.data(), compiledContextData.size());
                    if (success == -1)
                    {
                        ARMNN_LOG(info) << "ClWorkloadFactory:: Could not cache the compiled context!";
                    }
                }
            }

            // Save map to a filepath provided in ModelOptions
            auto filePath = modelOptions->GetCachedNetworkFilePath();
            if (filePath != "" && fs::exists(filePath) && fs::is_regular_file(filePath))
            {
                // Serialize ClContext to the file specified
                std::ofstream file(filePath, std::ios::out | std::ios::binary);
                serializer.SaveSerializedToStream(file);
            }
        }
    }
}
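
// Helpers that construct a workload and translate any cl::Error thrown by the
// Compute Library into an armnn::RuntimeException via WrapClError.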
template <typename FloatWorkload, typename Uint8Workload, typename QueueDescriptorType, typename... Args>
std::unique_ptr<IWorkload> ClWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
                                                           const WorkloadInfo& info,
                                                           Args&&... args)
{
    try
    {
        return MakeWorkloadHelper<FloatWorkload, Uint8Workload>(descriptor, info, std::forward<Args>(args)...);
    }
    catch (const cl::Error& clError)
    {
        throw WrapClError(clError, CHECK_LOCATION());
    }
}

template <typename Workload, typename QueueDescriptorType, typename... Args>
std::unique_ptr<IWorkload> ClWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
                                                           const WorkloadInfo& info,
                                                           Args&&... args)
{
    try
    {
        return std::make_unique<Workload>(descriptor, info, std::forward<Args>(args)...);
    }
    catch (const cl::Error& clError)
    {
        throw WrapClError(clError, CHECK_LOCATION());
    }
}
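
// Builds m_CLCompileContext from the default OpenCL context and device and,
// when not saving a cached network, preloads any programs previously cached
// through the ModelOptions file descriptor or file path.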
void ClWorkloadFactory::InitializeCLCompileContext()
{
    // Initialize our m_CLCompileContext using default device and context
    auto context = arm_compute::CLKernelLibrary::get().context();
    auto device = arm_compute::CLKernelLibrary::get().get_device();
    m_CLCompileContext = arm_compute::CLCompileContext(context, device);

    if (m_ModelContextPtr)
    {
        // Load saved programs if the user has set a filepath
        auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
        auto filePath = modelOptions->GetCachedNetworkFilePath();
        if (!(modelOptions->SaveCachedNetwork()))
        {
            ClContextDeserializer deserializer;
            auto cachedFd = modelOptions->GetCachedFileDescriptor();
            if (cachedFd != -1)
            {
                struct stat statBuffer;
                if (fstat(cachedFd, &statBuffer) == 0)
                {
                    long dataSize = static_cast<long>(statBuffer.st_size);
                    if (dataSize > 0)
                    {
                        auto offset = lseek(cachedFd, 0, SEEK_CUR);
                        if (offset == 0)
                        {
                            std::vector<uint8_t> compiledContextData(static_cast<unsigned int>(dataSize));
                            auto success = pread(cachedFd, compiledContextData.data(), compiledContextData.size(), 0);
                            if (success != -1)
                            {
                                deserializer.DeserializeFromBinary(m_CLCompileContext,
                                                                   context,
                                                                   device,
                                                                   compiledContextData);
                            }
                        }
                    }
                }
            }

            if (filePath != "" && fs::exists(filePath) && fs::is_regular_file(filePath))
            {
                // Deserialize binary file and load into m_CLCompileContext
                deserializer.Deserialize(m_CLCompileContext, context, device, filePath);
            }
        }
    }
}

ClWorkloadFactory::ClWorkloadFactory(const std::shared_ptr<ClMemoryManager>& memoryManager)
    : m_MemoryManager(memoryManager), m_ModelContextPtr(IBackendInternal::IBackendSpecificModelContextPtr{})
{
    InitializeCLCompileContext();
}

ClWorkloadFactory::ClWorkloadFactory(const std::shared_ptr<ClMemoryManager>& memoryManager,
                                     const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
    : m_MemoryManager(memoryManager), m_ModelContextPtr(modelContextPtr)
{
    InitializeCLCompileContext();
}
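
// Tensor handles created here share the factory's inter-layer memory group,
// so their backing memory can be managed across layers.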
std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                     const bool IsMemoryManaged) const
{
    IgnoreUnused(IsMemoryManaged);
    std::unique_ptr<ClTensorHandle> tensorHandle = std::make_unique<ClTensorHandle>(tensorInfo);
    tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());

    return tensorHandle;
}

std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                     DataLayout dataLayout,
                                                                     const bool IsMemoryManaged) const
{
    IgnoreUnused(IsMemoryManaged);
    std::unique_ptr<ClTensorHandle> tensorHandle = std::make_unique<ClTensorHandle>(tensorInfo, dataLayout);
    tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());

    return tensorHandle;
}
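
// Returns nullptr if the requested sub-tensor does not fit inside its parent;
// the origin coordinates are reversed to match Arm Compute's ordering.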
std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateSubTensorHandle(ITensorHandle& parent,
                                                                        TensorShape const& subTensorShape,
                                                                        unsigned int const* subTensorOrigin) const
{
    arm_compute::Coordinates coords;
    arm_compute::TensorShape shape = armcomputetensorutils::BuildArmComputeTensorShape(subTensorShape);

    coords.set_num_dimensions(subTensorShape.GetNumDimensions());
    for (unsigned int i = 0; i < subTensorShape.GetNumDimensions(); i++)
    {
        // Arm compute indexes tensor coords in reverse order.
        unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
        coords.set(i, armnn::numeric_cast<int>(subTensorOrigin[revertedIndex]));
    }

    const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
    if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
    {
        return nullptr;
    }

    return std::make_unique<ClSubTensorHandle>(
        PolymorphicDowncast<IClTensorHandle*>(&parent), shape, coords);
}
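
// Single dispatch point for the CL backend: each layer type downcasts the
// generic QueueDescriptor to its layer-specific descriptor and builds the
// matching CL workload. Unsupported layer types return nullptr.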
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateWorkload(LayerType type,
                                                             const QueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    switch(type)
    {
        case LayerType::Activation :
        {
            auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClActivationWorkload>(*activationQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Addition :
        {
            auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClAdditionWorkload>(*additionQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::ArgMinMax :
        {
            auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::BatchMatMul :
        {
            auto batchMatMulQueueDescriptor = PolymorphicDowncast<const BatchMatMulQueueDescriptor*>(&descriptor);
            return std::make_unique<ClBatchMatMulWorkload>(*batchMatMulQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::BatchNormalization :
        {
            auto batchNormalizationQueueDescriptor
                = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClBatchNormalizationFloatWorkload, NullWorkload>
                (*batchNormalizationQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::BatchToSpaceNd :
        {
            auto batchToSpaceNdQueueDescriptor
                = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Cast :
        {
            auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClCastWorkload>(*castQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::ChannelShuffle :
        {
            auto channelShuffleQueueDescriptor
                = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Comparison :
        {
            auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClComparisonWorkload>(*comparisonQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Concat :
        {
            auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClConcatWorkload>(*concatQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Constant :
        {
            auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClConstantWorkload>(*constantQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::ConvertFp16ToFp32 :
        {
            auto convertFp16ToFp32QueueDescriptor
                = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
            return MakeWorkload<ClConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor,
                                                             info,
                                                             m_CLCompileContext);
        }
        case LayerType::ConvertFp32ToFp16 :
        {
            auto convertFp32ToFp16QueueDescriptor
                = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
            return MakeWorkload<ClConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor,
                                                             info,
                                                             m_CLCompileContext);
        }
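        // Both convolution cases honour the backend's FastMath model option when one was supplied.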
        case LayerType::Convolution2d :
        {
            auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
            bool isFastMathEnabled = false;
            if (m_ModelContextPtr)
            {
                if (m_ModelContextPtr.get() != nullptr)
                {
                    auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
                    if (modelOptions)
                    {
                        isFastMathEnabled = modelOptions->IsFastMathEnabled();
                    }
                }
            }
            return MakeWorkload<ClConvolution2dWorkload>(*convolution2dQueueDescriptor,
                                                         info,
                                                         m_MemoryManager->GetIntraLayerManager(),
                                                         m_CLCompileContext,
                                                         isFastMathEnabled);
        }
        case LayerType::Convolution3d :
        {
            auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
            bool isFastMathEnabled = false;
            if (m_ModelContextPtr)
            {
                if (m_ModelContextPtr.get() != nullptr)
                {
                    auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
                    if (modelOptions)
                    {
                        isFastMathEnabled = modelOptions->IsFastMathEnabled();
                    }
                }
            }
            return MakeWorkload<ClConvolution3dWorkload>(*convolution3dQueueDescriptor,
                                                         info,
                                                         m_MemoryManager->GetIntraLayerManager(),
                                                         m_CLCompileContext,
                                                         isFastMathEnabled);
        }
        case LayerType::Debug :
        {
            auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
            return MakeWorkload<NullWorkload, NullWorkload>(*debugQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::DepthToSpace :
        {
            auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::DepthwiseConvolution2d :
        {
            auto depthwiseConvolution2dQueueDescriptor
                = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClDepthwiseConvolutionWorkload>(*depthwiseConvolution2dQueueDescriptor,
                                                                info,
                                                                m_CLCompileContext);
        }
        case LayerType::Dequantize :
        {
            auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClDequantizeWorkload>(*dequantizeQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::DetectionPostProcess :
        {
            auto detectionPostProcessQueueDescriptor
                = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
            return MakeWorkload<NullWorkload, NullWorkload>(*detectionPostProcessQueueDescriptor,
                                                            info,
                                                            m_CLCompileContext);
        }
        case LayerType::Division :
        {
            auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
            return std::make_unique<ClDivisionWorkload>(*divisionQueueDescriptor, info, m_CLCompileContext);
        }
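        // ElementwiseBinary is lowered onto the dedicated CL workload for each
        // operation; only Power and SqDiff use the generic ClElementwiseBinaryWorkload.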
        case LayerType::ElementwiseBinary :
        {
            auto elementwiseBinaryQueueDescriptor
                = PolymorphicDowncast<const ElementwiseBinaryQueueDescriptor*>(&descriptor);
            switch (elementwiseBinaryQueueDescriptor->m_Parameters.m_Operation)
            {
                case BinaryOperation::Add:
                {
                    AdditionQueueDescriptor additionQueueDescriptor;
                    additionQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    additionQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    additionQueueDescriptor.m_AdditionalInfoObject =
                        elementwiseBinaryQueueDescriptor->m_AdditionalInfoObject;
                    return std::make_unique<ClAdditionWorkload>(additionQueueDescriptor, info, m_CLCompileContext);
                }
                case BinaryOperation::Div:
                {
                    DivisionQueueDescriptor divisionQueueDescriptor;
                    divisionQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    divisionQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    divisionQueueDescriptor.m_AdditionalInfoObject =
                        elementwiseBinaryQueueDescriptor->m_AdditionalInfoObject;
                    return std::make_unique<ClDivisionWorkload>(divisionQueueDescriptor, info, m_CLCompileContext);
                }
                case BinaryOperation::Maximum:
                {
                    MaximumQueueDescriptor maximumQueueDescriptor;
                    maximumQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    maximumQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    maximumQueueDescriptor.m_AdditionalInfoObject =
                        elementwiseBinaryQueueDescriptor->m_AdditionalInfoObject;
                    return std::make_unique<ClMaximumWorkload>(maximumQueueDescriptor, info, m_CLCompileContext);
                }
                case BinaryOperation::Minimum:
                {
                    MinimumQueueDescriptor minimumQueueDescriptor;
                    minimumQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    minimumQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    minimumQueueDescriptor.m_AdditionalInfoObject =
                        elementwiseBinaryQueueDescriptor->m_AdditionalInfoObject;
                    return std::make_unique<ClMinimumWorkload>(minimumQueueDescriptor, info, m_CLCompileContext);
                }
                case BinaryOperation::Mul:
                {
                    MultiplicationQueueDescriptor multiplicationQueueDescriptor;
                    multiplicationQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    multiplicationQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    multiplicationQueueDescriptor.m_AdditionalInfoObject =
                        elementwiseBinaryQueueDescriptor->m_AdditionalInfoObject;
                    return std::make_unique<ClMultiplicationWorkload>(multiplicationQueueDescriptor,
                                                                      info,
                                                                      m_CLCompileContext);
                }
                case BinaryOperation::Power:
                case BinaryOperation::SqDiff:
                {
                    return std::make_unique<ClElementwiseBinaryWorkload>(*elementwiseBinaryQueueDescriptor,
                                                                         info,
                                                                         m_CLCompileContext);
                }
                case BinaryOperation::Sub:
                {
                    SubtractionQueueDescriptor subtractionQueueDescriptor;
                    subtractionQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    subtractionQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    subtractionQueueDescriptor.m_AdditionalInfoObject =
                        elementwiseBinaryQueueDescriptor->m_AdditionalInfoObject;
                    return std::make_unique<ClSubtractionWorkload>(subtractionQueueDescriptor,
                                                                   info,
                                                                   m_CLCompileContext);
                }
                default:
                    return nullptr;
            }
        }
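        // Abs and Rsqrt repackage their inputs into dedicated descriptors; the
        // remaining unary operations take the ElementwiseUnary descriptor directly.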
        case LayerType::ElementwiseUnary :
        {
            auto elementwiseUnaryQueueDescriptor
                = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
            switch(elementwiseUnaryQueueDescriptor->m_Parameters.m_Operation)
            {
                case UnaryOperation::Abs:
                {
                    AbsQueueDescriptor absQueueDescriptor;
                    absQueueDescriptor.m_Inputs = elementwiseUnaryQueueDescriptor->m_Inputs;
                    absQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
                    return std::make_unique<ClAbsWorkload>(absQueueDescriptor, info, m_CLCompileContext);
                }
                case UnaryOperation::Exp:
                    return std::make_unique<ClExpWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
                case UnaryOperation::Log:
                    return std::make_unique<ClLogWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
                case UnaryOperation::LogicalNot:
                    return std::make_unique<ClLogicalNotWorkload>(*elementwiseUnaryQueueDescriptor,
                                                                  info,
                                                                  m_CLCompileContext);
                case UnaryOperation::Neg:
                    return std::make_unique<ClNegWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
                case UnaryOperation::Rsqrt:
                {
                    RsqrtQueueDescriptor rsqrtQueueDescriptor;
                    rsqrtQueueDescriptor.m_Inputs = elementwiseUnaryQueueDescriptor->m_Inputs;
                    rsqrtQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
                    return std::make_unique<ClRsqrtWorkload>(rsqrtQueueDescriptor, info, m_CLCompileContext);
                }
                case UnaryOperation::Sin:
                    return std::make_unique<ClSinWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
                case UnaryOperation::Sqrt:
                    return std::make_unique<ClSqrtWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
                default:
                    return nullptr;
            }
        }
        case LayerType::Fill :
        {
            auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
            return std::make_unique<ClFillWorkload>(*fillQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Floor :
        {
            auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClFloorFloatWorkload, NullWorkload>(*floorQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::FullyConnected :
        {
            auto fullyConnectedQueueDescriptor
                = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClFullyConnectedWorkload>(*fullyConnectedQueueDescriptor,
                                                          info,
                                                          m_MemoryManager->GetIntraLayerManager(),
                                                          m_CLCompileContext);
        }
        case LayerType::Gather :
        {
            auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClGatherWorkload>(*gatherQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::GatherNd :
        {
            auto gatherNdQueueDescriptor = PolymorphicDowncast<const GatherNdQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClGatherNdWorkload>(*gatherNdQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Input :
        {
            auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
            return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
        }
        case LayerType::InstanceNormalization :
        {
            auto instanceNormalizationQueueDescriptor
                = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor,
                                                                 info,
                                                                 m_CLCompileContext);
        }
        case LayerType::L2Normalization :
        {
            auto l2NormalizationQueueDescriptor
                = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClL2NormalizationFloatWorkload, NullWorkload>(*l2NormalizationQueueDescriptor,
                                                                              info,
                                                                              m_CLCompileContext);
        }
        case LayerType::LogicalBinary :
        {
            auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
            switch(logicalBinaryQueueDescriptor->m_Parameters.m_Operation)
            {
                case LogicalBinaryOperation::LogicalAnd:
                    return std::make_unique<ClLogicalAndWorkload>(*logicalBinaryQueueDescriptor,
                                                                  info,
                                                                  m_CLCompileContext);
                case LogicalBinaryOperation::LogicalOr:
                    return std::make_unique<ClLogicalOrWorkload>(*logicalBinaryQueueDescriptor,
                                                                 info,
                                                                 m_CLCompileContext);
                default:
                    return nullptr;
            }
        }
        case LayerType::LogSoftmax :
        {
            auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor,
                                                      info,
                                                      m_MemoryManager->GetIntraLayerManager(),
                                                      m_CLCompileContext);
        }
        case LayerType::Lstm :
        {
            auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClLstmFloatWorkload, NullWorkload>(*lstmQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Maximum :
        {
            auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClMaximumWorkload>(*maximumQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Mean :
        {
            auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClMeanWorkload>(*meanQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::MemCopy :
        {
            auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
            if (memCopyQueueDescriptor->m_Inputs.empty() || !memCopyQueueDescriptor->m_Inputs[0])
            {
                throw InvalidArgumentException("ClWorkloadFactory: Invalid null input for MemCopy workload");
            }
            return MakeWorkload<CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
        }
        case LayerType::MemImport :
        {
            auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
            if (memImportQueueDescriptor->m_Inputs.empty() || !memImportQueueDescriptor->m_Inputs[0])
            {
                throw InvalidArgumentException("ClWorkloadFactory: Invalid null input for MemImport workload");
            }
            return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
        }
        case LayerType::Minimum :
        {
            auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClMinimumWorkload>(*minimumQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Multiplication :
        {
            auto multiplicationQueueDescriptor = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClMultiplicationWorkload>(*multiplicationQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Normalization :
        {
            auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClNormalizationFloatWorkload, NullWorkload>(*normalizationQueueDescriptor,
                                                                            info,
                                                                            m_CLCompileContext);
        }
        case LayerType::Output :
        {
            auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
            return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
        }
        case LayerType::Pad :
        {
            auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClPadWorkload>(*padQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Permute :
        {
            auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClPermuteWorkload>(*permuteQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Pooling2d :
        {
            auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClPooling2dWorkload>(*pooling2dQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Pooling3d :
        {
            auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClPooling3dWorkload>(*pooling3dQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::PreCompiled :
        {
            auto preCompiledQueueDescriptor = PolymorphicDowncast<const PreCompiledQueueDescriptor*>(&descriptor);
            return MakeWorkload<NullWorkload, NullWorkload>(*preCompiledQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Prelu :
        {
            auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClPreluWorkload>(*preluQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::QLstm :
        {
            auto qLstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<ClQLstmWorkload>(*qLstmQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Quantize :
        {
            auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClQuantizeWorkload>(*quantizeQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::QuantizedLstm :
        {
            auto quantizedLstmQueueDescriptor = PolymorphicDowncast<const QuantizedLstmQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClQuantizedLstmWorkload>(*quantizedLstmQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Rank :
        {
            auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
            return std::make_unique<ClRankWorkload>(*rankQueueDescriptor, info);
        }
        case LayerType::Reduce :
        {
            auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
            return std::make_unique<ClReduceWorkload>(*reduceQueueDescriptor, info);
        }
        case LayerType::Reshape :
        {
            auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClReshapeWorkload>(*reshapeQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Resize :
        {
            auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClResizeWorkload>(*resizeQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::ReverseV2 :
        {
            auto reverseV2QueueDescriptor = PolymorphicDowncast<const ReverseV2QueueDescriptor*>(&descriptor);
            return MakeWorkload<ClReverseV2Workload>(*reverseV2QueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Slice :
        {
            auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClSliceWorkload>(*sliceQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Softmax :
        {
            auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<ClSoftmaxWorkload>(*softmaxQueueDescriptor,
                                                       info,
                                                       m_MemoryManager->GetIntraLayerManager(),
                                                       m_CLCompileContext);
        }
        case LayerType::SpaceToBatchNd :
        {
            auto spaceToBatchNdQueueDescriptor
                = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::SpaceToDepth :
        {
            auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Splitter :
        {
            auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClSplitterWorkload>(*splitterQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Stack :
        {
            auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClStackWorkload>(*stackQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::StridedSlice :
        {
            auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClStridedSliceWorkload>(*stridedSliceQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Subtraction :
        {
            auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClSubtractionWorkload>(*subtractionQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Tile:
        {
            auto tileQueueDescriptor = PolymorphicDowncast<const TileQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClTileWorkload>(*tileQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Transpose :
        {
            auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClTransposeWorkload>(*transposeQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::TransposeConvolution2d :
        {
            auto transposeConvolution2dQueueDescriptor
                = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor,
                                                                  info,
                                                                  m_MemoryManager->GetIntraLayerManager(),
                                                                  m_CLCompileContext);
        }
        case LayerType::UnidirectionalSequenceLstm :
        {
            auto desc = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<ClUnidirectionalSequenceLstmFloatWorkload, NullWorkload>(*desc,
                                                                                               info,
                                                                                               m_CLCompileContext);
        }
        default:
            return nullptr;
    }
}

} // namespace armnn