ArmNN 21.11
ClWorkloadFactory.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "ClWorkloadFactory.hpp"
#include "ClBackendId.hpp"
#include "ClBackendModelContext.hpp"
#include "ClContextDeserializer.hpp"
#include "ClContextSerializer.hpp"

#include <Layer.hpp>

#include <armnn/Exceptions.hpp>
#include <armnn/Logging.hpp>
#include <armnn/Utils.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <backendsCommon/MakeWorkloadHelper.hpp>
#include <backendsCommon/MemCopyWorkload.hpp>
#include <backendsCommon/MemImportWorkload.hpp>

#include <cl/ClTensorHandle.hpp>
#include <cl/workloads/ClWorkloads.hpp>

#include <arm_compute/core/CL/CLKernelLibrary.h>
#include <arm_compute/runtime/CL/CLBufferAllocator.h>
#include <arm_compute/runtime/CL/CLScheduler.h>

#include <Filesystem.hpp>
#include <fstream>

#include <sys/stat.h>

namespace armnn
{

namespace
{
static const BackendId s_Id{ClBackendId()};
}

bool ClWorkloadFactory::IsLayerSupported(const Layer& layer,
                                         Optional<DataType> dataType,
                                         std::string& outReasonIfUnsupported)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
}

bool ClWorkloadFactory::IsLayerSupported(const IConnectableLayer& layer,
                                         Optional<DataType> dataType,
                                         std::string& outReasonIfUnsupported,
                                         const ModelOptions& modelOptions)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported, modelOptions);
}

const BackendId& ClWorkloadFactory::GetBackendId() const
{
    return s_Id;
}

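// Runs after all workloads for a network have been created. If the ClBackendModelContext
// requests SaveCachedNetwork, the OpenCL programs compiled into m_CLCompileContext are
// serialized to the cached-network file descriptor and/or file path given in the ModelOptions.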
void ClWorkloadFactory::AfterWorkloadsCreated()
{
    if(m_ModelContextPtr)
    {
        auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
        if (modelOptions->SaveCachedNetwork())
        {
            ClContextSerializer serializer;
            serializer.Serialize(m_CLCompileContext);
            auto cachedFd = modelOptions->GetCachedFileDescriptor();
            if (cachedFd != -1)
            {
                std::vector<uint8_t> compiledContextData;
                std::stringstream stream;
                bool serialized = serializer.SaveSerializedToStream(stream);
                if (serialized)
                {
                    std::string const serializedString{stream.str()};
                    std::copy(serializedString.begin(),
                              serializedString.end(),
                              std::back_inserter(compiledContextData));
                    auto success = write(cachedFd, compiledContextData.data(), compiledContextData.size());
                    if (success == -1)
                    {
                        ARMNN_LOG(info) << "ClWorkloadFactory:: Could not cache the compiled context!";
                    }
                }
            }

            // Save map to a filepath provided in ModelOptions
            auto filePath = modelOptions->GetCachedNetworkFilePath();
            if (filePath != "" && fs::exists(filePath) && fs::is_regular_file(filePath))
            {
                // Serialize ClContext to the file specified
                std::ofstream file(filePath, std::ios::out | std::ios::binary);
                serializer.SaveSerializedToStream(file);
            }
        }
    }
}

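// Helper templates that construct a workload and translate any cl::Error thrown by the
// Compute Library into an armnn::RuntimeException tagged with the source location.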
template <typename FloatWorkload, typename Uint8Workload, typename QueueDescriptorType, typename... Args>
std::unique_ptr<IWorkload> ClWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
                                                           const WorkloadInfo& info,
                                                           Args&&... args)
{
    try
    {
        return MakeWorkloadHelper<FloatWorkload, Uint8Workload>(descriptor, info, std::forward<Args>(args)...);
    }
    catch (const cl::Error& clError)
    {
        throw WrapClError(clError, CHECK_LOCATION());
    }
}

template <typename Workload, typename QueueDescriptorType, typename... Args>
std::unique_ptr<IWorkload> ClWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
                                                           const WorkloadInfo& info,
                                                           Args&&... args)
{
    try
    {
        return std::make_unique<Workload>(descriptor, info, std::forward<Args>(args)...);
    }
    catch (const cl::Error& clError)
    {
        throw WrapClError(clError, CHECK_LOCATION());
    }
}

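// Builds m_CLCompileContext from the default OpenCL context and device. When the model
// options reference a previously cached network (and we are loading rather than saving),
// the compiled programs are deserialized up front so they can be reused instead of rebuilt.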
void ClWorkloadFactory::InitializeCLCompileContext()
{
    // Initialize our m_CLCompileContext using default device and context
    auto context = arm_compute::CLKernelLibrary::get().context();
    auto device = arm_compute::CLKernelLibrary::get().get_device();
    m_CLCompileContext = arm_compute::CLCompileContext(context, device);

    if (m_ModelContextPtr)
    {
        // Load saved programs if the user has set a filepath
        auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
        auto filePath = modelOptions->GetCachedNetworkFilePath();
        if (!(modelOptions->SaveCachedNetwork()))
        {
            ClContextDeserializer deserializer;
            auto cachedFd = modelOptions->GetCachedFileDescriptor();
            if (cachedFd != -1)
            {
                struct stat statBuffer;
                if (fstat(cachedFd, &statBuffer) == 0)
                {
                    long dataSize = static_cast<long>(statBuffer.st_size);
                    if (dataSize > 0)
                    {
                        auto offset = lseek(cachedFd, 0, SEEK_CUR);
                        if (offset == 0)
                        {
                            std::vector<uint8_t> compiledContextData(static_cast<unsigned int>(dataSize));
                            auto success = pread(cachedFd, compiledContextData.data(), compiledContextData.size(), 0);
                            if (success != -1)
                            {
                                deserializer.DeserializeFromBinary(m_CLCompileContext,
                                                                   context,
                                                                   device,
                                                                   compiledContextData);
                            }
                        }
                    }
                }
            }

            if (filePath != "" && fs::exists(filePath) && fs::is_regular_file(filePath))
            {
                // Deserialize binary file and load into m_CLCompileContext
                deserializer.Deserialize(m_CLCompileContext, context, device, filePath);
            }
        }
    }
}

ClWorkloadFactory::ClWorkloadFactory(const std::shared_ptr<ClMemoryManager>& memoryManager)
    : m_MemoryManager(memoryManager), m_ModelContextPtr(IBackendInternal::IBackendSpecificModelContextPtr{})
{
    InitializeCLCompileContext();
}

ClWorkloadFactory::ClWorkloadFactory(const std::shared_ptr<ClMemoryManager>& memoryManager,
                                     const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
    : m_MemoryManager(memoryManager), m_ModelContextPtr(modelContextPtr)
{
    InitializeCLCompileContext();
}

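// Tensor handles on the CL backend are always memory-managed: the IsMemoryManaged flag is
// ignored and every handle is registered with the memory manager's inter-layer memory group.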
std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                     const bool IsMemoryManaged) const
{
    IgnoreUnused(IsMemoryManaged);
    std::unique_ptr<ClTensorHandle> tensorHandle = std::make_unique<ClTensorHandle>(tensorInfo);
    tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());

    return tensorHandle;
}

std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                     DataLayout dataLayout,
                                                                     const bool IsMemoryManaged) const
{
    IgnoreUnused(IsMemoryManaged);
    std::unique_ptr<ClTensorHandle> tensorHandle = std::make_unique<ClTensorHandle>(tensorInfo, dataLayout);
    tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());

    return tensorHandle;
}

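// Creates a handle that aliases a region of the parent tensor. The origin coordinates are
// reversed because the Compute Library indexes tensor dimensions in the opposite order to
// ArmNN; nullptr is returned if ACL reports the requested sub-tensor as invalid.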
std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateSubTensorHandle(ITensorHandle& parent,
                                                                        TensorShape const& subTensorShape,
                                                                        unsigned int const* subTensorOrigin) const
{
    arm_compute::Coordinates coords;
    arm_compute::TensorShape shape = armcomputetensorutils::BuildArmComputeTensorShape(subTensorShape);

    coords.set_num_dimensions(subTensorShape.GetNumDimensions());
    for (unsigned int i = 0; i < subTensorShape.GetNumDimensions(); i++)
    {
        // Arm compute indexes tensor coords in reverse order.
        unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
        coords.set(i, armnn::numeric_cast<int>(subTensorOrigin[revertedIndex]));
    }

    const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
    if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
    {
        return nullptr;
    }

    return std::make_unique<ClSubTensorHandle>(
        PolymorphicDowncast<IClTensorHandle*>(&parent), shape, coords);
}

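// Each CreateXxx method below maps an ArmNN layer onto its Compute Library workload, passing
// the shared CLCompileContext and, where the workload needs scratch memory, the intra-layer
// memory manager.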
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    return MakeWorkload<ClActivationWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    return MakeWorkload<ClAdditionWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    return std::make_unique<ClArgMinMaxWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateBatchNormalization(
    const BatchNormalizationQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return MakeWorkload<ClBatchNormalizationFloatWorkload, NullWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
{
    return MakeWorkload<ClBatchToSpaceNdWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateCast(const CastQueueDescriptor& descriptor,
                                                         const WorkloadInfo& info) const
{
    return MakeWorkload<ClCastWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateChannelShuffle(const ChannelShuffleQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
{
    return MakeWorkload<ClChannelShuffleWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    return MakeWorkload<ClComparisonWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return MakeWorkload<ClConcatWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    return MakeWorkload<ClConstantWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConvertFp16ToFp32(
    const ConvertFp16ToFp32QueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return MakeWorkload<ClConvertFp16ToFp32Workload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConvertFp32ToFp16(
    const ConvertFp32ToFp16QueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return MakeWorkload<ClConvertFp32ToFp16Workload>(descriptor, info, m_CLCompileContext);
}

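// Convolution workloads read the FastMathEnabled backend option and forward it to ACL, which
// may then pick faster convolution algorithms at a possible cost in numerical precision.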
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
                                                                  const WorkloadInfo& info) const
{
    bool isFastMathEnabled = false;
    if (m_ModelContextPtr)
    {
        if (m_ModelContextPtr.get() != nullptr)
        {
            auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
            if (modelOptions)
            {
                isFastMathEnabled = modelOptions->IsFastMathEnabled();
            }
        }
    }
    return MakeWorkload<ClConvolution2dWorkload>(descriptor,
                                                 info,
                                                 m_MemoryManager->GetIntraLayerManager(),
                                                 m_CLCompileContext,
                                                 isFastMathEnabled);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConvolution3d(const Convolution3dQueueDescriptor& descriptor,
                                                                  const WorkloadInfo& info) const
{
    bool isFastMathEnabled = false;
    if (m_ModelContextPtr)
    {
        if (m_ModelContextPtr.get() != nullptr)
        {
            auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
            if (modelOptions)
            {
                isFastMathEnabled = modelOptions->IsFastMathEnabled();
            }
        }
    }
    return MakeWorkload<ClConvolution3dWorkload>(descriptor,
                                                 info,
                                                 m_MemoryManager->GetIntraLayerManager(),
                                                 m_CLCompileContext,
                                                 isFastMathEnabled);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& info) const
{
    return MakeWorkload<ClDepthToSpaceWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDepthwiseConvolution2d(
    const DepthwiseConvolution2dQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return MakeWorkload<ClDepthwiseConvolutionWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDequantize(const DequantizeQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    return MakeWorkload<ClDequantizeWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDetectionPostProcess(
    const DetectionPostProcessQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    return std::make_unique<ClDivisionWorkload>(descriptor, info, m_CLCompileContext);
}

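// Elementwise unary layers are dispatched to a dedicated CL workload per operation. Abs and
// Rsqrt repackage the inputs/outputs into their own queue descriptors; unsupported operations
// fall through to nullptr.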
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
                                                                     const WorkloadInfo& info) const
{
    switch(descriptor.m_Parameters.m_Operation)
    {
        case UnaryOperation::Abs:
        {
            AbsQueueDescriptor absQueueDescriptor;
            absQueueDescriptor.m_Inputs  = descriptor.m_Inputs;
            absQueueDescriptor.m_Outputs = descriptor.m_Outputs;

            return std::make_unique<ClAbsWorkload>(absQueueDescriptor, info, m_CLCompileContext);
        }
        case UnaryOperation::Exp:
            return std::make_unique<ClExpWorkload>(descriptor, info, m_CLCompileContext);
        case UnaryOperation::Log:
            return std::make_unique<ClLogWorkload>(descriptor, info, m_CLCompileContext);
        case UnaryOperation::LogicalNot:
            return std::make_unique<ClLogicalNotWorkload>(descriptor, info, m_CLCompileContext);
        case UnaryOperation::Neg:
            return std::make_unique<ClNegWorkload>(descriptor, info, m_CLCompileContext);
        case UnaryOperation::Rsqrt:
        {
            RsqrtQueueDescriptor rsqrtQueueDescriptor;
            rsqrtQueueDescriptor.m_Inputs  = descriptor.m_Inputs;
            rsqrtQueueDescriptor.m_Outputs = descriptor.m_Outputs;

            return std::make_unique<ClRsqrtWorkload>(rsqrtQueueDescriptor, info, m_CLCompileContext);
        }
        case UnaryOperation::Sin:
            return std::make_unique<ClSinWorkload>(descriptor, info, m_CLCompileContext);
        default:
            return nullptr;
    }
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateFill(const FillQueueDescriptor& descriptor,
                                                         const WorkloadInfo& info) const
{
    return std::make_unique<ClFillWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return MakeWorkload<ClFloorFloatWorkload, NullWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
{
    return MakeWorkload<ClFullyConnectedWorkload>(descriptor,
                                                  info,
                                                  m_MemoryManager->GetIntraLayerManager(),
                                                  m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateGather(const GatherQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return MakeWorkload<ClGatherWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateInstanceNormalization(
    const InstanceNormalizationQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return MakeWorkload<ClInstanceNormalizationWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
                                                                    const WorkloadInfo& info) const
{
    return MakeWorkload<ClL2NormalizationFloatWorkload, NullWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateLogicalBinary(const LogicalBinaryQueueDescriptor& descriptor,
                                                                  const WorkloadInfo& info) const
{
    switch(descriptor.m_Parameters.m_Operation)
    {
        case LogicalBinaryOperation::LogicalAnd:
            return std::make_unique<ClLogicalAndWorkload>(descriptor, info, m_CLCompileContext);
        case LogicalBinaryOperation::LogicalOr:
            return std::make_unique<ClLogicalOrWorkload>(descriptor, info, m_CLCompileContext);
        default:
            return nullptr;
    }
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    return MakeWorkload<ClLogSoftmaxWorkload>(descriptor,
                                              info,
                                              m_MemoryManager->GetIntraLayerManager(),
                                              m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateLstm(const LstmQueueDescriptor& descriptor,
                                                         const WorkloadInfo& info) const
{
    return MakeWorkload<ClLstmFloatWorkload, NullWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return MakeWorkload<ClMaximumWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMean(const MeanQueueDescriptor& descriptor,
                                                         const WorkloadInfo& info) const
{
    return MakeWorkload<ClMeanWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    if (descriptor.m_Inputs.empty() || !descriptor.m_Inputs[0])
    {
        throw InvalidArgumentException("ClWorkloadFactory: Invalid null input for MemCopy workload");
    }

    return MakeWorkload<CopyMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    if (descriptor.m_Inputs.empty() || !descriptor.m_Inputs[0])
    {
        throw InvalidArgumentException("ClWorkloadFactory: Invalid null input for MemImport workload");
    }

    return std::make_unique<ImportMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return MakeWorkload<ClMinimumWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
{
    return MakeWorkload<ClMultiplicationWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& descriptor,
                                                                  const WorkloadInfo& info) const
{
    return MakeWorkload<ClNormalizationFloatWorkload, NullWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
                                                        const WorkloadInfo& info) const
{
    return MakeWorkload<ClPadWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return MakeWorkload<ClPermuteWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    return MakeWorkload<ClPooling2dWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const
{
    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreatePrelu(const PreluQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return MakeWorkload<ClPreluWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateQLstm(const QLstmQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return std::make_unique<ClQLstmWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    return MakeWorkload<ClQuantizeWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& descriptor,
                                                                  const WorkloadInfo& info) const
{
    return MakeWorkload<ClQuantizedLstmWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateRank(const RankQueueDescriptor& descriptor,
                                                         const WorkloadInfo& info) const
{
    return std::make_unique<ClRankWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateReduce(const ReduceQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return std::make_unique<ClReduceWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return MakeWorkload<ClReshapeWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return MakeWorkload<ClResizeWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return MakeWorkload<ClSliceWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return std::make_unique<ClSoftmaxWorkload>(descriptor,
                                               info,
                                               m_MemoryManager->GetIntraLayerManager(),
                                               m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
{
    return MakeWorkload<ClSpaceToBatchNdWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& info) const
{
    return MakeWorkload<ClSpaceToDepthWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    return MakeWorkload<ClSplitterWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateStack(const StackQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return MakeWorkload<ClStackWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& info) const
{
    return MakeWorkload<ClStridedSliceWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const
{
    return MakeWorkload<ClSubtractionWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    return MakeWorkload<ClTransposeWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateTransposeConvolution2d(
    const TransposeConvolution2dQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return MakeWorkload<ClTransposeConvolution2dWorkload>(descriptor,
                                                          info,
                                                          m_MemoryManager->GetIntraLayerManager(),
                                                          m_CLCompileContext);
}

} // namespace armnn