From 03c7ff3f6188240baaeaeb405a357a0c58195fec Mon Sep 17 00:00:00 2001
From: Nikhil Raj
Date: Tue, 22 Aug 2023 12:00:04 +0100
Subject: IVGCVSW-7702 Update Doxygen Docu for 23.08

Signed-off-by: Nikhil Raj
Change-Id: I357a9f7e47614589327c1ac5d95b6224ff77103d
---
 23.08/_cl_workload_factory_8cpp_source.html | 1057 +++++++++++++++++++++++++++
 1 file changed, 1057 insertions(+)
 create mode 100644 23.08/_cl_workload_factory_8cpp_source.html

diff --git a/23.08/_cl_workload_factory_8cpp_source.html b/23.08/_cl_workload_factory_8cpp_source.html
new file mode 100644
index 0000000000..d0afa218a9
--- /dev/null
+++ b/23.08/_cl_workload_factory_8cpp_source.html
@@ -0,0 +1,1057 @@

Arm NN: src/backends/cl/ClWorkloadFactory.cpp Source File
ClWorkloadFactory.cpp
Go to the documentation of this file.
//
// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "ClWorkloadFactory.hpp"
#include "ClBackendId.hpp"
// ... (elided #include directives)

#include <Layer.hpp>

#include <armnn/Exceptions.hpp>
#include <armnn/Logging.hpp>
#include <armnn/Utils.hpp>
// ... (elided #include directives)

#include <cl/ClTensorHandle.hpp>
// ... (elided #include directives)

#include <arm_compute/core/CL/CLKernelLibrary.h>
#include <arm_compute/runtime/CL/CLBufferAllocator.h>
#include <arm_compute/runtime/CL/CLScheduler.h>

// ... (elided #include directive)
#include <fstream>

#include <sys/stat.h>

namespace armnn
{

namespace
{
static const BackendId s_Id{ClBackendId()};
}

bool ClWorkloadFactory::IsLayerSupported(const Layer& layer,
                                         Optional<DataType> dataType,
                                         std::string& outReasonIfUnsupported)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
}

bool ClWorkloadFactory::IsLayerSupported(const IConnectableLayer& layer,
                                         Optional<DataType> dataType,
                                         std::string& outReasonIfUnsupported,
                                         const ModelOptions& modelOptions)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported, modelOptions);
}

const BackendId& ClWorkloadFactory::GetBackendId() const
{
    return s_Id;
}

void ClWorkloadFactory::AfterWorkloadsCreated()
{
    if(m_ModelContextPtr)
    {
        auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
        if (modelOptions->SaveCachedNetwork())
        {
            ClContextSerializer serializer;
            serializer.Serialize(m_CLCompileContext);
            auto cachedFd = modelOptions->GetCachedFileDescriptor();
            if (cachedFd != -1)
            {
                std::vector<uint8_t> compiledContextData;
                std::stringstream stream;
                bool serialized = serializer.SaveSerializedToStream(stream);
                if (serialized)
                {
                    std::string const serializedString{stream.str()};
                    std::copy(serializedString.begin(),
                              serializedString.end(),
                              std::back_inserter(compiledContextData));
                    auto success = write(cachedFd, compiledContextData.data(), compiledContextData.size());
                    if (success == -1)
                    {
                        ARMNN_LOG(info) << "ClWorkloadFactory:: Could not cache the compiled context!";
                    }
                }
            }

            // Save map to a filepath provided in ModelOptions
            auto filePath = modelOptions->GetCachedNetworkFilePath();
            if (filePath != "" && fs::exists(filePath) && fs::is_regular_file(filePath))
            {
                // Serialize ClContext to the file specified
                std::ofstream file(filePath, std::ios::out | std::ios::binary);
                serializer.SaveSerializedToStream(file);
            }
        }
    }
}
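// ---------------------------------------------------------------------------------------------
// Illustrative sketch (not part of ClWorkloadFactory.cpp): the cached-network behaviour that
// AfterWorkloadsCreated() reads through ClBackendModelContext is normally driven by per-backend
// ModelOptions supplied when the network is optimized. The option keys and the exact
// OptimizerOptions API shown below are assumptions inferred from the accessors used in this
// file; check ClBackendModelContext and INetwork.hpp for the authoritative names.
//
//     armnn::BackendOptions gpuAccOptions("GpuAcc",
//     {
//         { "SaveCachedNetwork",     true },
//         { "CachedNetworkFilePath", "/tmp/cl_cached_network.bin" }   // hypothetical path
//     });
//     armnn::OptimizerOptionsOpaque optimizerOptions;                 // or OptimizerOptions::m_ModelOptions
//     optimizerOptions.AddModelOption(gpuAccOptions);
//     // Passing these options to armnn::Optimize(...) lets AfterWorkloadsCreated() serialize
//     // the CL compile context once all workloads have been built.
// ---------------------------------------------------------------------------------------------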

template <typename FloatWorkload, typename Uint8Workload, typename QueueDescriptorType, typename... Args>
std::unique_ptr<IWorkload> ClWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
                                                           const WorkloadInfo& info,
                                                           Args&&... args)
{
    try
    {
        return MakeWorkloadHelper<FloatWorkload, Uint8Workload>(descriptor, info, std::forward<Args>(args)...);
    }
    catch (const cl::Error& clError)
    {
        throw WrapClError(clError, CHECK_LOCATION());
    }
}

template <typename Workload, typename QueueDescriptorType, typename... Args>
std::unique_ptr<IWorkload> ClWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
                                                           const WorkloadInfo& info,
                                                           Args&&... args)
{
    try
    {
        return std::make_unique<Workload>(descriptor, info, std::forward<Args>(args)...);
    }
    catch (const cl::Error& clError)
    {
        throw WrapClError(clError, CHECK_LOCATION());
    }
}

void ClWorkloadFactory::InitializeCLCompileContext()
{
    // Initialize our m_CLCompileContext using default device and context
    auto context = arm_compute::CLKernelLibrary::get().context();
    auto device = arm_compute::CLKernelLibrary::get().get_device();
    m_CLCompileContext = arm_compute::CLCompileContext(context, device);

    if (m_ModelContextPtr)
    {
        // Load saved programs if the user has set a filepath
        auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
        auto filePath = modelOptions->GetCachedNetworkFilePath();
        if (!(modelOptions->SaveCachedNetwork()))
        {
            ClContextDeserializer deserializer;
            auto cachedFd = modelOptions->GetCachedFileDescriptor();
            if (cachedFd != -1)
            {
                struct stat statBuffer;
                if (fstat(cachedFd, &statBuffer) == 0)
                {
                    long dataSize = static_cast<long>(statBuffer.st_size);
                    if( dataSize > 0)
                    {
                        auto offset = lseek(cachedFd, 0, SEEK_CUR);
                        if (offset == 0)
                        {
                            std::vector <uint8_t> compiledContextData(static_cast<unsigned int>(dataSize));
                            auto success = pread(cachedFd, compiledContextData.data(), compiledContextData.size(), 0);
                            if (success != -1)
                            {
                                deserializer.DeserializeFromBinary(m_CLCompileContext,
                                                                   context,
                                                                   device,
                                                                   compiledContextData);
                            }
                        }
                    }
                }
            }

            if (filePath != "" && fs::exists(filePath) && fs::is_regular_file(filePath))
            {
                // Deserialize binary file and load into m_CLCompileContext
                deserializer.Deserialize(m_CLCompileContext, context, device, filePath);
            }
        }
    }
}

ClWorkloadFactory::ClWorkloadFactory(const std::shared_ptr<ClMemoryManager>& memoryManager)
    : m_MemoryManager(memoryManager), m_ModelContextPtr(IBackendInternal::IBackendSpecificModelContextPtr{})
{
    InitializeCLCompileContext();
}

ClWorkloadFactory::ClWorkloadFactory(const std::shared_ptr<ClMemoryManager>& memoryManager,
                                     const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
    : m_MemoryManager(memoryManager), m_ModelContextPtr(modelContextPtr)
{
    InitializeCLCompileContext();
}

std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                     const bool IsMemoryManaged) const
{
    IgnoreUnused(IsMemoryManaged);
    std::unique_ptr<ClTensorHandle> tensorHandle = std::make_unique<ClTensorHandle>(tensorInfo);
    tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());

    return tensorHandle;
}

std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                     DataLayout dataLayout,
                                                                     const bool IsMemoryManaged) const
{
    IgnoreUnused(IsMemoryManaged);
    std::unique_ptr<ClTensorHandle> tensorHandle = std::make_unique<ClTensorHandle>(tensorInfo, dataLayout);
    tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());

    return tensorHandle;
}

std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateSubTensorHandle(ITensorHandle& parent,
                                                                        TensorShape const& subTensorShape,
                                                                        unsigned int const* subTensorOrigin) const
{
    arm_compute::Coordinates coords;
    arm_compute::TensorShape shape = armcomputetensorutils::BuildArmComputeTensorShape(subTensorShape);

    coords.set_num_dimensions(subTensorShape.GetNumDimensions());
    for (unsigned int i = 0; i < subTensorShape.GetNumDimensions(); i++)
    {
        // Arm compute indexes tensor coords in reverse order.
        unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
        coords.set(i, armnn::numeric_cast<int>(subTensorOrigin[revertedIndex]));
    }

    const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
    if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
    {
        return nullptr;
    }

    return std::make_unique<ClSubTensorHandle>(
        PolymorphicDowncast<IClTensorHandle*>(&parent), shape, coords);
}
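// Illustrative note (not part of the original file): for a 4-D NHWC parent tensor, a sub-tensor
// origin of { n, h, w, c } supplied through subTensorOrigin is written into the arm_compute
// Coordinates in reverse, i.e. coords = ( c, w, h, n ), matching ACL's fastest-to-slowest
// dimension ordering used by BuildArmComputeTensorShape above.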

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateWorkload(LayerType type,
                                                             const QueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    switch(type)
    {
        case LayerType::Activation :
        {
            auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClActivationWorkload>(*activationQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Addition :
        {
            auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClAdditionWorkload>(*additionQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::ArgMinMax :
        {
            auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::BatchMatMul :
        {
            auto batchMatMulQueueDescriptor = PolymorphicDowncast<const BatchMatMulQueueDescriptor*>(&descriptor);
            return std::make_unique<ClBatchMatMulWorkload>(*batchMatMulQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::BatchNormalization :
        {
            auto batchNormalizationQueueDescriptor
                = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClBatchNormalizationFloatWorkload, NullWorkload>
                (*batchNormalizationQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::BatchToSpaceNd :
        {
            auto batchToSpaceNdQueueDescriptor
                = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Cast :
        {
            auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClCastWorkload>(*castQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::ChannelShuffle :
        {
            auto channelShuffleQueueDescriptor
                = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Comparison :
        {
            auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClComparisonWorkload>(*comparisonQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Concat :
        {
            auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClConcatWorkload>(*concatQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Constant :
        {
            auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClConstantWorkload>(*constantQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::ConvertFp16ToFp32 :
        {
            auto convertFp16ToFp32QueueDescriptor
                = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
            return MakeWorkload<ClConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor,
                                                             info,
                                                             m_CLCompileContext);
        }
        case LayerType::ConvertFp32ToFp16 :
        {
            auto convertFp32ToFp16QueueDescriptor
                = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
            return MakeWorkload<ClConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor,
                                                             info,
                                                             m_CLCompileContext);
        }
        case LayerType::Convolution2d :
        {
            auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
            bool isFastMathEnabled = false;
            if (m_ModelContextPtr)
            {
                if (m_ModelContextPtr.get() != nullptr)
                {
                    auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
                    if (modelOptions)
                    {
                        isFastMathEnabled = modelOptions->IsFastMathEnabled();
                    }
                }
            }
            return MakeWorkload<ClConvolution2dWorkload>(*convolution2dQueueDescriptor,
                                                         info,
                                                         m_MemoryManager->GetIntraLayerManager(),
                                                         m_CLCompileContext,
                                                         isFastMathEnabled);
        }
        case LayerType::Convolution3d :
        {
            auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
            bool isFastMathEnabled = false;
            if (m_ModelContextPtr)
            {
                if (m_ModelContextPtr.get() != nullptr)
                {
                    auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
                    if (modelOptions)
                    {
                        isFastMathEnabled = modelOptions->IsFastMathEnabled();
                    }
                }
            }
            return MakeWorkload<ClConvolution3dWorkload>(*convolution3dQueueDescriptor,
                                                         info,
                                                         m_MemoryManager->GetIntraLayerManager(),
                                                         m_CLCompileContext,
                                                         isFastMathEnabled);
        }
        case LayerType::Debug :
        {
            auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
            return MakeWorkload<NullWorkload, NullWorkload>(*debugQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::DepthToSpace :
        {
            auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::DepthwiseConvolution2d :
        {
            auto depthwiseConvolution2dQueueDescriptor
                = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClDepthwiseConvolutionWorkload>(*depthwiseConvolution2dQueueDescriptor,
                                                                info,
                                                                m_CLCompileContext);
        }
        case LayerType::Dequantize :
        {
            auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClDequantizeWorkload>(*dequantizeQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::DetectionPostProcess :
        {
            auto detectionPostProcessQueueDescriptor
                = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
            return MakeWorkload<NullWorkload, NullWorkload>(*detectionPostProcessQueueDescriptor,
                                                            info,
                                                            m_CLCompileContext);
        }
        case LayerType::Division :
        {
            auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
            return std::make_unique<ClDivisionWorkload>(*divisionQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::ElementwiseBinary :
        {
            auto elementwiseBinaryQueueDescriptor
                = PolymorphicDowncast<const ElementwiseBinaryQueueDescriptor*>(&descriptor);
            switch (elementwiseBinaryQueueDescriptor->m_Parameters.m_Operation)
            {
                case BinaryOperation::Add:
                {
                    AdditionQueueDescriptor additionQueueDescriptor;
                    additionQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    additionQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    additionQueueDescriptor.m_AdditionalInfoObject =
                        elementwiseBinaryQueueDescriptor->m_AdditionalInfoObject;
                    return std::make_unique<ClAdditionWorkload>(additionQueueDescriptor, info, m_CLCompileContext);
                }
                case BinaryOperation::Div:
                {
                    DivisionQueueDescriptor divisionQueueDescriptor;
                    divisionQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    divisionQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    divisionQueueDescriptor.m_AdditionalInfoObject =
                        elementwiseBinaryQueueDescriptor->m_AdditionalInfoObject;
                    return std::make_unique<ClDivisionWorkload>(divisionQueueDescriptor, info, m_CLCompileContext);
                }
                case BinaryOperation::Maximum:
                {
                    MaximumQueueDescriptor maximumQueueDescriptor;
                    maximumQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    maximumQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    maximumQueueDescriptor.m_AdditionalInfoObject =
                        elementwiseBinaryQueueDescriptor->m_AdditionalInfoObject;
                    return std::make_unique<ClMaximumWorkload>(maximumQueueDescriptor, info, m_CLCompileContext);
                }
                case BinaryOperation::Minimum:
                {
                    MinimumQueueDescriptor minimumQueueDescriptor;
                    minimumQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    minimumQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    minimumQueueDescriptor.m_AdditionalInfoObject =
                        elementwiseBinaryQueueDescriptor->m_AdditionalInfoObject;
                    return std::make_unique<ClMinimumWorkload>(minimumQueueDescriptor, info, m_CLCompileContext);
                }
                case BinaryOperation::Mul:
                {
                    MultiplicationQueueDescriptor multiplicationQueueDescriptor;
                    multiplicationQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    multiplicationQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    multiplicationQueueDescriptor.m_AdditionalInfoObject =
                        elementwiseBinaryQueueDescriptor->m_AdditionalInfoObject;
                    return std::make_unique<ClMultiplicationWorkload>(multiplicationQueueDescriptor,
                                                                      info,
                                                                      m_CLCompileContext);
                }
                case BinaryOperation::Power:
                case BinaryOperation::SqDiff:
                {
                    return std::make_unique<ClElementwiseBinaryWorkload>(*elementwiseBinaryQueueDescriptor,
                                                                         info,
                                                                         m_CLCompileContext);
                }
                case BinaryOperation::Sub:
                {
                    SubtractionQueueDescriptor subtractionQueueDescriptor;
                    subtractionQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    subtractionQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    subtractionQueueDescriptor.m_AdditionalInfoObject =
                        elementwiseBinaryQueueDescriptor->m_AdditionalInfoObject;
                    return std::make_unique<ClSubtractionWorkload>(subtractionQueueDescriptor,
                                                                   info,
                                                                   m_CLCompileContext);
                }
                default:
                    return nullptr;
            }
        }
        case LayerType::ElementwiseUnary :
        {
            auto elementwiseUnaryQueueDescriptor
                = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
            switch(elementwiseUnaryQueueDescriptor->m_Parameters.m_Operation)
            {
                case UnaryOperation::Abs:
                {
                    AbsQueueDescriptor absQueueDescriptor;
                    absQueueDescriptor.m_Inputs = elementwiseUnaryQueueDescriptor->m_Inputs;
                    absQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
                    return std::make_unique<ClAbsWorkload>(absQueueDescriptor, info, m_CLCompileContext);
                }
                case UnaryOperation::Exp:
                    return std::make_unique<ClExpWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
                case UnaryOperation::Log:
                    return std::make_unique<ClLogWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
                case UnaryOperation::LogicalNot:
                    return std::make_unique<ClLogicalNotWorkload>(*elementwiseUnaryQueueDescriptor,
                                                                  info,
                                                                  m_CLCompileContext);
                case UnaryOperation::Neg:
                    return std::make_unique<ClNegWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
                case UnaryOperation::Rsqrt:
                {
                    RsqrtQueueDescriptor rsqrtQueueDescriptor;
                    rsqrtQueueDescriptor.m_Inputs = elementwiseUnaryQueueDescriptor->m_Inputs;
                    rsqrtQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
                    return std::make_unique<ClRsqrtWorkload>(rsqrtQueueDescriptor, info, m_CLCompileContext);
                }
                case UnaryOperation::Sin:
                    return std::make_unique<ClSinWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
                case UnaryOperation::Sqrt:
                    return std::make_unique<ClSqrtWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
                default:
                    return nullptr;
            }
        }
        case LayerType::Fill :
        {
            auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
            return std::make_unique<ClFillWorkload>(*fillQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Floor :
        {
            auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClFloorFloatWorkload, NullWorkload>(*floorQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::FullyConnected :
        {
            auto fullyConnectedQueueDescriptor
                = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClFullyConnectedWorkload>(*fullyConnectedQueueDescriptor,
                                                          info,
                                                          m_MemoryManager->GetIntraLayerManager(),
                                                          m_CLCompileContext);
        }
        case LayerType::Gather :
        {
            auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClGatherWorkload>(*gatherQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::GatherNd :
        {
            auto gatherNdQueueDescriptor = PolymorphicDowncast<const GatherNdQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClGatherNdWorkload>(*gatherNdQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Input :
        {
            auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
            return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
        }
        case LayerType::InstanceNormalization :
        {
            auto instanceNormalizationQueueDescriptor
                = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor,
                                                                 info,
                                                                 m_CLCompileContext);
        }
        case LayerType::L2Normalization :
        {
            auto l2NormalizationQueueDescriptor
                = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClL2NormalizationFloatWorkload, NullWorkload>(*l2NormalizationQueueDescriptor,
                                                                              info,
                                                                              m_CLCompileContext);
        }
        case LayerType::LogicalBinary :
        {
            auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
            switch(logicalBinaryQueueDescriptor->m_Parameters.m_Operation)
            {
                case LogicalBinaryOperation::LogicalAnd:
                    return std::make_unique<ClLogicalAndWorkload>(*logicalBinaryQueueDescriptor,
                                                                  info,
                                                                  m_CLCompileContext);
                case LogicalBinaryOperation::LogicalOr:
                    return std::make_unique<ClLogicalOrWorkload>(*logicalBinaryQueueDescriptor,
                                                                 info,
                                                                 m_CLCompileContext);
                default:
                    return nullptr;
            }
        }
        case LayerType::LogSoftmax :
        {
            auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor,
                                                      info,
                                                      m_MemoryManager->GetIntraLayerManager(),
                                                      m_CLCompileContext);
        }
        case LayerType::Lstm :
        {
            auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClLstmFloatWorkload, NullWorkload>(*lstmQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Maximum :
        {
            auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClMaximumWorkload>(*maximumQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Mean :
        {
            auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClMeanWorkload>(*meanQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::MemCopy :
        {
            auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
            if (memCopyQueueDescriptor->m_Inputs.empty() || !memCopyQueueDescriptor->m_Inputs[0])
            {
                throw InvalidArgumentException("ClWorkloadFactory: Invalid null input for MemCopy workload");
            }
            return MakeWorkload<CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
        }
        case LayerType::MemImport :
        {
            auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
            if (memImportQueueDescriptor->m_Inputs.empty() || !memImportQueueDescriptor->m_Inputs[0])
            {
                throw InvalidArgumentException("ClWorkloadFactory: Invalid null input for MemImport workload");
            }
            return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
        }
        case LayerType::Minimum :
        {
            auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClMinimumWorkload>(*minimumQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Multiplication :
        {
            auto multiplicationQueueDescriptor = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClMultiplicationWorkload>(*multiplicationQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Normalization :
        {
            auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClNormalizationFloatWorkload, NullWorkload>(*normalizationQueueDescriptor,
                                                                            info,
                                                                            m_CLCompileContext);
        }
        case LayerType::Output :
        {
            auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
            return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
        }
        case LayerType::Pad :
        {
            auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClPadWorkload>(*padQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Permute :
        {
            auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClPermuteWorkload>(*permuteQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Pooling2d :
        {
            auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClPooling2dWorkload>(*pooling2dQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Pooling3d :
        {
            auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClPooling3dWorkload>(*pooling3dQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::PreCompiled :
        {
            auto preCompiledQueueDescriptor = PolymorphicDowncast<const PreCompiledQueueDescriptor*>(&descriptor);
            return MakeWorkload<NullWorkload, NullWorkload>(*preCompiledQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Prelu :
        {
            auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClPreluWorkload>(*preluQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::QLstm :
        {
            auto qLstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<ClQLstmWorkload>(*qLstmQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Quantize :
        {
            auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClQuantizeWorkload>(*quantizeQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::QuantizedLstm :
        {
            auto quantizedLstmQueueDescriptor = PolymorphicDowncast<const QuantizedLstmQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClQuantizedLstmWorkload>(*quantizedLstmQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Rank :
        {
            auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
            return std::make_unique<ClRankWorkload>(*rankQueueDescriptor, info);
        }
        case LayerType::Reduce :
        {
            auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
            return std::make_unique<ClReduceWorkload>(*reduceQueueDescriptor, info);
        }
        case LayerType::Reshape :
        {
            auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClReshapeWorkload>(*reshapeQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Resize :
        {
            auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClResizeWorkload>(*resizeQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Slice :
        {
            auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClSliceWorkload>(*sliceQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Softmax :
        {
            auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<ClSoftmaxWorkload>(*softmaxQueueDescriptor,
                                                       info,
                                                       m_MemoryManager->GetIntraLayerManager(),
                                                       m_CLCompileContext);
        }
        case LayerType::SpaceToBatchNd :
        {
            auto spaceToBatchNdQueueDescriptor
                = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::SpaceToDepth :
        {
            auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Splitter :
        {
            auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClSplitterWorkload>(*splitterQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Stack :
        {
            auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClStackWorkload>(*stackQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::StridedSlice :
        {
            auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClStridedSliceWorkload>(*stridedSliceQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Subtraction :
        {
            auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClSubtractionWorkload>(*subtractionQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Tile:
        {
            auto tileQueueDescriptor = PolymorphicDowncast<const TileQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClTileWorkload>(*tileQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Transpose :
        {
            auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClTransposeWorkload>(*transposeQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::TransposeConvolution2d :
        {
            auto transposeConvolution2dQueueDescriptor
                = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor,
                                                                  info,
                                                                  m_MemoryManager->GetIntraLayerManager(),
                                                                  m_CLCompileContext);
        }
        case LayerType::UnidirectionalSequenceLstm :
        {
            auto desc = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<ClUnidirectionalSequenceLstmFloatWorkload, NullWorkload>(*desc,
                                                                                               info,
                                                                                               m_CLCompileContext);
        }
        default:
            return nullptr;
    }
}

} // namespace armnn
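The listing above is the complete factory implementation. As a rough, illustrative sketch of how a caller drives CreateWorkload (the descriptor and info types are real ArmNN types, but the surrounding setup of the factory and tensor handles is simplified and assumed here):

    // Assumes `factory` is an already-constructed armnn::ClWorkloadFactory.
    armnn::ActivationQueueDescriptor activationDescriptor;
    activationDescriptor.m_Parameters.m_Function = armnn::ActivationFunction::ReLu;
    // m_Inputs / m_Outputs would be populated with ITensorHandle* created by the factory.

    armnn::WorkloadInfo workloadInfo;
    // workloadInfo.m_InputTensorInfos / m_OutputTensorInfos describe the tensors involved.

    std::unique_ptr<armnn::IWorkload> workload =
        factory.CreateWorkload(armnn::LayerType::Activation, activationDescriptor, workloadInfo);
    if (!workload)
    {
        // A null return means the layer type is not handled by the CL factory
        // (the `default` branch of the switch above).
    }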