WorkloadFactory.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <Layer.hpp>
#include <LayersFwd.hpp>

#include <armnn/Types.hpp>
#include <armnn/LayerSupport.hpp>
#include <armnn/ILayerSupport.hpp>
#include <armnn/BackendRegistry.hpp>

#include <backendsCommon/WorkloadFactory.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <armnn/backends/IBackendInternal.hpp>

#include <boost/cast.hpp>
#include <boost/iterator/transform_iterator.hpp>

#include <cstring>
#include <sstream>

namespace armnn
{

namespace
{

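// Returns a copy of the given TensorInfo with its data type replaced when an
// override is supplied; used below to ask a backend whether it would support
// a layer if it ran under a different data type (e.g. an FP16 fallback).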
const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
{
    if (!type)
    {
        return info;
    }

    return TensorInfo(info.GetShape(), type.value(), info.GetQuantizationScale(), info.GetQuantizationOffset());
}

} // anonymous namespace

bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
                                        const IConnectableLayer& connectableLayer,
                                        Optional<DataType> dataType,
                                        std::string& outReasonIfUnsupported)
{
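    // Resolve the backend registered under backendId, then forward the layer's
    // tensor infos and parameters to that backend's ILayerSupport object.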
    Optional<std::string&> reason = outReasonIfUnsupported;
    bool result;
    const Layer& layer = *(boost::polymorphic_downcast<const Layer*>(&connectableLayer));

    auto const& backendRegistry = BackendRegistryInstance();
    if (!backendRegistry.IsBackendRegistered(backendId))
    {
        std::stringstream ss;
        ss << connectableLayer.GetName() << " is not supported on " << backendId
           << " because this backend is not registered.";

        outReasonIfUnsupported = ss.str();
        return false;
    }

    auto backendFactory = backendRegistry.GetFactory(backendId);
    auto backendObject = backendFactory();
    auto layerSupportObject = backendObject->GetLayerSupport();

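    // Dispatch on layer type: each case gathers the connected input/output
    // TensorInfos (optionally overridden to dataType) plus the layer's
    // parameters and queries the matching Is<Layer>Supported function.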
    switch(layer.GetType())
    {
        case LayerType::Activation:
        {
            auto cLayer = boost::polymorphic_downcast<const ActivationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsActivationSupported(
                                            OverrideDataType(input, dataType),
                                            OverrideDataType(output, dataType),
                                            cLayer->GetParameters(),
                                            reason);
            break;
        }
        case LayerType::Addition:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsAdditionSupported(
                                            OverrideDataType(input0, dataType),
                                            OverrideDataType(input1, dataType),
                                            OverrideDataType(output, dataType),
                                            reason);
            break;
        }
        case LayerType::ArgMinMax:
        {
            auto cLayer = boost::polymorphic_downcast<const ArgMinMaxLayer*>(&layer);
            const ArgMinMaxDescriptor& descriptor = cLayer->GetParameters();

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsArgMinMaxSupported(
                                            OverrideDataType(input, dataType),
                                            OverrideDataType(output, DataType::Signed32),
                                            descriptor,
                                            reason);
            break;
        }
        case LayerType::BatchNormalization:
        {
            auto cLayer = boost::polymorphic_downcast<const BatchNormalizationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            const TensorInfo& mean = cLayer->m_Mean->GetTensorInfo();
            const TensorInfo& var = cLayer->m_Variance->GetTensorInfo();
            const TensorInfo& beta = cLayer->m_Beta->GetTensorInfo();
            const TensorInfo& gamma = cLayer->m_Gamma->GetTensorInfo();
            result = layerSupportObject->IsBatchNormalizationSupported(
                                            OverrideDataType(input, dataType),
                                            OverrideDataType(output, dataType),
                                            OverrideDataType(mean, dataType),
                                            OverrideDataType(var, dataType),
                                            OverrideDataType(beta, dataType),
                                            OverrideDataType(gamma, dataType),
                                            cLayer->GetParameters(),
                                            reason);
            break;
        }
        case LayerType::BatchToSpaceNd:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            auto cLayer = boost::polymorphic_downcast<const BatchToSpaceNdLayer*>(&layer);

            result = layerSupportObject->IsBatchToSpaceNdSupported(OverrideDataType(input, dataType),
                                                                   OverrideDataType(output, dataType),
                                                                   cLayer->GetParameters(),
                                                                   reason);
            break;
        }
        case LayerType::Comparison:
        {
            auto cLayer = boost::polymorphic_downcast<const ComparisonLayer*>(&layer);

            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsComparisonSupported(OverrideDataType(input0, dataType),
                                                               OverrideDataType(input1, dataType),
                                                               OverrideDataType(output, DataType::Boolean),
                                                               cLayer->GetParameters(),
                                                               reason);
            break;
        }
        case LayerType::Constant:
        {
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsConstantSupported(OverrideDataType(output, dataType), reason);
            break;
        }
        case LayerType::ConvertFp16ToFp32:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsConvertFp16ToFp32Supported(input, output, reason);
            break;
        }
        case LayerType::ConvertFp32ToFp16:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsConvertFp32ToFp16Supported(input, output, reason);
            break;
        }
        case LayerType::Convolution2d:
        {
            auto cLayer = boost::polymorphic_downcast<const Convolution2dLayer*>(&layer);

            const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                      dataType);
            const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
            BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);

            const Convolution2dDescriptor& descriptor = cLayer->GetParameters();

            // Construct optional biases object based on the value of m_BiasEnabled
            Optional<TensorInfo> biases;
            if (descriptor.m_BiasEnabled)
            {
                biases =
                    OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
            }

            result = layerSupportObject->IsConvolution2dSupported(
                                             input,
                                             output,
                                             descriptor,
                                             OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
                                             biases,
                                             reason);
            break;
        }
        case LayerType::Debug:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsDebugSupported(OverrideDataType(input, dataType),
                                                          OverrideDataType(output, dataType),
                                                          reason);
            break;
        }
        case LayerType::DepthToSpace:
        {
            auto cLayer = boost::polymorphic_downcast<const DepthToSpaceLayer*>(&layer);

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsDepthToSpaceSupported(OverrideDataType(input, dataType),
                                                                 OverrideDataType(output, dataType),
                                                                 cLayer->GetParameters(),
                                                                 reason);
            break;
        }
        case LayerType::DepthwiseConvolution2d:
        {
            auto cLayer = boost::polymorphic_downcast<const DepthwiseConvolution2dLayer*>(&layer);
            const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                       dataType);
            const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
            BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);

            const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();

            // Construct optional biases object based on the value of m_BiasEnabled
            Optional<TensorInfo> biases;
            if (descriptor.m_BiasEnabled)
            {
                biases =
                    OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
            }

            result = layerSupportObject->IsDepthwiseConvolutionSupported(
                                             input,
                                             output,
                                             descriptor,
                                             OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
                                             biases,
                                             reason);
            break;
        }
        case LayerType::Dequantize:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsDequantizeSupported(input,
                                                               OverrideDataType(output, dataType),
                                                               reason);
            break;
        }
        case LayerType::DetectionPostProcess:
        {
            auto cLayer = boost::polymorphic_downcast<const DetectionPostProcessLayer*>(&layer);
            const TensorInfo& boxEncodings = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& scores = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& anchors = cLayer->m_Anchors->GetTensorInfo();

            const TensorInfo& detectionBoxes = layer.GetOutputSlot(0).GetTensorInfo();
            const TensorInfo& detectionClasses = layer.GetOutputSlot(1).GetTensorInfo();
            const TensorInfo& detectionScores = layer.GetOutputSlot(2).GetTensorInfo();
            const TensorInfo& numDetections = layer.GetOutputSlot(3).GetTensorInfo();

            const DetectionPostProcessDescriptor& descriptor = cLayer->GetParameters();
            result = layerSupportObject->IsDetectionPostProcessSupported(boxEncodings,
                                                                         scores,
                                                                         anchors,
                                                                         detectionBoxes,
                                                                         detectionClasses,
                                                                         detectionScores,
                                                                         numDetections,
                                                                         descriptor,
                                                                         reason);
            break;
        }
        case LayerType::ElementwiseUnary:
        {
            auto cLayer = boost::polymorphic_downcast<const ElementwiseUnaryLayer*>(&layer);

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsElementwiseUnarySupported(OverrideDataType(input, dataType),
                                                                     OverrideDataType(output, dataType),
                                                                     cLayer->GetParameters(),
                                                                     reason);
            break;
        }
        case LayerType::FakeQuantization:
        {
            auto cLayer = boost::polymorphic_downcast<const FakeQuantizationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = layerSupportObject->IsFakeQuantizationSupported(OverrideDataType(input, dataType),
                                                                     cLayer->GetParameters(),
                                                                     reason);
            break;
        }
        case LayerType::Floor:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsFloorSupported(OverrideDataType(input, dataType),
                                                          OverrideDataType(output, dataType),
                                                          reason);
            break;
        }
        case LayerType::FullyConnected:
        {
            auto cLayer = boost::polymorphic_downcast<const FullyConnectedLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);

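            // IsFullyConnectedSupported always takes a bias TensorInfo, so the
            // static dummies below stand in for the bias when it is disabled.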
            TensorInfo biasInfo;
            const TensorInfo* biasInfoPtr = nullptr;
            static const TensorInfo dummyFloat16Bias(TensorShape({1,1,1,1}), DataType::Float16);
            static const TensorInfo dummyFloat32Bias(TensorShape({1,1,1,1}), DataType::Float32);
            static const TensorInfo dummyQA8Bias(TensorShape({1,1,1,1}), DataType::Signed32);

            const FullyConnectedDescriptor& descriptor = cLayer->GetParameters();
            if (descriptor.m_BiasEnabled)
            {
                BOOST_ASSERT(cLayer->m_Bias.get() != nullptr);
                biasInfo = OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
                biasInfoPtr = &biasInfo;
            }
            else
            {
                // If biases are not enabled, pass a dummy TensorInfo for the validation.
                switch(input.GetDataType())
                {
                    case DataType::Float16:
                    {
                        biasInfoPtr = &dummyFloat16Bias;
                        break;
                    }
                    case DataType::Float32:
                    {
                        biasInfoPtr = &dummyFloat32Bias;
                        break;
                    }
                    case DataType::QAsymmU8:
                    case DataType::QAsymmS8:
                    case DataType::QSymmS8:
                    case DataType::QSymmS16:
                    {
                        biasInfoPtr = &dummyQA8Bias;
                        break;
                    }
                    default:
                    {
                        BOOST_ASSERT_MSG(false, "Unexpected bias type");
                    }
                }
            }

            result = layerSupportObject->IsFullyConnectedSupported(
                                             OverrideDataType(input, dataType),
                                             OverrideDataType(output, dataType),
                                             OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
                                             *biasInfoPtr,
                                             descriptor,
                                             reason);
            break;
        }
        case LayerType::Gather:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsGatherSupported(OverrideDataType(input0, dataType),
                                                           input1,
                                                           OverrideDataType(output, dataType),
                                                           reason);
            break;
        }
        case LayerType::Input:
        {
            const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsInputSupported(OverrideDataType(input, dataType), reason);
            break;
        }
        case LayerType::InstanceNormalization:
        {
            auto cLayer = boost::polymorphic_downcast<const InstanceNormalizationLayer*>(&layer);
            const InstanceNormalizationDescriptor& descriptor = cLayer->GetParameters();

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsInstanceNormalizationSupported(
                                            OverrideDataType(input, dataType),
                                            OverrideDataType(output, dataType),
                                            descriptor,
                                            reason);
            break;
        }
        case LayerType::L2Normalization:
        {
            auto cLayer = boost::polymorphic_downcast<const L2NormalizationLayer*>(&layer);
            const L2NormalizationDescriptor& descriptor = cLayer->GetParameters();

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsL2NormalizationSupported(
                                            OverrideDataType(input, dataType),
                                            OverrideDataType(output, dataType),
                                            descriptor,
                                            reason);
            break;
        }
        case LayerType::LogSoftmax:
        {
            auto cLayer = boost::polymorphic_downcast<const LogSoftmaxLayer*>(&layer);

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsLogSoftmaxSupported(OverrideDataType(input, dataType),
                                                               OverrideDataType(output, dataType),
                                                               cLayer->GetParameters(),
                                                               reason);
            break;
        }
        case LayerType::Lstm:
        {
            auto cLayer = boost::polymorphic_downcast<const LstmLayer*>(&layer);
            const LstmDescriptor& descriptor = cLayer->GetParameters();

            // All inputs.
            const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                       dataType);
            const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
                                                               dataType);
            const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
                                                             dataType);
            // All outputs
            const TensorInfo& scratchBuffer = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
            const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
            const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
            const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(3).GetTensorInfo(), dataType);

            // Basic parameters
            const TensorInfo& inputToForgetWeights
                    = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
            const TensorInfo& inputToCellWeights
                    = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
            const TensorInfo& inputToOutputWeights
                    = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
            const TensorInfo& recurrentToForgetWeights
                    = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
            const TensorInfo& recurrentToCellWeights
                    = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
            const TensorInfo& recurrentToOutputWeights
                    = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
            const TensorInfo& forgetGateBias
                    = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
            const TensorInfo& cellBias
                    = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
            const TensorInfo& outputGateBias
                    = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);

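            // Wire pointers to every mandatory weight/bias into LstmInputParamsInfo;
            // the optional CIFG, projection, peephole and layer-norm tensors are
            // attached further down only when the descriptor enables them.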
            LstmInputParamsInfo paramsInfo;

            paramsInfo.m_InputToForgetWeights = &inputToForgetWeights;
            paramsInfo.m_InputToCellWeights = &inputToCellWeights;
            paramsInfo.m_InputToOutputWeights = &inputToOutputWeights;
            paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
            paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights;
            paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
            paramsInfo.m_ForgetGateBias = &forgetGateBias;
            paramsInfo.m_CellBias = &cellBias;
            paramsInfo.m_OutputGateBias = &outputGateBias;

            // Optional parameters
            TensorInfo optInputToInputWeights;
            TensorInfo optRecurrentToInputWeights;
            TensorInfo optCellToInputWeights;
            TensorInfo optInputGateBias;
            TensorInfo optProjectionWeights;
            TensorInfo optProjectionBias;
            TensorInfo optCellToForgetWeights;
            TensorInfo optCellToOutputWeights;
            TensorInfo optInputLayerNormWeights;
            TensorInfo optForgetLayerNormWeights;
            TensorInfo optCellLayerNormWeights;
            TensorInfo optOutputLayerNormWeights;

            if(!descriptor.m_CifgEnabled)
            {
                optInputToInputWeights =
                    OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
                paramsInfo.m_InputToInputWeights = &optInputToInputWeights;

                optRecurrentToInputWeights =
                    OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
                paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
                if (cLayer->m_CifgParameters.m_CellToInputWeights != nullptr)
                {
                    optCellToInputWeights =
                        OverrideDataType(cLayer->m_CifgParameters.m_CellToInputWeights->GetTensorInfo(), dataType);
                    paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
                }
                optInputGateBias =
                    OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
                paramsInfo.m_InputGateBias = &optInputGateBias;
            }

            if(descriptor.m_ProjectionEnabled)
            {
                optProjectionWeights =
                    OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
                paramsInfo.m_ProjectionWeights = &optProjectionWeights;
                if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
                {
                    optProjectionBias =
                        OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
                    paramsInfo.m_ProjectionBias = &optProjectionBias;
                }
            }

            if(descriptor.m_PeepholeEnabled)
            {
                optCellToForgetWeights =
                    OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
                paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
                optCellToOutputWeights =
                    OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
                paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
            }

            if(descriptor.m_LayerNormEnabled)
            {
                if (!descriptor.m_CifgEnabled)
                {
                    optInputLayerNormWeights = OverrideDataType(
                            cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
                    paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
                }

                optForgetLayerNormWeights = OverrideDataType(
                        cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
                paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;

                optCellLayerNormWeights = OverrideDataType(
                        cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
                paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;

                optOutputLayerNormWeights = OverrideDataType(
                        cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
                paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
            }

            result = layerSupportObject->IsLstmSupported(
                                             input,
                                             outputStateIn,
                                             cellStateIn,
                                             scratchBuffer,
                                             outputStateOut,
                                             cellStateOut,
                                             output,
                                             descriptor,
                                             paramsInfo,
                                             reason);
            break;
        }
        case LayerType::Maximum:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsMaximumSupported(OverrideDataType(input0, dataType),
                                                            OverrideDataType(input1, dataType),
                                                            OverrideDataType(output, dataType),
                                                            reason);
            break;
        }
        case LayerType::MemCopy:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsMemCopySupported(OverrideDataType(input, dataType),
                                                            OverrideDataType(output, dataType),
                                                            reason);
            break;
        }
        case LayerType::MemImport:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsMemImportSupported(OverrideDataType(input, dataType),
                                                              OverrideDataType(output, dataType),
                                                              reason);
            break;
        }
        case LayerType::Merge:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsMergeSupported(OverrideDataType(input0, dataType),
                                                          OverrideDataType(input1, dataType),
                                                          OverrideDataType(output, dataType),
                                                          reason);
            break;
        }
        case LayerType::Concat:
        {
            auto cLayer = boost::polymorphic_downcast<const ConcatLayer*>(&layer);

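            // boost::transform_iterator applies the lambdas lazily while the
            // slot ranges are copied into the vectors, avoiding explicit loops.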
            // Get vector of all inputs.
            auto getTensorInfo = [&dataType](const InputSlot& slot)
                {
                    return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
                };
            auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo);
            auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo);
            std::vector<TensorInfo> inputs(beginI, endI);

            auto getTensorInfoPtr = [](const TensorInfo& info)
                {
                    return &info;
                };
            auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
            auto endPtr = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
            std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);

            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsConcatSupported(inputPtrs, output, cLayer->GetParameters(), reason);

            break;
        }
        case LayerType::Multiplication:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsMultiplicationSupported(
                                             OverrideDataType(input0, dataType),
                                             OverrideDataType(input1, dataType),
                                             OverrideDataType(output, dataType),
                                             reason);
            break;
        }
        case LayerType::Normalization:
        {
            auto cLayer = boost::polymorphic_downcast<const NormalizationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsNormalizationSupported(OverrideDataType(input, dataType),
                                                                  OverrideDataType(output, dataType),
                                                                  cLayer->GetParameters(),
                                                                  reason);
            break;
        }
        case LayerType::Output:
        {
            const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = layerSupportObject->IsOutputSupported(OverrideDataType(output, dataType), reason);
            break;
        }
        case LayerType::Permute:
        {
            auto cLayer = boost::polymorphic_downcast<const PermuteLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsPermuteSupported(OverrideDataType(input, dataType),
                                                            OverrideDataType(output, dataType),
                                                            cLayer->GetParameters(),
                                                            reason);
            break;
        }
        case LayerType::Pad:
        {
            auto cLayer = boost::polymorphic_downcast<const PadLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsPadSupported(
                                            OverrideDataType(input, dataType),
                                            OverrideDataType(output, dataType),
                                            cLayer->GetParameters(),
                                            reason);
            break;
        }
        case LayerType::Pooling2d:
        {
            auto cLayer = boost::polymorphic_downcast<const Pooling2dLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsPooling2dSupported(OverrideDataType(input, dataType),
                                                              OverrideDataType(output, dataType),
                                                              cLayer->GetParameters(),
                                                              reason);
            break;
        }
        case LayerType::PreCompiled:
        {
            auto cLayer = boost::polymorphic_downcast<const PreCompiledLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = layerSupportObject->IsPreCompiledSupported(OverrideDataType(input, dataType),
                                                                cLayer->GetParameters(),
                                                                reason);
            break;
        }
        case LayerType::Quantize:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsQuantizeSupported(input, output, reason);
            break;
        }
        case LayerType::QuantizedLstm:
        {
            auto cLayer = boost::polymorphic_downcast<const QuantizedLstmLayer*>(&layer);

            // Inputs
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& previousCellStateIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& previousOutputIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();

            // Outputs
            const TensorInfo& cellStateOut = layer.GetOutputSlot(0).GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(1).GetTensorInfo();

            // QuantizedLstm parameters
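            // Unlike the float LSTM above, every weight and bias is mandatory
            // here, so each TensorInfo is referenced without a data-type override.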
            QuantizedLstmInputParamsInfo paramsInfo;

            paramsInfo.m_InputToInputWeights =
                &cLayer->m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo();
            paramsInfo.m_InputToForgetWeights =
                &cLayer->m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo();
            paramsInfo.m_InputToCellWeights =
                &cLayer->m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo();
            paramsInfo.m_InputToOutputWeights =
                &cLayer->m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo();

            paramsInfo.m_RecurrentToInputWeights =
                &cLayer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo();
            paramsInfo.m_RecurrentToForgetWeights =
                &cLayer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo();
            paramsInfo.m_RecurrentToCellWeights =
                &cLayer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo();
            paramsInfo.m_RecurrentToOutputWeights =
                &cLayer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo();

            paramsInfo.m_InputGateBias =
                &cLayer->m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo();
            paramsInfo.m_ForgetGateBias =
                &cLayer->m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo();
            paramsInfo.m_CellBias =
                &cLayer->m_QuantizedLstmParameters.m_CellBias->GetTensorInfo();
            paramsInfo.m_OutputGateBias =
                &cLayer->m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo();

            result = layerSupportObject->IsQuantizedLstmSupported(input,
                                                                  previousCellStateIn,
                                                                  previousOutputIn,
                                                                  cellStateOut,
                                                                  output,
                                                                  paramsInfo,
                                                                  reason);
            break;
        }
        case LayerType::Division:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsDivisionSupported(
                                             OverrideDataType(input0, dataType),
                                             OverrideDataType(input1, dataType),
                                             OverrideDataType(output, dataType),
                                             reason);
            break;
        }
        case LayerType::Reshape:
        {
            auto cLayer = boost::polymorphic_downcast<const ReshapeLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsReshapeSupported(OverrideDataType(input, dataType),
                                                            OverrideDataType(output, dataType),
                                                            cLayer->GetParameters(),
                                                            reason);
            break;
        }
        case LayerType::Resize:
        {
            auto cLayer = boost::polymorphic_downcast<const ResizeLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsResizeSupported(OverrideDataType(input, dataType),
                                                           OverrideDataType(output, dataType),
                                                           cLayer->GetParameters(),
                                                           reason);
            break;
        }
        case LayerType::Slice:
        {
            auto cLayer = boost::polymorphic_downcast<const SliceLayer*>(&layer);

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsSliceSupported(OverrideDataType(input, dataType),
                                                          OverrideDataType(output, dataType),
                                                          cLayer->GetParameters(),
                                                          reason);
            break;
        }
        case LayerType::Softmax:
        {
            auto cLayer = boost::polymorphic_downcast<const SoftmaxLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsSoftmaxSupported(OverrideDataType(input, dataType),
                                                            OverrideDataType(output, dataType),
                                                            cLayer->GetParameters(),
                                                            reason);
            break;
        }
        case LayerType::SpaceToBatchNd:
        {
            auto cLayer = boost::polymorphic_downcast<const SpaceToBatchNdLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsSpaceToBatchNdSupported(OverrideDataType(input, dataType),
                                                                   OverrideDataType(output, dataType),
                                                                   cLayer->GetParameters(),
                                                                   reason);
            break;
        }
        case LayerType::SpaceToDepth:
        {
            auto cLayer = boost::polymorphic_downcast<const SpaceToDepthLayer*>(&layer);

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsSpaceToDepthSupported(OverrideDataType(input, dataType),
                                                                 OverrideDataType(output, dataType),
                                                                 cLayer->GetParameters(),
                                                                 reason);
            break;
        }
        case LayerType::Splitter:
        {
            auto cLayer = boost::polymorphic_downcast<const SplitterLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();

            // Get vector of all outputs.
            auto getTensorInfo = [&dataType](const OutputSlot& slot)
                {
                    return OverrideDataType(slot.GetTensorInfo(), dataType);
                };
            auto beginI = boost::make_transform_iterator(layer.GetOutputSlots().begin(), getTensorInfo);
            auto endI = boost::make_transform_iterator(layer.GetOutputSlots().end(), getTensorInfo);
            std::vector<TensorInfo> outputs(beginI, endI);

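            // IsSplitterSupported takes the output infos as reference_wrappers
            // rather than raw pointers, so build that view over the local vector.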
            const std::vector<std::reference_wrapper<TensorInfo>> outputPtrs(outputs.begin(), outputs.end());

            result = layerSupportObject->IsSplitterSupported(OverrideDataType(input, dataType),
                                                             outputPtrs,
                                                             cLayer->GetParameters(),
                                                             reason);
            break;
        }
        case LayerType::Stack:
        {
            auto cLayer = boost::polymorphic_downcast<const StackLayer*>(&layer);

            // Get vector of all inputs.
            auto getTensorInfo = [&dataType](const InputSlot& slot)
                {
                    return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
                };
            auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo);
            auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo);
            std::vector<TensorInfo> inputs(beginI, endI);

            auto getTensorInfoPtr = [](const TensorInfo& info)
                {
                    return &info;
                };
            auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
            auto endPtr = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
            std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);

            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason);

            break;
        }
        case LayerType::StandIn:
        {
            auto cLayer = boost::polymorphic_downcast<const StandInLayer*>(&layer);

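            // StandIn represents an operator unknown to ArmNN, so every input
            // and output TensorInfo is forwarded for the backend to judge.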
            // Get vector of all inputs.
            auto getTensorInfoIn = [&dataType](const InputSlot& slot)
                {
                    return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
                };
            auto getTensorInfoOut = [&dataType](const OutputSlot& slot)
                {
                    return OverrideDataType(slot.GetTensorInfo(), dataType);
                };
            auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfoIn);
            auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfoIn);
            std::vector<TensorInfo> inputs(beginI, endI);

            auto beginO = boost::make_transform_iterator(layer.GetOutputSlots().begin(), getTensorInfoOut);
            auto endO = boost::make_transform_iterator(layer.GetOutputSlots().end(), getTensorInfoOut);
            std::vector<TensorInfo> outputs(beginO, endO);

            auto getTensorInfoPtr = [](const TensorInfo& info)
                {
                    return &info;
                };
            auto beginPtrI = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
            auto endPtrI = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
            std::vector<const TensorInfo*> inputPtrs(beginPtrI, endPtrI);

            auto beginPtrO = boost::make_transform_iterator(outputs.begin(), getTensorInfoPtr);
            auto endPtrO = boost::make_transform_iterator(outputs.end(), getTensorInfoPtr);
            std::vector<const TensorInfo*> outputPtrs(beginPtrO, endPtrO);

            result = layerSupportObject->IsStandInSupported(inputPtrs,
                                                            outputPtrs,
                                                            cLayer->GetParameters(),
                                                            reason);
            break;
        }
        case LayerType::StridedSlice:
        {
            auto cLayer = boost::polymorphic_downcast<const StridedSliceLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsStridedSliceSupported(OverrideDataType(input, dataType),
                                                                 OverrideDataType(output, dataType),
                                                                 cLayer->GetParameters(),
                                                                 reason);
            break;
        }
        case LayerType::Subtraction:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsSubtractionSupported(
                                             OverrideDataType(input0, dataType),
                                             OverrideDataType(input1, dataType),
                                             OverrideDataType(output, dataType),
                                             reason);
            break;
        }
        case LayerType::Switch:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output0 = layer.GetOutputSlot(0).GetTensorInfo();
            const TensorInfo& output1 = layer.GetOutputSlot(1).GetTensorInfo();
            result = layerSupportObject->IsSwitchSupported(OverrideDataType(input0, dataType),
                                                           OverrideDataType(input1, dataType),
                                                           OverrideDataType(output0, dataType),
                                                           OverrideDataType(output1, dataType),
                                                           reason);
            break;
        }
        case LayerType::Mean:
        {
            auto cLayer = boost::polymorphic_downcast<const MeanLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsMeanSupported(
                                             OverrideDataType(input, dataType),
                                             OverrideDataType(output, dataType),
                                             cLayer->GetParameters(),
                                             reason);
            break;
        }
        case LayerType::Minimum:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsMinimumSupported(OverrideDataType(input0, dataType),
                                                            OverrideDataType(input1, dataType),
                                                            OverrideDataType(output, dataType),
                                                            reason);
            break;
        }
        case LayerType::Prelu:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& alpha = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsPreluSupported(OverrideDataType(input, dataType),
                                                          OverrideDataType(alpha, dataType),
                                                          OverrideDataType(output, dataType),
                                                          reason);
            break;
        }
        case LayerType::TransposeConvolution2d:
        {
            auto cLayer = boost::polymorphic_downcast<const TransposeConvolution2dLayer*>(&layer);

            const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                      dataType);
            const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);

            const TransposeConvolution2dDescriptor& descriptor = cLayer->GetParameters();

            Optional<TensorInfo> biases;
            if (descriptor.m_BiasEnabled)
            {
                BOOST_ASSERT(cLayer->m_Bias.get() != nullptr);
                biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
                                          GetBiasTypeFromWeightsType(dataType));
            }

            BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
            const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);

            result = layerSupportObject->IsTransposeConvolution2dSupported(input,
                                                                           output,
                                                                           descriptor,
                                                                           weights,
                                                                           biases,
                                                                           reason);

            break;
        }
        default:
        {
            BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
            reason.value() = "Unrecognised layer type";
            result = false;
            break;
        }
    }
    return result;
}

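// Convenience overload: looks up the backend already assigned to the layer and
// forwards to the BackendId overload above.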
bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
                                        Optional<DataType> dataType,
                                        std::string& outReasonIfUnsupported)
{
    auto layer = boost::polymorphic_downcast<const Layer*>(&connectableLayer);
    return IsLayerSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
}

// Default Implementations
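// Each default Create* function returns a null IWorkload pointer, signalling
// that this factory provides no implementation for the layer; concrete backend
// factories override only the functions they actually support.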
std::unique_ptr<IWorkload> IWorkloadFactory::CreateAbs(const AbsQueueDescriptor& /*descriptor*/,
                                                       const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& /*descriptor*/,
                                                              const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& /*descriptor*/,
                                                            const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& /*descriptor*/,
                                                             const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchNormalization(
    const BatchNormalizationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& /*descriptor*/,
                                                                  const WorkloadInfo& /*Info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& /*descriptor*/,
                                                              const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& /*descriptor*/,
                                                          const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& /*descriptor*/,
                                                            const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& /*descriptor*/,
                                                                     const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& /*descriptor*/,
                                                                     const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& /*descriptor*/,
                                                                 const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateDebug(const DebugQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& /*descriptor*/,
                                                                const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthwiseConvolution2d(
    const DepthwiseConvolution2dQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateDequantize(
    const DequantizeQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateDetectionPostProcess(
    const DetectionPostProcessQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& /*descriptor*/,
                                                            const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& /*descriptor*/,
                                                                    const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateEqual(const EqualQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*Info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& /*descriptor*/,
                                                                    const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateFloor(const FloorQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& /*descriptor*/,
                                                                  const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateGather(const GatherQueueDescriptor& /*descriptor*/,
                                                          const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& /*descriptor*/,
                                                           const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateInstanceNormalization(
    const InstanceNormalizationQueueDescriptor& /*descriptor*/,
    const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& /*descriptor*/,
                                                                   const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& /*descriptor*/,
                                                              const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateLstm(const LstmQueueDescriptor& /*descriptor*/,
                                                        const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& /*descriptor*/,
                                                           const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMean(const MeanQueueDescriptor& /*descriptor*/,
                                                        const WorkloadInfo& /*Info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& /*descriptor*/,
                                                           const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& /*descriptor*/,
                                                             const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerge(const MergeQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerger(const MergerQueueDescriptor& /*descriptor*/,
                                                          const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& /*descriptor*/,
                                                           const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& /*descriptor*/,
                                                                  const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& /*descriptor*/,
                                                                 const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateOutput(const OutputQueueDescriptor& /*descriptor*/,
                                                          const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreatePad(const PadQueueDescriptor& /*descriptor*/,
                                                       const WorkloadInfo& /*Info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& /*descriptor*/,
                                                           const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& /*descriptor*/,
                                                             const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& /*descriptor*/,
                                                               const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreatePrelu(const PreluQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& /*descriptor*/,
                                                            const WorkloadInfo& /*Info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& /*descriptor*/,
                                                                 const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& /*descriptor*/,
                                                           const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& /*descriptor*/,
                                                                  const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateResize(const ResizeQueueDescriptor& /*descriptor*/,
                                                          const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateSlice(const SliceQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& /*descriptor*/,
                                                           const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& /*descriptor*/,
                                                            const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& /*descriptor*/,
                                                                  const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& /*descriptor*/,
                                                                const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateStack(const StackQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& /*descriptor*/,
                                                                const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& /*descriptor*/,
                                                               const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateSwitch(const SwitchQueueDescriptor& /*descriptor*/,
                                                          const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateTransposeConvolution2d(
    const TransposeConvolution2dQueueDescriptor& /*descriptor*/,
    const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

} // namespace armnn