ArmNN 22.02 — WorkloadFactory.cpp
Source listing; the rendered documentation for this file is generated by Doxygen.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include <Layer.hpp>
7 #include <LayersFwd.hpp>
8 
9 #include <armnn/Types.hpp>
12 #include <armnn/BackendHelper.hpp>
16 
19 
20 #include <sstream>
21 
22 namespace armnn
23 {
24 
25 namespace
26 {
27 using LayerList = std::list<Layer*>;
28 using Iterator = LayerList::const_iterator; // Const so pointers in the list can't be modified externally.
29 
30 const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
31 {
32  if (!type)
33  {
34  return info;
35  }
36 
37  return TensorInfo(info.GetShape(),
38  type.value(),
39  info.GetQuantizationScale(),
40  info.GetQuantizationOffset(),
41  info.IsConstant());
42 }
43 
44 } // anonymous namespace
45 
47 {
48  if (!weightsType)
49  {
50  return weightsType;
51  }
52 
53  switch(weightsType.value())
54  {
58  return weightsType;
64  default:
65  ARMNN_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
66  }
67  return armnn::EmptyOptional();
68 }
69 
70 
71 bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
72  const IConnectableLayer& connectableLayer,
73  Optional<DataType> dataType,
74  std::string& outReasonIfUnsupported,
75  const ModelOptions& modelOptions)
76 {
77  Optional<std::string&> reason = outReasonIfUnsupported;
78  bool result;
79  const Layer& layer = *(PolymorphicDowncast<const Layer*>(&connectableLayer));
80 
81  auto const& backendRegistry = BackendRegistryInstance();
82  if (!backendRegistry.IsBackendRegistered(backendId))
83  {
84  std::stringstream ss;
85  ss << connectableLayer.GetName() << " is not supported on " << backendId
86  << " because this backend is not registered.";
87 
88  outReasonIfUnsupported = ss.str();
89  return false;
90  }
91 
92  auto backendFactory = backendRegistry.GetFactory(backendId);
93  auto backendObject = backendFactory();
94  auto layerSupportObject = LayerSupportHandle(backendObject->GetLayerSupport(modelOptions), backendId);
95 
96  switch(layer.GetType())
97  {
99  {
100  auto cLayer = PolymorphicDowncast<const ActivationLayer*>(&layer);
101  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
102  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
103  result = layerSupportObject.IsActivationSupported(
104  OverrideDataType(input, dataType),
105  OverrideDataType(output, dataType),
106  cLayer->GetParameters(),
107  reason);
108  break;
109  }
110  case LayerType::Addition:
111  {
112  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
113  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
114  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
115  result = layerSupportObject.IsAdditionSupported(
116  OverrideDataType(input0, dataType),
117  OverrideDataType(input1, dataType),
118  OverrideDataType(output, dataType),
119  reason);
120  break;
121  }
123  {
124  auto cLayer = PolymorphicDowncast<const ArgMinMaxLayer*>(&layer);
125  const ArgMinMaxDescriptor& descriptor = cLayer->GetParameters();
126 
127  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
128  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
129  result = layerSupportObject.IsArgMinMaxSupported(
130  OverrideDataType(input, dataType),
131  OverrideDataType(output, DataType::Signed32),
132  descriptor,
133  reason);
134  break;
135  }
137  {
138  auto cLayer = PolymorphicDowncast<const BatchNormalizationLayer*>(&layer);
139  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
140  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
141  const TensorInfo& mean = cLayer->m_Mean->GetTensorInfo();
142  const TensorInfo& var = cLayer->m_Variance->GetTensorInfo();
143  const TensorInfo& beta = cLayer->m_Beta->GetTensorInfo();
144  const TensorInfo& gamma = cLayer->m_Gamma->GetTensorInfo();
145  result = layerSupportObject.IsBatchNormalizationSupported(
146  OverrideDataType(input, dataType),
147  OverrideDataType(output, dataType),
148  OverrideDataType(mean, dataType),
149  OverrideDataType(var, dataType),
150  OverrideDataType(beta, dataType),
151  OverrideDataType(gamma, dataType),
152  cLayer->GetParameters(),
153  reason);
154  break;
155  }
157  {
158  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
159  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
160  auto cLayer = PolymorphicDowncast<const BatchToSpaceNdLayer*>(&layer);
161 
162  result = layerSupportObject.IsBatchToSpaceNdSupported(OverrideDataType(input, dataType),
163  OverrideDataType(output, dataType),
164  cLayer->GetParameters(),
165  reason);
166  break;
167  }
168  case LayerType::Cast:
169  {
170  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
171  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
172 
173  result = layerSupportObject.IsCastSupported(OverrideDataType(input, dataType),
174  OverrideDataType(output, dataType),
175  reason);
176  break;
177  }
179  {
180  auto cLayer = PolymorphicDowncast<const ChannelShuffleLayer*>(&layer);
181 
182  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
183  const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
184 
185  const ChannelShuffleDescriptor descriptor = cLayer->GetParameters();
186 
187  result = layerSupportObject.IsChannelShuffleSupported(OverrideDataType(input, dataType),
188  OverrideDataType(output, dataType),
189  descriptor,
190  reason);
191  break;
192  }
194  {
195  auto cLayer = PolymorphicDowncast<const ComparisonLayer*>(&layer);
196 
197  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
198  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
199  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
200 
201  result = layerSupportObject.IsComparisonSupported(OverrideDataType(input0, dataType),
202  OverrideDataType(input1, dataType),
203  OverrideDataType(output, DataType::Boolean),
204  cLayer->GetParameters(),
205  reason);
206  break;
207  }
208  case LayerType::Constant:
209  {
210  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
211  result = layerSupportObject.IsConstantSupported(OverrideDataType(output, dataType), reason);
212  break;
213  }
215  {
216  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
217  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
218  result = layerSupportObject.IsConvertBf16ToFp32Supported(input, output, reason);
219  break;
220  }
222  {
223  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
224  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
225  result = layerSupportObject.IsConvertFp16ToFp32Supported(input, output, reason);
226  break;
227  }
229  {
230  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
231  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
232  result = layerSupportObject.IsConvertFp32ToBf16Supported(input, output, reason);
233  break;
234  }
236  {
237  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
238  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
239  result = layerSupportObject.IsConvertFp32ToFp16Supported(input, output, reason);
240  break;
241  }
243  {
244  auto cLayer = PolymorphicDowncast<const Convolution2dLayer*>(&layer);
245 
246  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
247  dataType);
248  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
249  ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
250 
251  const Convolution2dDescriptor& descriptor = cLayer->GetParameters();
252 
253  // Construct optional biases object based on the value of m_BiasEnabled
254  Optional<TensorInfo> biases;
255  if (descriptor.m_BiasEnabled)
256  {
257  biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
258  }
259 
260  result = layerSupportObject.IsConvolution2dSupported(
261  input,
262  output,
263  descriptor,
264  OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
265  biases,
266  reason);
267  break;
268  }
270  {
271  auto cLayer = PolymorphicDowncast<const Convolution3dLayer*>(&layer);
272 
273  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
274  dataType);
275  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
276 
277  ARMNN_ASSERT_MSG(layer.GetInputSlot(1).GetConnection(),
278  "Convolution3dLayer: Weights should be connected as a Constant Layer.");
279  const TensorInfo weights = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
280  dataType);
281 
282  const Convolution3dDescriptor& descriptor = cLayer->GetParameters();
283 
284  // Construct optional biases object based on the value of m_BiasEnabled
285  Optional<TensorInfo> biases;
286  if (descriptor.m_BiasEnabled)
287  {
288  biases = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
289  GetBiasTypeFromWeightsType(dataType));
290  }
291 
292  result = layerSupportObject.IsConvolution3dSupported(
293  input,
294  output,
295  descriptor,
296  weights,
297  biases,
298  reason);
299  break;
300  }
301  case LayerType::Debug:
302  {
303  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
304  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
305 
306  result = layerSupportObject.IsDebugSupported(OverrideDataType(input, dataType),
307  OverrideDataType(output, dataType),
308  reason);
309  break;
310  }
312  {
313  auto cLayer = PolymorphicDowncast<const DepthToSpaceLayer*>(&layer);
314 
315  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
316  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
317 
318  result = layerSupportObject.IsDepthToSpaceSupported(OverrideDataType(input, dataType),
319  OverrideDataType(output, dataType),
320  cLayer->GetParameters(),
321  reason);
322  break;
323  }
325  {
326  auto cLayer = PolymorphicDowncast<const DepthwiseConvolution2dLayer*>(&layer);
327  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
328  dataType);
329  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
330  ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
331 
332  const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();
333 
334  // Construct optional biases object based on the value of m_BiasEnabled
335  Optional<TensorInfo> biases;
336  if (descriptor.m_BiasEnabled)
337  {
338  biases =
339  OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
340  }
341 
342  result = layerSupportObject.IsDepthwiseConvolutionSupported(
343  input,
344  output,
345  descriptor,
346  OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
347  biases,
348  reason);
349  break;
350  }
352  {
353  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
354  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
355 
356  result = layerSupportObject.IsDequantizeSupported(input,
357  OverrideDataType(output, dataType),
358  reason);
359  break;
360  }
362  {
363  auto cLayer = PolymorphicDowncast<const DetectionPostProcessLayer*>(&layer);
364  const TensorInfo& boxEncodings = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
365  const TensorInfo& scores = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
366  const TensorInfo& anchors = cLayer->m_Anchors->GetTensorInfo();
367 
368  const TensorInfo& detectionBoxes = layer.GetOutputSlot(0).GetTensorInfo();
369  const TensorInfo& detectionClasses = layer.GetOutputSlot(1).GetTensorInfo();
370  const TensorInfo& detectionScores = layer.GetOutputSlot(2).GetTensorInfo();
371  const TensorInfo& numDetections = layer.GetOutputSlot(3).GetTensorInfo();
372 
373  const DetectionPostProcessDescriptor& descriptor = cLayer->GetParameters();
374  result = layerSupportObject.IsDetectionPostProcessSupported(boxEncodings,
375  scores,
376  anchors,
377  detectionBoxes,
378  detectionClasses,
379  detectionScores,
380  numDetections,
381  descriptor,
382  reason);
383  break;
384  }
386  {
387  auto cLayer = PolymorphicDowncast<const ElementwiseUnaryLayer*>(&layer);
388 
389  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
390  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
391 
392  result = layerSupportObject.IsElementwiseUnarySupported(OverrideDataType(input, dataType),
393  OverrideDataType(output, dataType),
394  cLayer->GetParameters(),
395  reason);
396  break;
397  }
398  case LayerType::Fill:
399  {
400  auto cLayer = PolymorphicDowncast<const FillLayer*>(&layer);
401  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
402  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
403  const FillDescriptor& descriptor = cLayer->GetParameters();
404 
405  result = layerSupportObject.IsFillSupported(
406  OverrideDataType(input, dataType),
407  OverrideDataType(output, dataType),
408  descriptor,
409  reason);
410  break;
411  }
413  {
414  auto cLayer = PolymorphicDowncast<const FakeQuantizationLayer*>(&layer);
415  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
416  result = layerSupportObject.IsFakeQuantizationSupported(OverrideDataType(input, dataType),
417  cLayer->GetParameters(),
418  reason);
419  break;
420  }
421  case LayerType::Floor:
422  {
423  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
424  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
425  result = layerSupportObject.IsFloorSupported(OverrideDataType(input, dataType),
426  OverrideDataType(output, dataType),
427  reason);
428  break;
429  }
431  {
432  auto cLayer = PolymorphicDowncast<const FullyConnectedLayer*>(&layer);
433  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
434  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
435 
436  const FullyConnectedDescriptor& descriptor = cLayer->GetParameters();
437  TensorInfo weightsInfo;
438  const TensorInfo* weightsInfoPtr = nullptr;
439 
440  weightsInfo = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(), dataType);
441  weightsInfoPtr = &weightsInfo;
442 
443  TensorInfo biasInfo;
444  const TensorInfo* biasInfoPtr = nullptr;
445  static const TensorInfo dummyBFloat16Bias(TensorShape({1,1,1,1}), DataType::BFloat16);
446  static const TensorInfo dummyFloat16Bias(TensorShape({1,1,1,1}), DataType::Float16);
447  static const TensorInfo dummyFloat32Bias(TensorShape({1,1,1,1}), DataType::Float32);
448  static const TensorInfo dummyQA8Bias(TensorShape({1,1,1,1}), DataType::Signed32);
449 
450  if (descriptor.m_BiasEnabled)
451  {
452  biasInfo = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(), dataType);
453  biasInfoPtr = &biasInfo;
454  }
455  else
456  {
457  // If biases are not enabled pass a dummy tensorinfo for the validation
458  switch(input.GetDataType())
459  {
460  case DataType::BFloat16:
461  {
462  biasInfoPtr = &dummyBFloat16Bias;
463  break;
464  }
465  case DataType::Float16:
466  {
467  biasInfoPtr = &dummyFloat16Bias;
468  break;
469  }
470  case DataType::Float32:
471  {
472  biasInfoPtr = &dummyFloat32Bias;
473  break;
474  }
475  case DataType::QAsymmU8:
476  case DataType::QAsymmS8:
477  case DataType::QSymmS8:
478  case DataType::QSymmS16:
479  {
480  biasInfoPtr = &dummyQA8Bias;
481  break;
482  }
483  default:
484  {
485  ARMNN_ASSERT_MSG(false, "Unexpected bias type");
486  }
487  }
488  }
489  result = layerSupportObject.IsFullyConnectedSupported(
490  OverrideDataType(input, dataType),
491  OverrideDataType(output, dataType),
492  *weightsInfoPtr,
493  *biasInfoPtr,
494  descriptor,
495  reason);
496  break;
497  }
498  case LayerType::Gather:
499  {
500  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
501  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
502  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
503  auto cLayer = PolymorphicDowncast<const GatherLayer*>(&layer);
504  const GatherDescriptor& descriptor = cLayer->GetParameters();
505  result = layerSupportObject.IsGatherSupported(OverrideDataType(input0, dataType),
506  input1,
507  OverrideDataType(output, dataType),
508  descriptor,
509  reason);
510  break;
511  }
512  case LayerType::Input:
513  {
514  const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo();
515  result = layerSupportObject.IsInputSupported(OverrideDataType(input, dataType), reason);
516  break;
517  }
519  {
520  auto cLayer = PolymorphicDowncast<const InstanceNormalizationLayer*>(&layer);
521  const InstanceNormalizationDescriptor& descriptor = cLayer->GetParameters();
522 
523  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
524  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
525 
526  result = layerSupportObject.IsInstanceNormalizationSupported(
527  OverrideDataType(input, dataType),
528  OverrideDataType(output, dataType),
529  descriptor,
530  reason);
531  break;
532  }
534  {
535  auto cLayer = PolymorphicDowncast<const L2NormalizationLayer*>(&layer);
536  const L2NormalizationDescriptor& descriptor = cLayer->GetParameters();
537 
538  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
539  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
540 
541  result = layerSupportObject.IsL2NormalizationSupported(
542  OverrideDataType(input, dataType),
543  OverrideDataType(output, dataType),
544  descriptor,
545  reason);
546  break;
547  }
549  {
550  auto cLayer = PolymorphicDowncast<const LogicalBinaryLayer*>(&layer);
551 
552  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
553  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
554  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
555 
556  result = layerSupportObject.IsLogicalBinarySupported(input0,
557  input1,
558  output,
559  cLayer->GetParameters(),
560  reason);
561  break;
562  }
564  {
565  auto cLayer = PolymorphicDowncast<const LogSoftmaxLayer*>(&layer);
566 
567  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
568  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
569 
570  result = layerSupportObject.IsLogSoftmaxSupported(OverrideDataType(input, dataType),
571  OverrideDataType(output, dataType),
572  cLayer->GetParameters(),
573  reason);
574  break;
575  }
576  case LayerType::Lstm:
577  {
578  auto cLayer = PolymorphicDowncast<const LstmLayer*>(&layer);
579  const LstmDescriptor& descriptor = cLayer->GetParameters();
580 
581  // All inputs.
582  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
583  dataType);
584  const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
585  dataType);
586  const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
587  dataType);
588  // All outputs
589  const TensorInfo& scratchBuffer = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
590  const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
591  const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
592  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(3).GetTensorInfo(), dataType);
593 
594  // Basic parameters
595  const TensorInfo& inputToForgetWeights
596  = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
597  const TensorInfo& inputToCellWeights
598  = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
599  const TensorInfo& inputToOutputWeights
600  = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
601  const TensorInfo& recurrentToForgetWeights
602  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
603  const TensorInfo& recurrentToCellWeights
604  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
605  const TensorInfo& recurrentToOutputWeights
606  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
607  const TensorInfo& forgetGateBias
608  = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
609  const TensorInfo& cellBias
610  = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
611  const TensorInfo& outputGateBias
612  = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);
613 
614  LstmInputParamsInfo paramsInfo;
615 
616  paramsInfo.m_InputToForgetWeights = &inputToForgetWeights;
617  paramsInfo.m_InputToCellWeights = &inputToCellWeights;
618  paramsInfo.m_InputToOutputWeights = &inputToOutputWeights;
619  paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
620  paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights;
621  paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
622  paramsInfo.m_ForgetGateBias = &forgetGateBias;
623  paramsInfo.m_CellBias = &cellBias;
624  paramsInfo.m_OutputGateBias = &outputGateBias;
625 
626 
627  // Optional parameters
628  TensorInfo optInputToInputWeights;
629  TensorInfo optRecurrentToInputWeights;
630  TensorInfo optCellToInputWeights;
631  TensorInfo optInputGateBias;
632  TensorInfo optProjectionWeights;
633  TensorInfo optProjectionBias;
634  TensorInfo optCellToForgetWeights;
635  TensorInfo optCellToOutputWeights;
636  TensorInfo optInputLayerNormWeights;
637  TensorInfo optForgetLayerNormWeights;
638  TensorInfo optCellLayerNormWeights;
639  TensorInfo optOutputLayerNormWeights;
640 
641  if(!descriptor.m_CifgEnabled)
642  {
643  optInputToInputWeights =
644  OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
645  paramsInfo.m_InputToInputWeights = &optInputToInputWeights;
646 
647  optRecurrentToInputWeights =
648  OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
649  paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
650  optInputGateBias =
651  OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
652  paramsInfo.m_InputGateBias = &optInputGateBias;
653  }
654 
655  if(descriptor.m_ProjectionEnabled)
656  {
657  optProjectionWeights =
658  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
659  paramsInfo.m_ProjectionWeights = &optProjectionWeights;
660  if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
661  {
662  optProjectionBias =
663  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
664  paramsInfo.m_ProjectionBias = &optProjectionBias;
665  }
666  }
667 
668  if(descriptor.m_PeepholeEnabled)
669  {
670  if(!descriptor.m_CifgEnabled)
671  {
672  optCellToInputWeights =
673  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
674  dataType);
675  paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
676  }
677  optCellToForgetWeights =
678  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
679  paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
680  optCellToOutputWeights =
681  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
682  paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
683  }
684 
685  if(descriptor.m_LayerNormEnabled)
686  {
687  if (!descriptor.m_CifgEnabled)
688  {
689  optInputLayerNormWeights = OverrideDataType(
690  cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
691  paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
692  }
693 
694  optForgetLayerNormWeights = OverrideDataType(
695  cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
696  paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;
697 
698  optCellLayerNormWeights = OverrideDataType(
699  cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
700  paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;
701 
702  optOutputLayerNormWeights = OverrideDataType(
703  cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
704  paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
705  }
706 
707  result = layerSupportObject.IsLstmSupported(
708  input,
709  outputStateIn,
710  cellStateIn,
711  scratchBuffer,
712  outputStateOut,
713  cellStateOut,
714  output,
715  descriptor,
716  paramsInfo,
717  reason);
718  break;
719  }
720  case LayerType::Maximum:
721  {
722  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
723  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
724  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
725 
726  result = layerSupportObject.IsMaximumSupported(OverrideDataType(input0, dataType),
727  OverrideDataType(input1, dataType),
728  OverrideDataType(output, dataType),
729  reason);
730  break;
731  }
732  case LayerType::MemCopy:
733  {
734  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
735  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
736 
737  result = layerSupportObject.IsMemCopySupported(OverrideDataType(input, dataType),
738  OverrideDataType(output, dataType),
739  reason);
740  break;
741  }
743  {
744  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
745  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
746 
747  result = layerSupportObject.IsMemImportSupported(OverrideDataType(input, dataType),
748  OverrideDataType(output, dataType),
749  reason);
750  break;
751  }
752  case LayerType::Merge:
753  {
754  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
755  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
756  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
757 
758  result = layerSupportObject.IsMergeSupported(OverrideDataType(input0, dataType),
759  OverrideDataType(input1, dataType),
760  OverrideDataType(output, dataType),
761  reason);
762  break;
763  }
764  case LayerType::Concat:
765  {
766  auto cLayer = PolymorphicDowncast<const ConcatLayer*>(&layer);
767 
768  // Get vector of all inputs.
769  auto getTensorInfo = [&dataType](const InputSlot& slot)
770  {
771  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
772  };
773 
774  auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo);
775  auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo);
776  std::vector<TensorInfo> inputs(beginI, endI);
777 
778  auto getTensorInfoPtr = [](const TensorInfo& info)
779  {
780  return &info;
781  };
782 
783  auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
784  auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
785  std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
786 
787  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
788 
789  result = layerSupportObject.IsConcatSupported(inputPtrs, output, cLayer->GetParameters(), reason);
790 
791 
792  break;
793  }
795  {
796  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
797  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
798  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
799  result = layerSupportObject.IsMultiplicationSupported(
800  OverrideDataType(input0, dataType),
801  OverrideDataType(input1, dataType),
802  OverrideDataType(output, dataType),
803  reason);
804  break;
805  }
807  {
808  auto cLayer = PolymorphicDowncast<const NormalizationLayer*>(&layer);
809  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
810  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
811  result = layerSupportObject.IsNormalizationSupported(OverrideDataType(input, dataType),
812  OverrideDataType(output, dataType),
813  cLayer->GetParameters(),
814  reason);
815  break;
816  }
817  case LayerType::Output:
818  {
819  const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
820  result = layerSupportObject.IsOutputSupported(OverrideDataType(output, dataType), reason);
821  break;
822  }
823  case LayerType::Permute:
824  {
825  auto cLayer = PolymorphicDowncast<const PermuteLayer*>(&layer);
826  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
827  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
828  result = layerSupportObject.IsPermuteSupported(OverrideDataType(input, dataType),
829  OverrideDataType(output, dataType),
830  cLayer->GetParameters(),
831  reason);
832  break;
833  }
834  case LayerType::Pad:
835  {
836  auto cLayer = PolymorphicDowncast<const PadLayer*>(&layer);
837  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
838  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
839  result = layerSupportObject.IsPadSupported(
840  OverrideDataType(input, dataType),
841  OverrideDataType(output, dataType),
842  cLayer->GetParameters(),
843  reason);
844  break;
845  }
847  {
848  auto cLayer = PolymorphicDowncast<const Pooling2dLayer*>(&layer);
849  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
850  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
851  result = layerSupportObject.IsPooling2dSupported(OverrideDataType(input, dataType),
852  OverrideDataType(output, dataType),
853  cLayer->GetParameters(),
854  reason);
855  break;
856  }
858  {
859  auto cLayer = PolymorphicDowncast<const Pooling3dLayer*>(&layer);
860  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
861  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
862  result = layerSupportObject.IsPooling3dSupported(OverrideDataType(input, dataType),
863  OverrideDataType(output, dataType),
864  cLayer->GetParameters(),
865  reason);
866  break;
867  }
869  {
870  auto cLayer = PolymorphicDowncast<const PreCompiledLayer*>(&layer);
871  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
872  result = layerSupportObject.IsPreCompiledSupported(OverrideDataType(input, dataType),
873  cLayer->GetParameters(),
874  reason);
875  break;
876  }
877  case LayerType::Quantize:
878  {
879  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
880  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
881  result = layerSupportObject.IsQuantizeSupported(input, output, reason);
882  break;
883  }
884  case LayerType::QLstm:
885  {
886  auto cLayer = PolymorphicDowncast<const QLstmLayer*>(&layer);
887  const QLstmDescriptor& descriptor = cLayer->GetParameters();
888 
889  // Inputs
890  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
891  const TensorInfo& previousOutputIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
892  const TensorInfo& previousCellStateIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();
893 
894  // Outputs
895  const TensorInfo& outputStateOut = layer.GetOutputSlot(0).GetTensorInfo();
896  const TensorInfo& cellStateOut = layer.GetOutputSlot(1).GetTensorInfo();
897  const TensorInfo& output = layer.GetOutputSlot(2).GetTensorInfo();
898 
899  // Lstm parameters
900  LstmInputParamsInfo paramsInfo;
901 
902  // Basic parameters
903  ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToForgetWeights.get() != nullptr);
904  ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToCellWeights.get() != nullptr);
905  ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToOutputWeights.get() != nullptr);
906  paramsInfo.m_InputToForgetWeights = &cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo();
907  paramsInfo.m_InputToCellWeights = &cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo();
908  paramsInfo.m_InputToOutputWeights = &cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo();
909 
910  paramsInfo.m_RecurrentToForgetWeights =
911  &cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo();
912  paramsInfo.m_RecurrentToCellWeights =
913  &cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo();
914  paramsInfo.m_RecurrentToOutputWeights =
915  &cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo();
916 
917  paramsInfo.m_ForgetGateBias = &cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo();
918  paramsInfo.m_CellBias = &cLayer->m_BasicParameters.m_CellBias->GetTensorInfo();
919  paramsInfo.m_OutputGateBias = &cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo();
920 
921  if(!descriptor.m_CifgEnabled)
922  {
923  paramsInfo.m_InputToInputWeights = &cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo();
924  paramsInfo.m_RecurrentToInputWeights =
925  &cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo();
926  paramsInfo.m_InputGateBias = &cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo();
927  }
928 
929  if(descriptor.m_ProjectionEnabled)
930  {
931  paramsInfo.m_ProjectionWeights = &cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo();
932 
933  // Projection bias is optional even if projection is enabled
934  if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
935  {
936  paramsInfo.m_ProjectionBias = &cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo();
937  }
938  }
939 
940  if(descriptor.m_PeepholeEnabled)
941  {
942  if (!descriptor.m_CifgEnabled)
943  {
944  paramsInfo.m_CellToInputWeights =
945  &cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo();
946  }
947 
948  paramsInfo.m_CellToForgetWeights =
949  &cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo();
950  paramsInfo.m_CellToOutputWeights = &cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo();
951  }
952 
953  if(descriptor.m_LayerNormEnabled)
954  {
955  if (!descriptor.m_CifgEnabled)
956  {
957  paramsInfo.m_InputLayerNormWeights =
958  &cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo();
959  }
960 
961  paramsInfo.m_ForgetLayerNormWeights =
962  &cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo();
963  paramsInfo.m_CellLayerNormWeights =
964  &cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo();
965  paramsInfo.m_OutputLayerNormWeights =
966  &cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo();
967  }
968 
969  result = layerSupportObject.IsQLstmSupported(input,
970  previousOutputIn,
971  previousCellStateIn,
972  outputStateOut,
973  cellStateOut,
974  output,
975  descriptor,
976  paramsInfo,
977  reason);
978  break;
979  }
981  {
982  auto cLayer = PolymorphicDowncast<const QuantizedLstmLayer*>(&layer);
983 
984  // Inputs
985  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
986  const TensorInfo& previousCellStateIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
987  const TensorInfo& previousOutputIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();
988 
989  // Outputs
990  const TensorInfo& cellStateOut = layer.GetOutputSlot(0).GetTensorInfo();
991  const TensorInfo& output = layer.GetOutputSlot(1).GetTensorInfo();
992 
993  // QuantizedLstm parameters
994  QuantizedLstmInputParamsInfo paramsInfo;
995 
996  paramsInfo.m_InputToInputWeights =
997  &cLayer->m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo();
998  paramsInfo.m_InputToForgetWeights =
999  &cLayer->m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo();
1000  paramsInfo.m_InputToCellWeights =
1001  &cLayer->m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo();
1002  paramsInfo.m_InputToOutputWeights =
1003  &cLayer->m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo();
1004 
1005  paramsInfo.m_RecurrentToInputWeights =
1006  &cLayer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo();
1007  paramsInfo.m_RecurrentToForgetWeights =
1008  &cLayer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo();
1009  paramsInfo.m_RecurrentToCellWeights =
1010  &cLayer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo();
1011  paramsInfo.m_RecurrentToOutputWeights =
1012  &cLayer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo();
1013 
1014  paramsInfo.m_InputGateBias =
1015  &cLayer->m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo();
1016  paramsInfo.m_ForgetGateBias =
1017  &cLayer->m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo();
1018  paramsInfo.m_CellBias =
1019  &cLayer->m_QuantizedLstmParameters.m_CellBias->GetTensorInfo();
1020  paramsInfo.m_OutputGateBias =
1021  &cLayer->m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo();;
1022 
1023  result = layerSupportObject.IsQuantizedLstmSupported(input,
1024  previousCellStateIn,
1025  previousOutputIn,
1026  cellStateOut,
1027  output,
1028  paramsInfo,
1029  reason);
1030  break;
1031  }
1032  case LayerType::Division:
1033  {
1034  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1035  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1036  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1037  result = layerSupportObject.IsDivisionSupported(
1038  OverrideDataType(input0, dataType),
1039  OverrideDataType(input1, dataType),
1040  OverrideDataType(output, dataType),
1041  reason);
1042  break;
1043  }
1044  case LayerType::Rank:
1045  {
1046  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1047  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1048  result = layerSupportObject.IsRankSupported(OverrideDataType(input, dataType),
1049  OverrideDataType(output, dataType),
1050  reason);
1051  break;
1052  }
1053  case LayerType::Reshape:
1054  {
1055  auto cLayer = PolymorphicDowncast<const ReshapeLayer*>(&layer);
1056  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1057  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1058  result = layerSupportObject.IsReshapeSupported(OverrideDataType(input, dataType),
1059  OverrideDataType(output, dataType),
1060  cLayer->GetParameters(),
1061  reason);
1062  break;
1063  }
1064  case LayerType::Resize:
1065  {
1066  auto cLayer = PolymorphicDowncast<const ResizeLayer*>(&layer);
1067  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1068  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1069  result = layerSupportObject.IsResizeSupported(OverrideDataType(input, dataType),
1070  OverrideDataType(output, dataType),
1071  cLayer->GetParameters(),
1072  reason);
1073  break;
1074  }
1075  case LayerType::Shape:
1076  {
1077  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1078  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1079 
1080  result = layerSupportObject.IsShapeSupported(OverrideDataType(input, dataType),
1081  OverrideDataType(output, dataType),
1082  reason);
1083  break;
1084  }
1085  case LayerType::Slice:
1086  {
1087  auto cLayer = PolymorphicDowncast<const SliceLayer*>(&layer);
1088 
1089  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1090  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1091 
1092  result = layerSupportObject.IsSliceSupported(OverrideDataType(input, dataType),
1093  OverrideDataType(output, dataType),
1094  cLayer->GetParameters(),
1095  reason);
1096  break;
1097  }
1098  case LayerType::Softmax:
1099  {
1100  auto cLayer = PolymorphicDowncast<const SoftmaxLayer*>(&layer);
1101  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1102  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1103  result = layerSupportObject.IsSoftmaxSupported(OverrideDataType(input, dataType),
1104  OverrideDataType(output, dataType),
1105  cLayer->GetParameters(),
1106  reason);
1107  break;
1108  }
1110  {
1111  auto cLayer = PolymorphicDowncast<const SpaceToBatchNdLayer*>(&layer);
1112  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1113  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1114  result = layerSupportObject.IsSpaceToBatchNdSupported(OverrideDataType(input, dataType),
1115  OverrideDataType(output, dataType),
1116  cLayer->GetParameters(),
1117  reason);
1118  break;
1119  }
1121  {
1122  auto cLayer = PolymorphicDowncast<const SpaceToDepthLayer*>(&layer);
1123 
1124  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1125  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1126 
1127  result = layerSupportObject.IsSpaceToDepthSupported(OverrideDataType(input, dataType),
1128  OverrideDataType(output, dataType),
1129  cLayer->GetParameters(),
1130  reason);
1131  break;
1132  }
1133  case LayerType::Splitter:
1134  {
1135  auto cLayer = PolymorphicDowncast<const SplitterLayer*>(&layer);
1136  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1137 
1138  // Get vector of all outputs.
1139  auto getTensorInfo = [&dataType](const OutputSlot& slot)
1140  {
1141  return OverrideDataType(slot.GetTensorInfo(), dataType);
1142  };
1143  auto beginI = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfo);
1144  auto endI = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfo);
1145  std::vector<TensorInfo> outputs(beginI, endI);
1146 
1147  const std::vector<std::reference_wrapper<TensorInfo>> outputPtrs(outputs.begin(), outputs.end());
1148 
1149  result = layerSupportObject.IsSplitterSupported(OverrideDataType(input, dataType),
1150  outputPtrs,
1151  cLayer->GetParameters(),
1152  reason);
1153  break;
1154  }
1155  case LayerType::Stack:
1156  {
1157  auto cLayer = PolymorphicDowncast<const StackLayer*>(&layer);
1158 
1159  // Get vector of all inputs.
1160  auto getTensorInfo = [&dataType](const InputSlot& slot)
1161  {
1162  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
1163  };
1164  auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo);
1165  auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo);
1166  std::vector<TensorInfo> inputs(beginI, endI);
1167 
1168  auto getTensorInfoPtr = [](const TensorInfo& info)
1169  {
1170  return &info;
1171  };
1172  auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
1173  auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
1174  std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
1175 
1176  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1177 
1178  result = layerSupportObject.IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason);
1179 
1180  break;
1181  }
1182  case LayerType::StandIn:
1183  {
1184  auto cLayer = PolymorphicDowncast<const StandInLayer*>(&layer);
1185 
1186  // Get vector of all inputs.
1187  auto getTensorInfoIn = [&dataType](const InputSlot& slot)
1188  {
1189  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
1190  };
1191  auto getTensorInfoOut = [&dataType](const OutputSlot& slot)
1192  {
1193  return OverrideDataType(slot.GetTensorInfo(), dataType);
1194  };
1195  auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfoIn);
1196  auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfoIn);
1197  std::vector<TensorInfo> inputs(beginI, endI);
1198 
1199  auto beginO = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfoOut);
1200  auto endO = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfoOut);
1201  std::vector<TensorInfo> outputs(beginO, endO);
1202 
1203 
1204  auto getTensorInfoPtr = [](const TensorInfo& info)
1205  {
1206  return &info;
1207  };
1208  auto beginPtrI = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
1209  auto endPtrI = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
1210  std::vector<const TensorInfo*> inputPtrs(beginPtrI, endPtrI);
1211 
1212  auto beginPtrO = MakeTransformIterator(outputs.begin(), getTensorInfoPtr);
1213  auto endPtrO = MakeTransformIterator(outputs.end(), getTensorInfoPtr);
1214  std::vector<const TensorInfo*> outputPtrs(beginPtrO, endPtrO);
1215 
1216 
1217  result = layerSupportObject.IsStandInSupported(inputPtrs,
1218  outputPtrs,
1219  cLayer->GetParameters(),
1220  reason);
1221  break;
1222  }
1224  {
1225  auto cLayer = PolymorphicDowncast<const StridedSliceLayer*>(&layer);
1226  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1227  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1228  result = layerSupportObject.IsStridedSliceSupported(OverrideDataType(input, dataType),
1229  OverrideDataType(output, dataType),
1230  cLayer->GetParameters(),
1231  reason);
1232  break;
1233  }
1235  {
1236  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1237  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1238  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1239  result = layerSupportObject.IsSubtractionSupported(
1240  OverrideDataType(input0, dataType),
1241  OverrideDataType(input1, dataType),
1242  OverrideDataType(output, dataType),
1243  reason);
1244  break;
1245  }
1246  case LayerType::Switch:
1247  {
1248  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1249  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1250  const TensorInfo& output0 = layer.GetOutputSlot(0).GetTensorInfo();
1251  const TensorInfo& output1 = layer.GetOutputSlot(1).GetTensorInfo();
1252  result = layerSupportObject.IsSwitchSupported(OverrideDataType(input0, dataType),
1253  OverrideDataType(input1, dataType),
1254  OverrideDataType(output0, dataType),
1255  OverrideDataType(output1, dataType),
1256  reason);
1257  break;
1258  }
1259  case LayerType::Mean:
1260  {
1261  auto cLayer = PolymorphicDowncast<const MeanLayer*>(&layer);
1262  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1263  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1264  result = layerSupportObject.IsMeanSupported(
1265  OverrideDataType(input, dataType),
1266  OverrideDataType(output, dataType),
1267  cLayer->GetParameters(),
1268  reason);
1269  break;
1270  }
1271  case LayerType::Minimum:
1272  {
1273  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1274  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1275  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1276  result = layerSupportObject.IsMinimumSupported(OverrideDataType(input0, dataType),
1277  OverrideDataType(input1, dataType),
1278  OverrideDataType(output, dataType),
1279  reason);
1280  break;
1281  }
1282  case LayerType::Prelu:
1283  {
1284  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1285  const TensorInfo& alpha = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1286  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1287  result = layerSupportObject.IsPreluSupported(OverrideDataType(input, dataType),
1288  OverrideDataType(alpha, dataType),
1289  OverrideDataType(output, dataType),
1290  reason);
1291  break;
1292  }
1293  case LayerType::Transpose:
1294  {
1295  auto cLayer = PolymorphicDowncast<const TransposeLayer*>(&layer);
1296  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1297  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1298  result = layerSupportObject.IsTransposeSupported(OverrideDataType(input, dataType),
1299  OverrideDataType(output, dataType),
1300  cLayer->GetParameters(),
1301  reason);
1302  break;
1303  }
1305  {
1306  auto cLayer = PolymorphicDowncast<const TransposeConvolution2dLayer*>(&layer);
1307 
1308  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
1309  dataType);
1310  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
1311 
1312  const TransposeConvolution2dDescriptor& descriptor = cLayer->GetParameters();
1313 
1314  Optional<TensorInfo> biases;
1315  if (descriptor.m_BiasEnabled)
1316  {
1317  ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
1318  biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
1319  GetBiasTypeFromWeightsType(dataType));
1320  }
1321 
1322  ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
1323  const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);
1324 
1325  result = layerSupportObject.IsTransposeConvolution2dSupported(input,
1326  output,
1327  descriptor,
1328  weights,
1329  biases,
1330  reason);
1331 
1332  break;
1333  }
1334  case LayerType::Reduce:
1335  {
1336  auto cLayer = PolymorphicDowncast<const ReduceLayer*>(&layer);
1337  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1338  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1339 
1340  result = layerSupportObject.IsReduceSupported(OverrideDataType(input, dataType),
1341  OverrideDataType(output, dataType),
1342  cLayer->GetParameters(),
1343  reason);
1344  break;
1345  }
1347  {
1348  auto cLayer = PolymorphicDowncast<const UnidirectionalSequenceLstmLayer*>(&layer);
1349  const UnidirectionalSequenceLstmDescriptor& descriptor = cLayer->GetParameters();
1350 
1351  // All inputs.
1352  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
1353  dataType);
1354  const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
1355  dataType);
1356  const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
1357  dataType);
1358  // Outputs
1359  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
1360 
1361  // Basic parameters
1362  const TensorInfo& inputToForgetWeights
1363  = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
1364  const TensorInfo& inputToCellWeights
1365  = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
1366  const TensorInfo& inputToOutputWeights
1367  = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
1368  const TensorInfo& recurrentToForgetWeights
1369  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
1370  const TensorInfo& recurrentToCellWeights
1371  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
1372  const TensorInfo& recurrentToOutputWeights
1373  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
1374  const TensorInfo& forgetGateBias
1375  = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
1376  const TensorInfo& cellBias
1377  = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
1378  const TensorInfo& outputGateBias
1379  = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);
1380 
1381  LstmInputParamsInfo paramsInfo;
1382 
1383  paramsInfo.m_InputToForgetWeights = &inputToForgetWeights;
1384  paramsInfo.m_InputToCellWeights = &inputToCellWeights;
1385  paramsInfo.m_InputToOutputWeights = &inputToOutputWeights;
1386  paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1387  paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights;
1388  paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1389  paramsInfo.m_ForgetGateBias = &forgetGateBias;
1390  paramsInfo.m_CellBias = &cellBias;
1391  paramsInfo.m_OutputGateBias = &outputGateBias;
1392 
1393  // Optional parameters
1394  TensorInfo optInputToInputWeights;
1395  TensorInfo optRecurrentToInputWeights;
1396  TensorInfo optCellToInputWeights;
1397  TensorInfo optInputGateBias;
1398  TensorInfo optProjectionWeights;
1399  TensorInfo optProjectionBias;
1400  TensorInfo optCellToForgetWeights;
1401  TensorInfo optCellToOutputWeights;
1402  TensorInfo optInputLayerNormWeights;
1403  TensorInfo optForgetLayerNormWeights;
1404  TensorInfo optCellLayerNormWeights;
1405  TensorInfo optOutputLayerNormWeights;
1406 
1407  if(!descriptor.m_CifgEnabled)
1408  {
1409  optInputToInputWeights =
1410  OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
1411  paramsInfo.m_InputToInputWeights = &optInputToInputWeights;
1412 
1413  optRecurrentToInputWeights =
1414  OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
1415  paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
1416  optInputGateBias =
1417  OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
1418  paramsInfo.m_InputGateBias = &optInputGateBias;
1419  }
1420 
1421  if(descriptor.m_ProjectionEnabled)
1422  {
1423  optProjectionWeights =
1424  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
1425  paramsInfo.m_ProjectionWeights = &optProjectionWeights;
1426  if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
1427  {
1428  optProjectionBias =
1429  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
1430  paramsInfo.m_ProjectionBias = &optProjectionBias;
1431  }
1432  }
1433 
1434  if(descriptor.m_PeepholeEnabled)
1435  {
1436  if(!descriptor.m_CifgEnabled)
1437  {
1438  optCellToInputWeights =
1439  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
1440  dataType);
1441  paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
1442  }
1443  optCellToForgetWeights =
1444  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
1445  paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
1446  optCellToOutputWeights =
1447  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
1448  paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
1449  }
1450 
1451  if(descriptor.m_LayerNormEnabled)
1452  {
1453  if (!descriptor.m_CifgEnabled)
1454  {
1455  optInputLayerNormWeights = OverrideDataType(
1456  cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
1457  paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
1458  }
1459 
1460  optForgetLayerNormWeights = OverrideDataType(
1461  cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
1462  paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;
1463 
1464  optCellLayerNormWeights = OverrideDataType(
1465  cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
1466  paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;
1467 
1468  optOutputLayerNormWeights = OverrideDataType(
1469  cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
1470  paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
1471  }
1472 
1473  Optional<TensorInfo> hiddenStateOut;
1474  Optional<TensorInfo> cellStateOut;
1475 
1476  result = layerSupportObject.IsUnidirectionalSequenceLstmSupported(input,
1477  outputStateIn,
1478  cellStateIn,
1479  output,
1480  hiddenStateOut,
1481  cellStateOut,
1482  descriptor,
1483  paramsInfo,
1484  reason);
1485  break;
1486  }
1487  default:
1488  {
1489  ARMNN_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
1490  reason.value() = "Unrecognised layer type";
1491  result = false;
1492  break;
1493  }
1494  }
1495  return result;
1496 }
1497 
1499  const IConnectableLayer& connectableLayer,
1500  Optional<DataType> dataType,
1501  std::string& outReasonIfUnsupported)
1502 {
1503  return IsLayerConfigurationSupported(backendId, connectableLayer, dataType, outReasonIfUnsupported);
1504 }
1505 
1507  Optional<DataType> dataType,
1508  std::string& outReasonIfUnsupported)
1509 {
1510  auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
1511  return IsLayerConfigurationSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
1512 }
1513 
// TODO: merge this overload with the IsLayerSupported overload above that supplies a defaulted ModelOptions
1516  Optional<DataType> dataType,
1517  std::string& outReasonIfUnsupported,
1518  const ModelOptions& modelOptions)
1519 {
1520  auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
1521  return IsLayerConfigurationSupported(layer->GetBackendId(),
1522  connectableLayer,
1523  dataType,
1524  outReasonIfUnsupported,
1525  modelOptions);
1526 }
1527 
1529  const IConnectableLayer& connectableLayer,
1530  Optional<DataType> dataType,
1531  std::string& outReasonIfUnsupported,
1532  const ModelOptions& modelOptions)
1533 {
1534  return IsLayerConfigurationSupported(backendId,
1535  connectableLayer,
1536  dataType,
1537  outReasonIfUnsupported,
1538  modelOptions);
1539 }
1541 std::unique_ptr<IWorkload> IWorkloadFactory::CreateWorkload(LayerType type,
1542  const QueueDescriptor& descriptor,
1543  const WorkloadInfo& info) const
1544 {
1545  switch(type)
1546  {
1547  case LayerType::Activation :
1548  {
1549  auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
1550  return CreateActivation(*activationQueueDescriptor, info);
1551  }
1552  case LayerType::Addition :
1553  {
1554  auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
1555  return CreateAddition(*additionQueueDescriptor, info);
1556  }
1557  case LayerType::ArgMinMax :
1558  {
1559  auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
1560  return CreateArgMinMax(*argMinMaxQueueDescriptor, info);
1561  }
1563  {
1564  auto batchNormQueueDescriptor = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
1565  return CreateBatchNormalization(*batchNormQueueDescriptor, info);
1566  }
1568  {
1569  auto batchToSpaceNdQueueDescriptor
1570  = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
1571  return CreateBatchToSpaceNd(*batchToSpaceNdQueueDescriptor, info);
1572  }
1573  case LayerType::Cast :
1574  {
1575  auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
1576  return CreateCast(*castQueueDescriptor, info);
1577  }
1579  {
1580  auto channelShuffleQueueDescriptor
1581  = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
1582  return CreateChannelShuffle(*channelShuffleQueueDescriptor, info);
1583  }
1584  case LayerType::Comparison :
1585  {
1586  auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
1587  return CreateComparison(*comparisonQueueDescriptor, info);
1588  }
1589  case LayerType::Concat :
1590  {
1591  auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
1592  return CreateConcat(*concatQueueDescriptor, info);
1593  }
1594  case LayerType::Constant :
1595  {
1596  auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
1597  return CreateConstant(*constantQueueDescriptor, info);
1598  }
1600  {
1601  auto convertBf16ToFp32QueueDescriptor
1602  = PolymorphicDowncast<const ConvertBf16ToFp32QueueDescriptor*>(&descriptor);
1603  return CreateConvertBf16ToFp32(*convertBf16ToFp32QueueDescriptor, info);
1604  }
1606  {
1607  auto convertFp16ToFp32QueueDescriptor
1608  = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
1609  return CreateConvertFp16ToFp32(*convertFp16ToFp32QueueDescriptor, info);
1610  }
1612  {
1613  auto convertFp32ToBf16QueueDescriptor
1614  = PolymorphicDowncast<const ConvertFp32ToBf16QueueDescriptor*>(&descriptor);
1615  return CreateConvertFp32ToBf16(*convertFp32ToBf16QueueDescriptor, info);
1616  }
1618  {
1619  auto convertFp32ToFp16QueueDescriptor
1620  = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
1621  return CreateConvertFp32ToFp16(*convertFp32ToFp16QueueDescriptor, info);
1622  }
1624  {
1625  auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
1626  return CreateConvolution2d(*convolution2dQueueDescriptor, info);
1627  }
1629  {
1630  auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
1631  return CreateConvolution3d(*convolution3dQueueDescriptor, info);
1632  }
1633  case LayerType::Debug:
1634  {
1635  auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
1636  return CreateDebug(*debugQueueDescriptor, info);
1637  }
1639  {
1640  auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
1641  return CreateDepthToSpace(*depthToSpaceQueueDescriptor, info);
1642  }
1644  {
1645  auto depthwiseConvolution2DQueueDescriptor
1646  = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
1647  return CreateDepthwiseConvolution2d(*depthwiseConvolution2DQueueDescriptor, info);
1648  }
1649  case LayerType::Dequantize:
1650  {
1651  auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
1652  return CreateDequantize(*dequantizeQueueDescriptor, info);
1653  }
1655  {
1656  auto detectionPostProcessQueueDescriptor
1657  = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
1658  return CreateDetectionPostProcess(*detectionPostProcessQueueDescriptor, info);
1659  }
1660  case LayerType::Division:
1661  {
1662  auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
1663  return CreateDivision(*divisionQueueDescriptor, info);
1664  }
1666  {
1667  auto elementwiseUnaryQueueDescriptor
1668  = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
1669  return CreateElementwiseUnary(*elementwiseUnaryQueueDescriptor, info);
1670 
1671  }
1673  {
1674  auto fakeQuantizationQueueDescriptor
1675  = PolymorphicDowncast<const FakeQuantizationQueueDescriptor*>(&descriptor);
1676  return CreateFakeQuantization(*fakeQuantizationQueueDescriptor, info);
1677  }
1678  case LayerType::Fill:
1679  {
1680  auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
1681  return CreateFill(*fillQueueDescriptor, info);
1682  }
1683  case LayerType::Floor:
1684  {
1685  auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
1686  return CreateFloor(*floorQueueDescriptor, info);
1687  }
1689  {
1690  auto fullyConnectedQueueDescriptor
1691  = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
1692  return CreateFullyConnected(*fullyConnectedQueueDescriptor, info);
1693  }
1694  case LayerType::Gather:
1695  {
1696  auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
1697  return CreateGather(*gatherQueueDescriptor, info);
1698  }
1699  case LayerType::Input:
1700  {
1701  auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
1702  return CreateInput(*inputQueueDescriptor, info);
1703  }
1705  {
1706  auto instanceNormalizationQueueDescriptor
1707  = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
1708  return CreateInstanceNormalization(*instanceNormalizationQueueDescriptor, info);
1709  }
1711  {
1712  auto l2NormalizationQueueDescriptor
1713  = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
1714  return CreateL2Normalization(*l2NormalizationQueueDescriptor, info);
1715  }
1717  {
1718  auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
1719  return CreateLogicalBinary(*logicalBinaryQueueDescriptor, info);
1720  }
1721  case LayerType::LogSoftmax:
1722  {
1723  auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
1724  return CreateLogSoftmax(*logSoftmaxQueueDescriptor, info);
1725  }
1726  case LayerType::Lstm:
1727  {
1728  auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
1729  return CreateLstm(*lstmQueueDescriptor, info);
1730  }
1731  case LayerType::Maximum:
1732  {
1733  auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
1734  return CreateMaximum(*maximumQueueDescriptor, info);
1735  }
1736  case LayerType::Mean:
1737  {
1738  auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
1739  return CreateMean(*meanQueueDescriptor, info);
1740  }
1741  case LayerType::MemCopy:
1742  {
1743  auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
1744  return CreateMemCopy(*memCopyQueueDescriptor, info);
1745  }
1746  case LayerType::MemImport:
1747  {
1748  auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
1749  return CreateMemImport(*memImportQueueDescriptor, info);
1750  }
1751  case LayerType::Minimum:
1752  {
1753  auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
1754  return CreateMinimum(*minimumQueueDescriptor, info);
1755  }
1757  {
1758  auto multiplicationQueueDescriptor
1759  = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
1760  return CreateMultiplication(*multiplicationQueueDescriptor, info);
1761  }
1763  {
1764  auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
1765  return CreateNormalization(*normalizationQueueDescriptor, info);
1766  }
1767  case LayerType::Output:
1768  {
1769  auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
1770  return CreateOutput(*outputQueueDescriptor, info);
1771  }
1772  case LayerType::Pad:
1773  {
1774  auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
1775  return CreatePad(*padQueueDescriptor, info);
1776  }
1777  case LayerType::Permute:
1778  {
1779  auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
1780  return CreatePermute(*permuteQueueDescriptor, info);
1781  }
1782  case LayerType::Pooling2d:
1783  {
1784  auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
1785  return CreatePooling2d(*pooling2dQueueDescriptor, info);
1786  }
1787  case LayerType::Pooling3d:
1788  {
1789  auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
1790  return CreatePooling3d(*pooling3dQueueDescriptor, info);
1791  }
1793  {
1794  auto preCompiledQueueDescriptor = PolymorphicDowncast<const PreCompiledQueueDescriptor*>(&descriptor);
1795  return CreatePreCompiled(*preCompiledQueueDescriptor, info);
1796  }
1797  case LayerType::Prelu:
1798  {
1799  auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
1800  return CreatePrelu(*preluQueueDescriptor, info);
1801  }
1802  case LayerType::QLstm:
1803  {
1804  auto qlstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
1805  return CreateQLstm(*qlstmQueueDescriptor, info);
1806  }
1807  case LayerType::Quantize:
1808  {
1809  auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
1810  return CreateQuantize(*quantizeQueueDescriptor, info);
1811  }
1812  case LayerType::Rank:
1813  {
1814  auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
1815  return CreateRank(*rankQueueDescriptor, info);
1816  }
1817  case LayerType::Reduce:
1818  {
1819  auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
1820  return CreateReduce(*reduceQueueDescriptor, info);
1821  }
1822  case LayerType::Reshape:
1823  {
1824  auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
1825  return CreateReshape(*reshapeQueueDescriptor, info);
1826  }
1827  case LayerType::Resize:
1828  {
1829  auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
1830  return CreateResize(*resizeQueueDescriptor, info);
1831  }
1832  case LayerType::Shape:
1833  {
1834  auto shapeQueueDescriptor = PolymorphicDowncast<const ShapeQueueDescriptor*>(&descriptor);
1835  return CreateShape(*shapeQueueDescriptor, info);
1836  }
1837  case LayerType::Slice:
1838  {
1839  auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
1840  return CreateSlice(*sliceQueueDescriptor, info);
1841  }
1842  case LayerType::Softmax:
1843  {
1844  auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
1845  return CreateSoftmax(*softmaxQueueDescriptor, info);
1846  }
1848  {
1849  auto spaceToBatchNdQueueDescriptor
1850  = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
1851  return CreateSpaceToBatchNd(*spaceToBatchNdQueueDescriptor, info);
1852  }
1854  {
1855  auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
1856  return CreateSpaceToDepth(*spaceToDepthQueueDescriptor, info);
1857  }
1858  case LayerType::Splitter:
1859  {
1860  auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
1861  return CreateSplitter(*splitterQueueDescriptor, info);
1862  }
1863  case LayerType::Stack:
1864  {
1865  auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
1866  return CreateStack(*stackQueueDescriptor, info);
1867  }
1869  {
1870  auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
1871  return CreateStridedSlice(*stridedSliceQueueDescriptor, info);
1872  }
1874  {
1875  auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
1876  return CreateSubtraction(*subtractionQueueDescriptor, info);
1877  }
1878  case LayerType::Transpose:
1879  {
1880  auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
1881  return CreateTranspose(*transposeQueueDescriptor, info);
1882  }
1884  {
1885  auto transposeConvolution2dQueueDescriptor
1886  = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
1887  return CreateTransposeConvolution2d(*transposeConvolution2dQueueDescriptor, info);
1888  }
1890  {
1891  auto unidirectionalSequenceLstmQueueDescriptor
1892  = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
1893  return CreateUnidirectionalSequenceLstm(*unidirectionalSequenceLstmQueueDescriptor, info);
1894  }
1895  default:
1896  return nullptr;
1897  }
1898 }
1900 
1901 std::unique_ptr<IWorkload> IWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& /*descriptor*/,
1902  const WorkloadInfo& /*info*/) const
1903 {
1904  return std::unique_ptr<IWorkload>();
1905 }
1906 
1907 std::unique_ptr<IWorkload> IWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& /*descriptor*/,
1908  const WorkloadInfo& /*info*/) const
1909 {
1910  return std::unique_ptr<IWorkload>();
1911 }
1912 
1913 std::unique_ptr<IWorkload> IWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& /*descriptor*/,
1914  const WorkloadInfo& /*info*/) const
1915 {
1916  return std::unique_ptr<IWorkload>();
1917 }
1918 
1919 std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchNormalization(
1920  const BatchNormalizationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
1921 {
1922  return std::unique_ptr<IWorkload>();
1923 }
1924 
1925 std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& /*desc*/,
1926  const WorkloadInfo& /*Info*/) const
1927 {
1928  return std::unique_ptr<IWorkload>();
1929 }
1930 
1931 std::unique_ptr<IWorkload> IWorkloadFactory::CreateCast(const CastQueueDescriptor& /*descriptor*/,
1932  const WorkloadInfo& /*info*/) const
1933 {
1934  return std::unique_ptr<IWorkload>();
1935 }
1936 
1937 std::unique_ptr<IWorkload> IWorkloadFactory::CreateChannelShuffle(const ChannelShuffleQueueDescriptor& /*descriptor*/,
1938  const WorkloadInfo& /*info*/) const
1939 {
1940  return std::unique_ptr<IWorkload>();
1941 }
1942 
1943 std::unique_ptr<IWorkload> IWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& /*descriptor*/,
1944  const WorkloadInfo& /*info*/) const
1945 {
1946  return std::unique_ptr<IWorkload>();
1947 }
1948 
1949 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& /*descriptor*/,
1950  const WorkloadInfo& /*info*/) const
1951 {
1952  return std::unique_ptr<IWorkload>();
1953 }
1954 
1955 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& /*descriptor*/,
1956  const WorkloadInfo& /*info*/) const
1957 {
1958  return std::unique_ptr<IWorkload>();
1959 }
1960 
1961 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor& /*desc*/,
1962  const WorkloadInfo& /*info*/) const
1963 {
1964  return std::unique_ptr<IWorkload>();
1965 }
1966 
1967 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& /*desc*/,
1968  const WorkloadInfo& /*info*/) const
1969 {
1970  return std::unique_ptr<IWorkload>();
1971 }
1972 
1973 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToBf16(const ConvertFp32ToBf16QueueDescriptor& /*desc*/,
1974  const WorkloadInfo& /*info*/) const
1975 {
1976  return std::unique_ptr<IWorkload>();
1977 }
1978 
1979 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& /*desc*/,
1980  const WorkloadInfo& /*info*/) const
1981 {
1982  return std::unique_ptr<IWorkload>();
1983 }
1984 
1985 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& /*descriptor*/,
1986  const WorkloadInfo& /*info*/) const
1987 {
1988  return std::unique_ptr<IWorkload>();
1989 }
1990 
1991 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution3d(const Convolution3dQueueDescriptor& /*descriptor*/,
1992  const WorkloadInfo& /*info*/) const
1993 {
1994  return std::unique_ptr<IWorkload>();
1995 }
1996 
1997 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDebug(const DebugQueueDescriptor& /*descriptor*/,
1998  const WorkloadInfo& /*info*/) const
1999 {
2000  return std::unique_ptr<IWorkload>();
2001 }
2002 
2003 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& /*descriptor*/,
2004  const WorkloadInfo& /*info*/) const
2005 {
2006  return std::unique_ptr<IWorkload>();
2007 }
2008 
2009 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthwiseConvolution2d(
2010  const DepthwiseConvolution2dQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
2011 {
2012  return std::unique_ptr<IWorkload>();
2013 }
2014 
2015 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDequantize(
2016  const DequantizeQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
2017 {
2018  return std::unique_ptr<IWorkload>();
2019 }
2020 
2021 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDetectionPostProcess(
2022  const DetectionPostProcessQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
2023 {
2024  return std::unique_ptr<IWorkload>();
2025 }
2026 
2027 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& /*descriptor*/,
2028  const WorkloadInfo& /*info*/) const
2029 {
2030  return std::unique_ptr<IWorkload>();
2031 }
2032 
2033 std::unique_ptr<IWorkload> IWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& /*desc*/,
2034  const WorkloadInfo& /*info*/) const
2035 {
2036  return std::unique_ptr<IWorkload>();
2037 }
2038 
2039 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& /*desc*/,
2040  const WorkloadInfo& /*info*/) const
2041 {
2042  return std::unique_ptr<IWorkload>();
2043 }
2044 
2045 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFill(const FillQueueDescriptor& /*descriptor*/,
2046  const WorkloadInfo& /*info*/) const
2047 {
2048  return std::unique_ptr<IWorkload>();
2049 }
2050 
2051 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFloor(const FloorQueueDescriptor& /*descriptor*/,
2052  const WorkloadInfo& /*info*/) const
2053 {
2054  return std::unique_ptr<IWorkload>();
2055 }
2056 
2057 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& /*descriptor*/,
2058  const WorkloadInfo& /*info*/) const
2059 {
2060  return std::unique_ptr<IWorkload>();
2061 }
2062 
2063 std::unique_ptr<IWorkload> IWorkloadFactory::CreateGather(const GatherQueueDescriptor& /*descriptor*/,
2064  const WorkloadInfo& /*info*/) const
2065 {
2066  return std::unique_ptr<IWorkload>();
2067 }
2068 
2069 std::unique_ptr<IWorkload> IWorkloadFactory::CreateInstanceNormalization(
2070  const InstanceNormalizationQueueDescriptor& /*descriptor*/,
2071  const WorkloadInfo& /*info*/) const
2072 {
2073  return std::unique_ptr<IWorkload>();
2074 }
2075 
2076 std::unique_ptr<IWorkload> IWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& /*desc*/,
2077  const WorkloadInfo& /*info*/) const
2078 {
2079  return std::unique_ptr<IWorkload>();
2080 }
2081 
2082 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogicalBinary(const LogicalBinaryQueueDescriptor& /*desc*/,
2083  const WorkloadInfo& /*info*/) const
2084 {
2085  return std::unique_ptr<IWorkload>();
2086 }
2087 
2088 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogicalUnary(const ElementwiseUnaryQueueDescriptor& /*desc*/,
2089  const WorkloadInfo& /*info*/) const
2090 {
2091  return std::unique_ptr<IWorkload>();
2092 }
2093 
2094 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& /*descriptor*/,
2095  const WorkloadInfo& /*info*/) const
2096 {
2097  return std::unique_ptr<IWorkload>();
2098 }
2099 
2100 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLstm(const LstmQueueDescriptor& /*descriptor*/,
2101  const WorkloadInfo& /*info*/) const
2102 {
2103  return std::unique_ptr<IWorkload>();
2104 }
2105 
2106 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& /*descriptor*/,
2107  const WorkloadInfo& /*info*/) const
2108 {
2109  return std::unique_ptr<IWorkload>();
2110 }
2111 
2112 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMean(const MeanQueueDescriptor& /*descriptor*/,
2113  const WorkloadInfo& /*Info*/) const
2114 {
2115  return std::unique_ptr<IWorkload>();
2116 }
2117 
2118 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& /*descriptor*/,
2119  const WorkloadInfo& /*info*/) const
2120 {
2121  return std::unique_ptr<IWorkload>();
2122 }
2123 
2124 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& /*descriptor*/,
2125  const WorkloadInfo& /*info*/) const
2126 {
2127  return std::unique_ptr<IWorkload>();
2128 }
2129 
2130 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerge(const MergeQueueDescriptor& /*descriptor*/,
2131  const WorkloadInfo& /*info*/) const
2132 {
2133  return std::unique_ptr<IWorkload>();
2134 }
2135 
2136 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& /*descriptor*/,
2137  const WorkloadInfo& /*info*/) const
2138 {
2139  return std::unique_ptr<IWorkload>();
2140 }
2141 
2142 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& /*descriptor*/,
2143  const WorkloadInfo& /*info*/) const
2144 {
2145  return std::unique_ptr<IWorkload>();
2146 }
2147 
2148 std::unique_ptr<IWorkload> IWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& /*descriptor*/,
2149  const WorkloadInfo& /*info*/) const
2150 {
2151  return std::unique_ptr<IWorkload>();
2152 }
2153 
2154 std::unique_ptr<IWorkload> IWorkloadFactory::CreateOutput(const OutputQueueDescriptor& /*descriptor*/,
2155  const WorkloadInfo& /*info*/) const
2156 {
2157  return std::unique_ptr<IWorkload>();
2158 }
2159 
2160 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePad(const PadQueueDescriptor& /*descriptor*/,
2161  const WorkloadInfo& /*Info*/) const
2162 {
2163  return std::unique_ptr<IWorkload>();
2164 }
2165 
2166 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& /*descriptor*/,
2167  const WorkloadInfo& /*info*/) const
2168 {
2169  return std::unique_ptr<IWorkload>();
2170 }
2171 
2172 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& /*descriptor*/,
2173  const WorkloadInfo& /*info*/) const
2174 {
2175  return std::unique_ptr<IWorkload>();
2176 }
2177 
2178 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePooling3d(const Pooling3dQueueDescriptor& /*descriptor*/,
2179  const WorkloadInfo& /*info*/) const
2180 {
2181  return std::unique_ptr<IWorkload>();
2182 }
2183 
2184 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& /*descriptor*/,
2185  const WorkloadInfo& /*info*/) const
2186 {
2187  return std::unique_ptr<IWorkload>();
2188 }
2189 
2190 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePrelu(const PreluQueueDescriptor &/*descriptor*/,
2191  const WorkloadInfo &/*info*/) const
2192 {
2193  return std::unique_ptr<IWorkload>();
2194 }
2195 
2196 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& /*descriptor*/,
2197  const WorkloadInfo& /*Info*/) const
2198 {
2199  return std::unique_ptr<IWorkload>();
2200 }
2201 
2202 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQLstm(const QLstmQueueDescriptor& /*descriptor*/,
2203  const WorkloadInfo& /*info*/) const
2204 {
2205  return std::unique_ptr<IWorkload>();
2206 }
2207 
2208 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& /*descriptor*/,
2209  const WorkloadInfo& /*info*/) const
2210 {
2211  return std::unique_ptr<IWorkload>();
2212 }
2213 std::unique_ptr<IWorkload> IWorkloadFactory::CreateRank(const RankQueueDescriptor& /*descriptor*/,
2214  const WorkloadInfo& /*info*/) const
2215 {
2216  return std::unique_ptr<IWorkload>();
2217 }
2218 
2219 std::unique_ptr<IWorkload> IWorkloadFactory::CreateReduce(const ReduceQueueDescriptor& /*descriptor*/,
2220  const WorkloadInfo& /*info*/) const
2221 {
2222  return std::unique_ptr<IWorkload>();
2223 }
2224 
2225 std::unique_ptr<IWorkload> IWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& /*descriptor*/,
2226  const WorkloadInfo& /*info*/) const
2227 {
2228  return std::unique_ptr<IWorkload>();
2229 }
2230 
2231 std::unique_ptr<IWorkload> IWorkloadFactory::CreateResize(const ResizeQueueDescriptor& /*descriptor*/,
2232  const WorkloadInfo& /*info*/) const
2233 {
2234  return std::unique_ptr<IWorkload>();
2235 }
2236 
2237 std::unique_ptr<IWorkload> IWorkloadFactory::CreateShape(const ShapeQueueDescriptor& /*descriptor*/,
2238  const WorkloadInfo& /*info*/) const
2239 {
2240  return std::unique_ptr<IWorkload>();
2241 }
2242 
2243 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSlice(const SliceQueueDescriptor& /*descriptor*/,
2244  const WorkloadInfo& /*info*/) const
2245 {
2246  return std::unique_ptr<IWorkload>();
2247 }
2248 
2249 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& /*descriptor*/,
2250  const WorkloadInfo& /*info*/) const
2251 {
2252  return std::unique_ptr<IWorkload>();
2253 }
2254 
2255 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& /*descriptor*/,
2256  const WorkloadInfo& /*info*/) const
2257 {
2258  return std::unique_ptr<IWorkload>();
2259 }
2260 
2261 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& /*descriptor*/,
2262  const WorkloadInfo& /*info*/) const
2263 {
2264  return std::unique_ptr<IWorkload>();
2265 }
2266 
2267 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& /*descriptor*/,
2268  const WorkloadInfo& /*info*/) const
2269 {
2270  return std::unique_ptr<IWorkload>();
2271 }
2272 
2273 std::unique_ptr<IWorkload> IWorkloadFactory::CreateStack(const StackQueueDescriptor& /*descriptor*/,
2274  const WorkloadInfo& /*info*/) const
2275 {
2276  return std::unique_ptr<IWorkload>();
2277 }
2278 
2279 std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& /*descriptor*/,
2280  const WorkloadInfo& /*info*/) const
2281 {
2282  return std::unique_ptr<IWorkload>();
2283 }
2284 
2285 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& /*descriptor*/,
2286  const WorkloadInfo& /*info*/) const
2287 {
2288  return std::unique_ptr<IWorkload>();
2289 }
2290 
2291 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSwitch(const SwitchQueueDescriptor& /*descriptor*/,
2292  const WorkloadInfo& /*info*/) const
2293 {
2294  return std::unique_ptr<IWorkload>();
2295 }
2296 
2297 std::unique_ptr<IWorkload> IWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& /*descriptor*/,
2298  const WorkloadInfo& /*info*/) const
2299 {
2300  return std::unique_ptr<IWorkload>();
2301 }
2302 
2303 std::unique_ptr<IWorkload> IWorkloadFactory::CreateTransposeConvolution2d(
2304  const TransposeConvolution2dQueueDescriptor& /*descriptor*/,
2305  const WorkloadInfo& /*info*/) const
2306 {
2307  return std::unique_ptr<IWorkload>();
2308 }
2309 
2310 std::unique_ptr<IWorkload> IWorkloadFactory::CreateUnidirectionalSequenceLstm(
2311  const UnidirectionalSequenceLstmQueueDescriptor& /*descriptor*/,
2312  const WorkloadInfo& /*info*/) const
2313 {
2314  return std::unique_ptr<IWorkload>();
2315 }
2316 
} // namespace armnn
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:66
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
std::vector< BackendOptions > ModelOptions
constexpr TransformIterator< Function, Iterator > MakeTransformIterator(Iterator i, Function f)
BackendRegistry & BackendRegistryInstance()
Copyright (c) 2021 ARM Limited and Contributors.
armnn::Optional< armnn::DataType > GetBiasTypeFromWeightsType(armnn::Optional< armnn::DataType > weightsType)
virtual std::unique_ptr< IWorkload > CreateInput(const InputQueueDescriptor &descriptor, const WorkloadInfo &info) const =0
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
LstmDescriptor UnidirectionalSequenceLstmDescriptor
static bool IsLayerSupported(const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
Contains information about TensorInfos of a layer.
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const
Depthwise Convolution 2D layer workload data.
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below...
Definition: Types.hpp:458