WorkloadFactory.cpp (ArmNN 23.02)
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <Layer.hpp>
#include <LayersFwd.hpp>

#include <armnn/Types.hpp>
#include <armnn/LayerSupport.hpp>
#include <armnn/ILayerSupport.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <armnn/utility/TransformIterator.hpp>

#include <armnn/backends/WorkloadFactory.hpp>
#include <armnn/backends/TensorHandle.hpp>

#include <sstream>
namespace armnn
{

namespace
{
using LayerList = std::list<Layer*>;
using Iterator = LayerList::const_iterator; // Const so pointers in the list can't be modified externally.
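// Returns a copy of 'info' with its data type replaced by 'type', or 'info' unchanged
// when no override type is supplied.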
const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
{
    if (!type)
    {
        return info;
    }

    return TensorInfo(info.GetShape(),
                      type.value(),
                      info.GetQuantizationScale(),
                      info.GetQuantizationOffset(),
                      info.IsConstant());
}
} // anonymous namespace
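// Maps a weights data type to the matching bias data type: floating-point weights keep
// their own type, while quantized weights require Signed32 biases.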
Optional<DataType> GetBiasTypeFromWeightsType(Optional<DataType> weightsType)
{
    if (!weightsType)
    {
        return weightsType;
    }

    switch(weightsType.value())
    {
        case DataType::BFloat16:
        case DataType::Float16:
        case DataType::Float32:
            return weightsType;
        case DataType::QAsymmS8:
        case DataType::QAsymmU8:
        case DataType::QSymmS8:
        case DataType::QSymmS16:
            return DataType::Signed32;
        default:
            ARMNN_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
    }
    return armnn::EmptyOptional();
}
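// Checks whether a layer configuration (layer, optional data type override and model options)
// is supported on the given backend, writing the reason to outReasonIfUnsupported when it is not.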
bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
                                                     const IConnectableLayer& connectableLayer,
                                                     Optional<DataType> dataType,
                                                     std::string& outReasonIfUnsupported,
                                                     const ModelOptions& modelOptions)
{
    Optional<std::string&> reason = outReasonIfUnsupported;
    bool result;
    const Layer& layer = *(PolymorphicDowncast<const Layer*>(&connectableLayer));

    auto const& backendRegistry = BackendRegistryInstance();
    if (!backendRegistry.IsBackendRegistered(backendId))
    {
        std::stringstream ss;
        ss << connectableLayer.GetName() << " is not supported on " << backendId
           << " because this backend is not registered.";

        outReasonIfUnsupported = ss.str();
        return false;
    }
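    // Query the registered backend for its layer-support object, taking any model options into account.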
    auto backendFactory = backendRegistry.GetFactory(backendId);
    auto backendObject = backendFactory();
    auto layerSupportObject = LayerSupportHandle(backendObject->GetLayerSupport(modelOptions), backendId);

    switch(layer.GetType())
    {
        case LayerType::Activation:
        {
            auto cLayer = PolymorphicDowncast<const ActivationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsActivationSupported(
                                            OverrideDataType(input, dataType),
                                            OverrideDataType(output, dataType),
                                            cLayer->GetParameters(),
                                            reason);
            break;
        }
        case LayerType::Addition:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsAdditionSupported(
                                            OverrideDataType(input0, dataType),
                                            OverrideDataType(input1, dataType),
                                            OverrideDataType(output, dataType),
                                            reason);
            break;
        }
        case LayerType::ArgMinMax:
        {
            auto cLayer = PolymorphicDowncast<const ArgMinMaxLayer*>(&layer);
            const ArgMinMaxDescriptor& descriptor = cLayer->GetParameters();

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
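            // ArgMin/ArgMax produce index tensors, so the output is always validated as Signed32.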
            result = layerSupportObject.IsArgMinMaxSupported(
                                            OverrideDataType(input, dataType),
                                            OverrideDataType(output, DataType::Signed32),
                                            descriptor,
                                            reason);
            break;
        }
        case LayerType::BatchMatMul:
        {
            auto cLayer = PolymorphicDowncast<const BatchMatMulLayer*>(&layer);
            const BatchMatMulDescriptor& descriptor = cLayer->GetParameters();

            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsBatchMatMulSupported(
                                            OverrideDataType(input0, dataType),
                                            OverrideDataType(input1, dataType),
                                            OverrideDataType(output, dataType),
                                            descriptor,
                                            reason);
            break;
        }
        case LayerType::BatchNormalization:
        {
            auto cLayer = PolymorphicDowncast<const BatchNormalizationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            const TensorInfo& mean = cLayer->m_Mean->GetTensorInfo();
            const TensorInfo& var = cLayer->m_Variance->GetTensorInfo();
            const TensorInfo& beta = cLayer->m_Beta->GetTensorInfo();
            const TensorInfo& gamma = cLayer->m_Gamma->GetTensorInfo();
            result = layerSupportObject.IsBatchNormalizationSupported(
                                            OverrideDataType(input, dataType),
                                            OverrideDataType(output, dataType),
                                            OverrideDataType(mean, dataType),
                                            OverrideDataType(var, dataType),
                                            OverrideDataType(beta, dataType),
                                            OverrideDataType(gamma, dataType),
                                            cLayer->GetParameters(),
                                            reason);
            break;
        }
        case LayerType::BatchToSpaceNd:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            auto cLayer = PolymorphicDowncast<const BatchToSpaceNdLayer*>(&layer);

            result = layerSupportObject.IsBatchToSpaceNdSupported(OverrideDataType(input, dataType),
                                                                  OverrideDataType(output, dataType),
                                                                  cLayer->GetParameters(),
                                                                  reason);
            break;
        }
        case LayerType::Cast:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsCastSupported(OverrideDataType(input, dataType),
                                                        OverrideDataType(output, dataType),
                                                        reason);
            break;
        }
        case LayerType::ChannelShuffle:
        {
            auto cLayer = PolymorphicDowncast<const ChannelShuffleLayer*>(&layer);

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            const ChannelShuffleDescriptor descriptor = cLayer->GetParameters();

            result = layerSupportObject.IsChannelShuffleSupported(OverrideDataType(input, dataType),
                                                                  OverrideDataType(output, dataType),
                                                                  descriptor,
                                                                  reason);
            break;
        }
        case LayerType::Comparison:
        {
            auto cLayer = PolymorphicDowncast<const ComparisonLayer*>(&layer);

            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
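            // Comparison results are Boolean, so the output type override is fixed to Boolean.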
            result = layerSupportObject.IsComparisonSupported(OverrideDataType(input0, dataType),
                                                              OverrideDataType(input1, dataType),
                                                              OverrideDataType(output, DataType::Boolean),
                                                              cLayer->GetParameters(),
                                                              reason);
            break;
        }
        case LayerType::Constant:
        {
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsConstantSupported(OverrideDataType(output, dataType), reason);
            break;
        }
        case LayerType::ConvertFp16ToFp32:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsConvertFp16ToFp32Supported(input, output, reason);
            break;
        }
        case LayerType::ConvertFp32ToFp16:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsConvertFp32ToFp16Supported(input, output, reason);
            break;
        }
        case LayerType::Convolution2d:
        {
            auto cLayer = PolymorphicDowncast<const Convolution2dLayer*>(&layer);

            const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                      dataType);
            const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
            ARMNN_ASSERT_MSG(layer.GetInputSlot(1).GetConnection(),
                             "Convolution2dLayer: Weights should be connected as a Constant Layer.");
            const TensorInfo weights = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
                                                        dataType);

            const Convolution2dDescriptor& descriptor = cLayer->GetParameters();

            // Construct optional biases object based on the value of m_BiasEnabled
            Optional<TensorInfo> biases;
            if (descriptor.m_BiasEnabled)
            {
                ARMNN_ASSERT_MSG(layer.GetInputSlot(2).GetConnection(),
                                 "Convolution2dLayer: Bias should be connected as a Constant Layer.");
                biases = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
                                          GetBiasTypeFromWeightsType(dataType));
            }

            result = layerSupportObject.IsConvolution2dSupported(
                                            input,
                                            output,
                                            descriptor,
                                            weights,
                                            biases,
                                            reason);
            break;
        }
        case LayerType::Convolution3d:
        {
            auto cLayer = PolymorphicDowncast<const Convolution3dLayer*>(&layer);

            const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                      dataType);
            const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);

            ARMNN_ASSERT_MSG(layer.GetInputSlot(1).GetConnection(),
                             "Convolution3dLayer: Weights should be connected as a Constant Layer.");
            const TensorInfo weights = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
                                                        dataType);

            const Convolution3dDescriptor& descriptor = cLayer->GetParameters();

            // Construct optional biases object based on the value of m_BiasEnabled
            Optional<TensorInfo> biases;
            if (descriptor.m_BiasEnabled)
            {
                biases = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
                                          GetBiasTypeFromWeightsType(dataType));
            }

            result = layerSupportObject.IsConvolution3dSupported(
                                            input,
                                            output,
                                            descriptor,
                                            weights,
                                            biases,
                                            reason);
            break;
        }
        case LayerType::Debug:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsDebugSupported(OverrideDataType(input, dataType),
                                                         OverrideDataType(output, dataType),
                                                         reason);
            break;
        }
        case LayerType::DepthToSpace:
        {
            auto cLayer = PolymorphicDowncast<const DepthToSpaceLayer*>(&layer);

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsDepthToSpaceSupported(OverrideDataType(input, dataType),
                                                                OverrideDataType(output, dataType),
                                                                cLayer->GetParameters(),
                                                                reason);
            break;
        }
        case LayerType::DepthwiseConvolution2d:
        {
            auto cLayer = PolymorphicDowncast<const DepthwiseConvolution2dLayer*>(&layer);
            const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                       dataType);
            const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
            const TensorInfo& weights = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
                                                         dataType);

            ARMNN_ASSERT(cLayer->GetInputSlot(1).GetConnection() != nullptr);

            const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();

            // Construct optional biases object based on the value of m_BiasEnabled
            Optional<TensorInfo> biases;
            if (descriptor.m_BiasEnabled)
            {
                biases = OverrideDataType(cLayer->GetInputSlot(2).GetConnection()->GetTensorInfo(),
                                          GetBiasTypeFromWeightsType(dataType));
            }

            result = layerSupportObject.IsDepthwiseConvolutionSupported(input,
                                                                        output,
                                                                        descriptor,
                                                                        weights,
                                                                        biases,
                                                                        reason);
            break;
        }
        case LayerType::Dequantize:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsDequantizeSupported(input,
                                                              OverrideDataType(output, dataType),
                                                              reason);
            break;
        }
        case LayerType::DetectionPostProcess:
        {
            auto cLayer = PolymorphicDowncast<const DetectionPostProcessLayer*>(&layer);
            const TensorInfo& boxEncodings = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& scores = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& anchors = cLayer->m_Anchors->GetTensorInfo();

            const TensorInfo& detectionBoxes = layer.GetOutputSlot(0).GetTensorInfo();
            const TensorInfo& detectionClasses = layer.GetOutputSlot(1).GetTensorInfo();
            const TensorInfo& detectionScores = layer.GetOutputSlot(2).GetTensorInfo();
            const TensorInfo& numDetections = layer.GetOutputSlot(3).GetTensorInfo();

            const DetectionPostProcessDescriptor& descriptor = cLayer->GetParameters();
            result = layerSupportObject.IsDetectionPostProcessSupported(boxEncodings,
                                                                        scores,
                                                                        anchors,
                                                                        detectionBoxes,
                                                                        detectionClasses,
                                                                        detectionScores,
                                                                        numDetections,
                                                                        descriptor,
                                                                        reason);
            break;
        }
        case LayerType::ElementwiseUnary:
        {
            auto cLayer = PolymorphicDowncast<const ElementwiseUnaryLayer*>(&layer);

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsElementwiseUnarySupported(OverrideDataType(input, dataType),
                                                                    OverrideDataType(output, dataType),
                                                                    cLayer->GetParameters(),
                                                                    reason);
            break;
        }
        case LayerType::Fill:
        {
            auto cLayer = PolymorphicDowncast<const FillLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            const FillDescriptor& descriptor = cLayer->GetParameters();

            result = layerSupportObject.IsFillSupported(
                                            OverrideDataType(input, dataType),
                                            OverrideDataType(output, dataType),
                                            descriptor,
                                            reason);
            break;
        }
        case LayerType::FakeQuantization:
        {
            auto cLayer = PolymorphicDowncast<const FakeQuantizationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = layerSupportObject.IsFakeQuantizationSupported(OverrideDataType(input, dataType),
                                                                    cLayer->GetParameters(),
                                                                    reason);
            break;
        }
        case LayerType::Floor:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsFloorSupported(OverrideDataType(input, dataType),
                                                         OverrideDataType(output, dataType),
                                                         reason);
            break;
        }
        case LayerType::FullyConnected:
        {
            auto cLayer = PolymorphicDowncast<const FullyConnectedLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            const FullyConnectedDescriptor& descriptor = cLayer->GetParameters();
            TensorInfo weightsInfo;
            const TensorInfo* weightsInfoPtr = nullptr;

            weightsInfo = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(), dataType);
            weightsInfoPtr = &weightsInfo;

            TensorInfo biasInfo;
            const TensorInfo* biasInfoPtr = nullptr;
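            // The support query below always dereferences biasInfoPtr, so when biases are
            // disabled a static dummy bias info matching the input data type is used instead.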
            static const TensorInfo dummyBFloat16Bias(TensorShape({1,1,1,1}), DataType::BFloat16);
            static const TensorInfo dummyFloat16Bias(TensorShape({1,1,1,1}), DataType::Float16);
            static const TensorInfo dummyFloat32Bias(TensorShape({1,1,1,1}), DataType::Float32);
            static const TensorInfo dummyQA8Bias(TensorShape({1,1,1,1}), DataType::Signed32);

            if (descriptor.m_BiasEnabled)
            {
                biasInfo = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(), dataType);
                biasInfoPtr = &biasInfo;
            }
            else
            {
                // If biases are not enabled pass a dummy tensorinfo for the validation
                switch(input.GetDataType())
                {
                    case DataType::BFloat16:
                    {
                        biasInfoPtr = &dummyBFloat16Bias;
                        break;
                    }
                    case DataType::Float16:
                    {
                        biasInfoPtr = &dummyFloat16Bias;
                        break;
                    }
                    case DataType::Float32:
                    {
                        biasInfoPtr = &dummyFloat32Bias;
                        break;
                    }
                    case DataType::QAsymmU8:
                    case DataType::QAsymmS8:
                    case DataType::QSymmS8:
                    case DataType::QSymmS16:
                    {
                        biasInfoPtr = &dummyQA8Bias;
                        break;
                    }
                    default:
                    {
                        ARMNN_ASSERT_MSG(false, "Unexpected bias type");
                    }
                }
            }
            result = layerSupportObject.IsFullyConnectedSupported(
                                            OverrideDataType(input, dataType),
                                            OverrideDataType(output, dataType),
                                            *weightsInfoPtr,
                                            *biasInfoPtr,
                                            descriptor,
                                            reason);
            break;
        }
        case LayerType::Gather:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            auto cLayer = PolymorphicDowncast<const GatherLayer*>(&layer);
            const GatherDescriptor& descriptor = cLayer->GetParameters();
            result = layerSupportObject.IsGatherSupported(OverrideDataType(input0, dataType),
                                                          input1,
                                                          OverrideDataType(output, dataType),
                                                          descriptor,
                                                          reason);
            break;
        }
        case LayerType::GatherNd:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsGatherNdSupported(OverrideDataType(input0, dataType),
                                                            input1,
                                                            OverrideDataType(output, dataType),
                                                            reason);
            break;
        }
        case LayerType::Input:
        {
            const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsInputSupported(OverrideDataType(input, dataType), reason);
            break;
        }
        case LayerType::InstanceNormalization:
        {
            auto cLayer = PolymorphicDowncast<const InstanceNormalizationLayer*>(&layer);
            const InstanceNormalizationDescriptor& descriptor = cLayer->GetParameters();

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsInstanceNormalizationSupported(
                                            OverrideDataType(input, dataType),
                                            OverrideDataType(output, dataType),
                                            descriptor,
                                            reason);
            break;
        }
        case LayerType::L2Normalization:
        {
            auto cLayer = PolymorphicDowncast<const L2NormalizationLayer*>(&layer);
            const L2NormalizationDescriptor& descriptor = cLayer->GetParameters();

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsL2NormalizationSupported(
                                            OverrideDataType(input, dataType),
                                            OverrideDataType(output, dataType),
                                            descriptor,
                                            reason);
            break;
        }
        case LayerType::LogicalBinary:
        {
            auto cLayer = PolymorphicDowncast<const LogicalBinaryLayer*>(&layer);

            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsLogicalBinarySupported(input0,
                                                                 input1,
                                                                 output,
                                                                 cLayer->GetParameters(),
                                                                 reason);
            break;
        }
        case LayerType::LogSoftmax:
        {
            auto cLayer = PolymorphicDowncast<const LogSoftmaxLayer*>(&layer);

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsLogSoftmaxSupported(OverrideDataType(input, dataType),
                                                              OverrideDataType(output, dataType),
                                                              cLayer->GetParameters(),
                                                              reason);
            break;
        }
        case LayerType::Lstm:
        {
            auto cLayer = PolymorphicDowncast<const LstmLayer*>(&layer);
            const LstmDescriptor& descriptor = cLayer->GetParameters();

            // All inputs.
            const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                       dataType);
            const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
                                                               dataType);
            const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
                                                             dataType);
            // All outputs
            const TensorInfo& scratchBuffer = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
            const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
            const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
            const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(3).GetTensorInfo(), dataType);

            // Basic parameters
            const TensorInfo& inputToForgetWeights
                = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
            const TensorInfo& inputToCellWeights
                = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
            const TensorInfo& inputToOutputWeights
                = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
            const TensorInfo& recurrentToForgetWeights
                = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
            const TensorInfo& recurrentToCellWeights
                = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
            const TensorInfo& recurrentToOutputWeights
                = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
            const TensorInfo& forgetGateBias
                = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
            const TensorInfo& cellBias
                = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
            const TensorInfo& outputGateBias
                = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);
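            // Collect pointers to the mandatory LSTM parameters; optional parameters are
            // added below only when the descriptor enables them.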
            LstmInputParamsInfo paramsInfo;

            paramsInfo.m_InputToForgetWeights = &inputToForgetWeights;
            paramsInfo.m_InputToCellWeights = &inputToCellWeights;
            paramsInfo.m_InputToOutputWeights = &inputToOutputWeights;
            paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
            paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights;
            paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
            paramsInfo.m_ForgetGateBias = &forgetGateBias;
            paramsInfo.m_CellBias = &cellBias;
            paramsInfo.m_OutputGateBias = &outputGateBias;
            // Optional parameters
            TensorInfo optInputToInputWeights;
            TensorInfo optRecurrentToInputWeights;
            TensorInfo optCellToInputWeights;
            TensorInfo optInputGateBias;
            TensorInfo optProjectionWeights;
            TensorInfo optProjectionBias;
            TensorInfo optCellToForgetWeights;
            TensorInfo optCellToOutputWeights;
            TensorInfo optInputLayerNormWeights;
            TensorInfo optForgetLayerNormWeights;
            TensorInfo optCellLayerNormWeights;
            TensorInfo optOutputLayerNormWeights;

            if(!descriptor.m_CifgEnabled)
            {
                optInputToInputWeights =
                    OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
                paramsInfo.m_InputToInputWeights = &optInputToInputWeights;

                optRecurrentToInputWeights =
                    OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
                paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
                optInputGateBias =
                    OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
                paramsInfo.m_InputGateBias = &optInputGateBias;
            }

            if(descriptor.m_ProjectionEnabled)
            {
                optProjectionWeights =
                    OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
                paramsInfo.m_ProjectionWeights = &optProjectionWeights;
                if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
                {
                    optProjectionBias =
                        OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
                    paramsInfo.m_ProjectionBias = &optProjectionBias;
                }
            }

            if(descriptor.m_PeepholeEnabled)
            {
                if(!descriptor.m_CifgEnabled)
                {
                    optCellToInputWeights =
                        OverrideDataType(cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
                                         dataType);
                    paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
                }
                optCellToForgetWeights =
                    OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
                paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
                optCellToOutputWeights =
                    OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
                paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
            }

            if(descriptor.m_LayerNormEnabled)
            {
                if (!descriptor.m_CifgEnabled)
                {
                    optInputLayerNormWeights = OverrideDataType(
                        cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
                    paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
                }

                optForgetLayerNormWeights = OverrideDataType(
                    cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
                paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;

                optCellLayerNormWeights = OverrideDataType(
                    cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
                paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;

                optOutputLayerNormWeights = OverrideDataType(
                    cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
                paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
            }

            result = layerSupportObject.IsLstmSupported(
                                            input,
                                            outputStateIn,
                                            cellStateIn,
                                            scratchBuffer,
                                            outputStateOut,
                                            cellStateOut,
                                            output,
                                            descriptor,
                                            paramsInfo,
                                            reason);
            break;
        }
        case LayerType::Maximum:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsMaximumSupported(OverrideDataType(input0, dataType),
                                                           OverrideDataType(input1, dataType),
                                                           OverrideDataType(output, dataType),
                                                           reason);
            break;
        }
        case LayerType::MemCopy:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsMemCopySupported(OverrideDataType(input, dataType),
                                                           OverrideDataType(output, dataType),
                                                           reason);
            break;
        }
        case LayerType::MemImport:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsMemImportSupported(OverrideDataType(input, dataType),
                                                             OverrideDataType(output, dataType),
                                                             reason);
            break;
        }
        case LayerType::Merge:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsMergeSupported(OverrideDataType(input0, dataType),
                                                         OverrideDataType(input1, dataType),
                                                         OverrideDataType(output, dataType),
                                                         reason);
            break;
        }
        case LayerType::Concat:
        {
            auto cLayer = PolymorphicDowncast<const ConcatLayer*>(&layer);

            // Get vector of all inputs.
            auto getTensorInfo = [&dataType](const InputSlot& slot)
                {
                    return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
                };

            auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo);
            auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo);
            std::vector<TensorInfo> inputs(beginI, endI);
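            // IsConcatSupported takes pointers, so build a parallel vector of pointers into 'inputs'.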
            auto getTensorInfoPtr = [](const TensorInfo& info)
                {
                    return &info;
                };

            auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
            auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
            std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);

            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsConcatSupported(inputPtrs, output, cLayer->GetParameters(), reason);

            break;
        }
        case LayerType::Multiplication:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsMultiplicationSupported(
                                            OverrideDataType(input0, dataType),
                                            OverrideDataType(input1, dataType),
                                            OverrideDataType(output, dataType),
                                            reason);
            break;
        }
        case LayerType::Normalization:
        {
            auto cLayer = PolymorphicDowncast<const NormalizationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsNormalizationSupported(OverrideDataType(input, dataType),
                                                                 OverrideDataType(output, dataType),
                                                                 cLayer->GetParameters(),
                                                                 reason);
            break;
        }
        case LayerType::Output:
        {
            const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = layerSupportObject.IsOutputSupported(OverrideDataType(output, dataType), reason);
            break;
        }
        case LayerType::Permute:
        {
            auto cLayer = PolymorphicDowncast<const PermuteLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsPermuteSupported(OverrideDataType(input, dataType),
                                                           OverrideDataType(output, dataType),
                                                           cLayer->GetParameters(),
                                                           reason);
            break;
        }
        case LayerType::Pad:
        {
            auto cLayer = PolymorphicDowncast<const PadLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsPadSupported(
                                            OverrideDataType(input, dataType),
                                            OverrideDataType(output, dataType),
                                            cLayer->GetParameters(),
                                            reason);
            break;
        }
        case LayerType::Pooling2d:
        {
            auto cLayer = PolymorphicDowncast<const Pooling2dLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsPooling2dSupported(OverrideDataType(input, dataType),
                                                             OverrideDataType(output, dataType),
                                                             cLayer->GetParameters(),
                                                             reason);
            break;
        }
        case LayerType::Pooling3d:
        {
            auto cLayer = PolymorphicDowncast<const Pooling3dLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsPooling3dSupported(OverrideDataType(input, dataType),
                                                             OverrideDataType(output, dataType),
                                                             cLayer->GetParameters(),
                                                             reason);
            break;
        }
        case LayerType::PreCompiled:
        {
            auto cLayer = PolymorphicDowncast<const PreCompiledLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = layerSupportObject.IsPreCompiledSupported(OverrideDataType(input, dataType),
                                                               cLayer->GetParameters(),
                                                               reason);
            break;
        }
        case LayerType::Quantize:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsQuantizeSupported(input, output, reason);
            break;
        }
        case LayerType::QLstm:
        {
            auto cLayer = PolymorphicDowncast<const QLstmLayer*>(&layer);
            const QLstmDescriptor& descriptor = cLayer->GetParameters();

            // Inputs
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& previousOutputIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& previousCellStateIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();

            // Outputs
            const TensorInfo& outputStateOut = layer.GetOutputSlot(0).GetTensorInfo();
            const TensorInfo& cellStateOut = layer.GetOutputSlot(1).GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(2).GetTensorInfo();

            // Lstm parameters
            LstmInputParamsInfo paramsInfo;

            // Basic parameters
            ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToForgetWeights.get() != nullptr);
            ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToCellWeights.get() != nullptr);
            ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToOutputWeights.get() != nullptr);
            paramsInfo.m_InputToForgetWeights = &cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo();
            paramsInfo.m_InputToCellWeights = &cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo();
            paramsInfo.m_InputToOutputWeights = &cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo();

            paramsInfo.m_RecurrentToForgetWeights =
                &cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo();
            paramsInfo.m_RecurrentToCellWeights =
                &cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo();
            paramsInfo.m_RecurrentToOutputWeights =
                &cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo();

            paramsInfo.m_ForgetGateBias = &cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo();
            paramsInfo.m_CellBias = &cLayer->m_BasicParameters.m_CellBias->GetTensorInfo();
            paramsInfo.m_OutputGateBias = &cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo();

            if(!descriptor.m_CifgEnabled)
            {
                paramsInfo.m_InputToInputWeights = &cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo();
                paramsInfo.m_RecurrentToInputWeights =
                    &cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo();
                paramsInfo.m_InputGateBias = &cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo();
            }

            if(descriptor.m_ProjectionEnabled)
            {
                paramsInfo.m_ProjectionWeights = &cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo();

                // Projection bias is optional even if projection is enabled
                if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
                {
                    paramsInfo.m_ProjectionBias = &cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo();
                }
            }

            if(descriptor.m_PeepholeEnabled)
            {
                if (!descriptor.m_CifgEnabled)
                {
                    paramsInfo.m_CellToInputWeights =
                        &cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo();
                }

                paramsInfo.m_CellToForgetWeights =
                    &cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo();
                paramsInfo.m_CellToOutputWeights = &cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo();
            }

            if(descriptor.m_LayerNormEnabled)
            {
                if (!descriptor.m_CifgEnabled)
                {
                    paramsInfo.m_InputLayerNormWeights =
                        &cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo();
                }

                paramsInfo.m_ForgetLayerNormWeights =
                    &cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo();
                paramsInfo.m_CellLayerNormWeights =
                    &cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo();
                paramsInfo.m_OutputLayerNormWeights =
                    &cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo();
            }

            result = layerSupportObject.IsQLstmSupported(input,
                                                         previousOutputIn,
                                                         previousCellStateIn,
                                                         outputStateOut,
                                                         cellStateOut,
                                                         output,
                                                         descriptor,
                                                         paramsInfo,
                                                         reason);
            break;
        }
        case LayerType::QuantizedLstm:
        {
            auto cLayer = PolymorphicDowncast<const QuantizedLstmLayer*>(&layer);

            // Inputs
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& previousCellStateIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& previousOutputIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();

            // Outputs
            const TensorInfo& cellStateOut = layer.GetOutputSlot(0).GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(1).GetTensorInfo();

            // QuantizedLstm parameters
            QuantizedLstmInputParamsInfo paramsInfo;

            paramsInfo.m_InputToInputWeights =
                &cLayer->m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo();
            paramsInfo.m_InputToForgetWeights =
                &cLayer->m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo();
            paramsInfo.m_InputToCellWeights =
                &cLayer->m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo();
            paramsInfo.m_InputToOutputWeights =
                &cLayer->m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo();

            paramsInfo.m_RecurrentToInputWeights =
                &cLayer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo();
            paramsInfo.m_RecurrentToForgetWeights =
                &cLayer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo();
            paramsInfo.m_RecurrentToCellWeights =
                &cLayer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo();
            paramsInfo.m_RecurrentToOutputWeights =
                &cLayer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo();

            paramsInfo.m_InputGateBias =
                &cLayer->m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo();
            paramsInfo.m_ForgetGateBias =
                &cLayer->m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo();
            paramsInfo.m_CellBias =
                &cLayer->m_QuantizedLstmParameters.m_CellBias->GetTensorInfo();
            paramsInfo.m_OutputGateBias =
                &cLayer->m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo();

            result = layerSupportObject.IsQuantizedLstmSupported(input,
                                                                 previousCellStateIn,
                                                                 previousOutputIn,
                                                                 cellStateOut,
                                                                 output,
                                                                 paramsInfo,
                                                                 reason);
            break;
        }
        case LayerType::Division:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsDivisionSupported(
                                            OverrideDataType(input0, dataType),
                                            OverrideDataType(input1, dataType),
                                            OverrideDataType(output, dataType),
                                            reason);
            break;
        }
        case LayerType::Rank:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsRankSupported(OverrideDataType(input, dataType),
                                                        OverrideDataType(output, dataType),
                                                        reason);
            break;
        }
        case LayerType::Reshape:
        {
            auto cLayer = PolymorphicDowncast<const ReshapeLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsReshapeSupported(OverrideDataType(input, dataType),
                                                           OverrideDataType(output, dataType),
                                                           cLayer->GetParameters(),
                                                           reason);
            break;
        }
        case LayerType::Resize:
        {
            auto cLayer = PolymorphicDowncast<const ResizeLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsResizeSupported(OverrideDataType(input, dataType),
                                                          OverrideDataType(output, dataType),
                                                          cLayer->GetParameters(),
                                                          reason);
            break;
        }
        case LayerType::Shape:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsShapeSupported(OverrideDataType(input, dataType),
                                                         OverrideDataType(output, dataType),
                                                         reason);
            break;
        }
        case LayerType::Slice:
        {
            auto cLayer = PolymorphicDowncast<const SliceLayer*>(&layer);

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsSliceSupported(OverrideDataType(input, dataType),
                                                         OverrideDataType(output, dataType),
                                                         cLayer->GetParameters(),
                                                         reason);
            break;
        }
        case LayerType::Softmax:
        {
            auto cLayer = PolymorphicDowncast<const SoftmaxLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsSoftmaxSupported(OverrideDataType(input, dataType),
                                                           OverrideDataType(output, dataType),
                                                           cLayer->GetParameters(),
                                                           reason);
            break;
        }
        case LayerType::SpaceToBatchNd:
        {
            auto cLayer = PolymorphicDowncast<const SpaceToBatchNdLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsSpaceToBatchNdSupported(OverrideDataType(input, dataType),
                                                                  OverrideDataType(output, dataType),
                                                                  cLayer->GetParameters(),
                                                                  reason);
            break;
        }
        case LayerType::SpaceToDepth:
        {
            auto cLayer = PolymorphicDowncast<const SpaceToDepthLayer*>(&layer);

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsSpaceToDepthSupported(OverrideDataType(input, dataType),
                                                                OverrideDataType(output, dataType),
                                                                cLayer->GetParameters(),
                                                                reason);
            break;
        }
        case LayerType::Splitter:
        {
            auto cLayer = PolymorphicDowncast<const SplitterLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();

            // Get vector of all outputs.
            auto getTensorInfo = [&dataType](const OutputSlot& slot)
                {
                    return OverrideDataType(slot.GetTensorInfo(), dataType);
                };
            auto beginI = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfo);
            auto endI = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfo);
            std::vector<TensorInfo> outputs(beginI, endI);
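            // IsSplitterSupported expects reference_wrappers to the output infos rather than copies.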
            const std::vector<std::reference_wrapper<TensorInfo>> outputPtrs(outputs.begin(), outputs.end());

            result = layerSupportObject.IsSplitterSupported(OverrideDataType(input, dataType),
                                                            outputPtrs,
                                                            cLayer->GetParameters(),
                                                            reason);
            break;
        }
        case LayerType::Stack:
        {
            auto cLayer = PolymorphicDowncast<const StackLayer*>(&layer);

            // Get vector of all inputs.
            auto getTensorInfo = [&dataType](const InputSlot& slot)
                {
                    return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
                };
            auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo);
            auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo);
            std::vector<TensorInfo> inputs(beginI, endI);

            auto getTensorInfoPtr = [](const TensorInfo& info)
                {
                    return &info;
                };
            auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
            auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
            std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);

            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason);

            break;
        }
        case LayerType::StandIn:
        {
            auto cLayer = PolymorphicDowncast<const StandInLayer*>(&layer);

            // Get vector of all inputs.
            auto getTensorInfoIn = [&dataType](const InputSlot& slot)
                {
                    return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
                };
            auto getTensorInfoOut = [&dataType](const OutputSlot& slot)
                {
                    return OverrideDataType(slot.GetTensorInfo(), dataType);
                };
            auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfoIn);
            auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfoIn);
            std::vector<TensorInfo> inputs(beginI, endI);

            auto beginO = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfoOut);
            auto endO = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfoOut);
            std::vector<TensorInfo> outputs(beginO, endO);

            auto getTensorInfoPtr = [](const TensorInfo& info)
                {
                    return &info;
                };
            auto beginPtrI = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
            auto endPtrI = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
            std::vector<const TensorInfo*> inputPtrs(beginPtrI, endPtrI);

            auto beginPtrO = MakeTransformIterator(outputs.begin(), getTensorInfoPtr);
            auto endPtrO = MakeTransformIterator(outputs.end(), getTensorInfoPtr);
            std::vector<const TensorInfo*> outputPtrs(beginPtrO, endPtrO);

            result = layerSupportObject.IsStandInSupported(inputPtrs,
                                                           outputPtrs,
                                                           cLayer->GetParameters(),
                                                           reason);
            break;
        }
        case LayerType::StridedSlice:
        {
            auto cLayer = PolymorphicDowncast<const StridedSliceLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsStridedSliceSupported(OverrideDataType(input, dataType),
                                                                OverrideDataType(output, dataType),
                                                                cLayer->GetParameters(),
                                                                reason);
            break;
        }
        case LayerType::Subtraction:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsSubtractionSupported(
                                            OverrideDataType(input0, dataType),
                                            OverrideDataType(input1, dataType),
                                            OverrideDataType(output, dataType),
                                            reason);
            break;
        }
        case LayerType::Switch:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output0 = layer.GetOutputSlot(0).GetTensorInfo();
            const TensorInfo& output1 = layer.GetOutputSlot(1).GetTensorInfo();
            result = layerSupportObject.IsSwitchSupported(OverrideDataType(input0, dataType),
                                                          OverrideDataType(input1, dataType),
                                                          OverrideDataType(output0, dataType),
                                                          OverrideDataType(output1, dataType),
                                                          reason);
            break;
        }
        case LayerType::Mean:
        {
            auto cLayer = PolymorphicDowncast<const MeanLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsMeanSupported(
                                            OverrideDataType(input, dataType),
                                            OverrideDataType(output, dataType),
                                            cLayer->GetParameters(),
                                            reason);
            break;
        }
        case LayerType::Minimum:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsMinimumSupported(OverrideDataType(input0, dataType),
                                                           OverrideDataType(input1, dataType),
                                                           OverrideDataType(output, dataType),
                                                           reason);
            break;
        }
        case LayerType::Prelu:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& alpha = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsPreluSupported(OverrideDataType(input, dataType),
                                                         OverrideDataType(alpha, dataType),
                                                         OverrideDataType(output, dataType),
                                                         reason);
            break;
        }
        case LayerType::Transpose:
        {
            auto cLayer = PolymorphicDowncast<const TransposeLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsTransposeSupported(OverrideDataType(input, dataType),
                                                             OverrideDataType(output, dataType),
                                                             cLayer->GetParameters(),
                                                             reason);
            break;
        }
        case LayerType::TransposeConvolution2d:
        {
            auto cLayer = PolymorphicDowncast<const TransposeConvolution2dLayer*>(&layer);

            const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                      dataType);
            const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);

            const TransposeConvolution2dDescriptor& descriptor = cLayer->GetParameters();

            Optional<TensorInfo> biases;
            if (descriptor.m_BiasEnabled)
            {
                ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
                biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
                                          GetBiasTypeFromWeightsType(dataType));
            }

            ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
            const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);

            result = layerSupportObject.IsTransposeConvolution2dSupported(input,
                                                                          output,
                                                                          descriptor,
                                                                          weights,
                                                                          biases,
                                                                          reason);

            break;
        }
        case LayerType::Reduce:
        {
            auto cLayer = PolymorphicDowncast<const ReduceLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsReduceSupported(OverrideDataType(input, dataType),
                                                          OverrideDataType(output, dataType),
                                                          cLayer->GetParameters(),
                                                          reason);
            break;
        }
        case LayerType::UnidirectionalSequenceLstm:
        {
            auto cLayer = PolymorphicDowncast<const UnidirectionalSequenceLstmLayer*>(&layer);
            const UnidirectionalSequenceLstmDescriptor& descriptor = cLayer->GetParameters();

            // All inputs.
            const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                       dataType);
            const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
                                                               dataType);
            const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
                                                             dataType);
            // Outputs
            const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
            const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
            const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);

            // Basic parameters
            const TensorInfo& inputToForgetWeights
                = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
            const TensorInfo& inputToCellWeights
                = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
            const TensorInfo& inputToOutputWeights
                = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
            const TensorInfo& recurrentToForgetWeights
                = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
            const TensorInfo& recurrentToCellWeights
                = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
            const TensorInfo& recurrentToOutputWeights
                = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
            const TensorInfo& forgetGateBias
                = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
            const TensorInfo& cellBias
                = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
            const TensorInfo& outputGateBias
                = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);

            LstmInputParamsInfo paramsInfo;

            paramsInfo.m_InputToForgetWeights = &inputToForgetWeights;
            paramsInfo.m_InputToCellWeights = &inputToCellWeights;
            paramsInfo.m_InputToOutputWeights = &inputToOutputWeights;
            paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
            paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights;
            paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
            paramsInfo.m_ForgetGateBias = &forgetGateBias;
            paramsInfo.m_CellBias = &cellBias;
            paramsInfo.m_OutputGateBias = &outputGateBias;

            // Optional parameters
            TensorInfo optInputToInputWeights;
            TensorInfo optRecurrentToInputWeights;
            TensorInfo optCellToInputWeights;
            TensorInfo optInputGateBias;
            TensorInfo optProjectionWeights;
            TensorInfo optProjectionBias;
            TensorInfo optCellToForgetWeights;
            TensorInfo optCellToOutputWeights;
            TensorInfo optInputLayerNormWeights;
            TensorInfo optForgetLayerNormWeights;
            TensorInfo optCellLayerNormWeights;
            TensorInfo optOutputLayerNormWeights;

            if(!descriptor.m_CifgEnabled)
            {
                optInputToInputWeights =
                    OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
                paramsInfo.m_InputToInputWeights = &optInputToInputWeights;

                optRecurrentToInputWeights =
                    OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
                paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
                optInputGateBias =
                    OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
                paramsInfo.m_InputGateBias = &optInputGateBias;
            }

            if(descriptor.m_ProjectionEnabled)
            {
                optProjectionWeights =
                    OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
                paramsInfo.m_ProjectionWeights = &optProjectionWeights;
                if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
                {
                    optProjectionBias =
                        OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
                    paramsInfo.m_ProjectionBias = &optProjectionBias;
                }
            }

            if(descriptor.m_PeepholeEnabled)
            {
                if(!descriptor.m_CifgEnabled)
                {
                    optCellToInputWeights =
                        OverrideDataType(cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
                                         dataType);
                    paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
                }
                optCellToForgetWeights =
                    OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
                paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
                optCellToOutputWeights =
                    OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
                paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
            }

            if(descriptor.m_LayerNormEnabled)
            {
                if (!descriptor.m_CifgEnabled)
                {
                    optInputLayerNormWeights = OverrideDataType(
                        cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
                    paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
                }

                optForgetLayerNormWeights = OverrideDataType(
                    cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
                paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;

                optCellLayerNormWeights = OverrideDataType(
                    cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
                paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;

                optOutputLayerNormWeights = OverrideDataType(
                    cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
                paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
            }

            result = layerSupportObject.IsUnidirectionalSequenceLstmSupported(input,
                                                                              outputStateIn,
                                                                              cellStateIn,
                                                                              outputStateOut,
                                                                              cellStateOut,
                                                                              output,
                                                                              descriptor,
                                                                              paramsInfo,
                                                                              reason);
            break;
        }
1507  default:
1508  {
1509  ARMNN_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
1510  reason.value() = "Unrecognised layer type";
1511  result = false;
1512  break;
1513  }
1514  }
1515  return result;
1516 }
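
The UnidirectionalSequenceLstm case above only wires pointers: LstmInputParamsInfo never copies a TensorInfo, so every TensorInfo it references must outlive the support query. A minimal sketch of the same convention from the caller's side; the shapes (inputSize = 2, numUnits = 4) and the helper name are illustrative assumptions, not values taken from this file:

#include <armnn/LstmParams.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

void SketchLstmParamsInfo()
{
    using namespace armnn;
    const unsigned int inputSize = 2; // assumed
    const unsigned int numUnits  = 4; // assumed

    // These locals must stay alive for as long as paramsInfo is used,
    // because LstmInputParamsInfo stores raw pointers to them.
    TensorInfo inputToForgetWeights({numUnits, inputSize}, DataType::Float32);
    TensorInfo recurrentToForgetWeights({numUnits, numUnits}, DataType::Float32);
    TensorInfo forgetGateBias({numUnits}, DataType::Float32);

    LstmInputParamsInfo paramsInfo;
    paramsInfo.m_InputToForgetWeights     = &inputToForgetWeights;
    paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
    paramsInfo.m_ForgetGateBias           = &forgetGateBias;
    // Optional tensors (CIFG, projection, peephole, layer norm) stay null
    // unless the corresponding descriptor flag enables them, mirroring the
    // if(descriptor.m_...Enabled) blocks above.
}
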
1517 
1518 bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
1519  const IConnectableLayer& connectableLayer,
1520  Optional<DataType> dataType,
1521  std::string& outReasonIfUnsupported)
1522 {
1523  return IsLayerConfigurationSupported(backendId, connectableLayer, dataType, outReasonIfUnsupported);
1524 }
1525 
1526 bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
1527  Optional<DataType> dataType,
1528  std::string& outReasonIfUnsupported)
1529 {
1530  auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
1531  return IsLayerConfigurationSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
1532 }
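
Both overloads above reduce to the same backend-keyed check; they differ only in whether the caller names the backend or the layer already carries one. A short sketch of probing support up front, assuming the reference backend "CpuRef" is registered, using illustrative tensor shapes, and with the WorkloadFactory.hpp header path assumed for this release:

#include <armnn/Descriptors.hpp>
#include <armnn/INetwork.hpp>
#include <armnn/Types.hpp>
#include <armnn/backends/WorkloadFactory.hpp> // path assumed

#include <string>

bool SketchProbeReluSupport()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    IConnectableLayer* input = network->AddInputLayer(0);
    ActivationDescriptor reluDesc;
    reluDesc.m_Function = ActivationFunction::ReLu;
    IConnectableLayer* relu = network->AddActivationLayer(reluDesc, "relu");

    // The check reads tensor infos through the input connections, so the
    // graph must be wired and annotated before asking about support.
    input->GetOutputSlot(0).Connect(relu->GetInputSlot(0));
    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({1, 4}, DataType::Float32));
    relu->GetOutputSlot(0).SetTensorInfo(TensorInfo({1, 4}, DataType::Float32));

    std::string reason;
    return IWorkloadFactory::IsLayerSupported(BackendId("CpuRef"), *relu,
                                              Optional<DataType>(DataType::Float32),
                                              reason);
}
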
1533 
1534 // TODO merge with defaulted modelOptions above
1535 bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
1536  Optional<DataType> dataType,
1537  std::string& outReasonIfUnsupported,
1538  const ModelOptions& modelOptions)
1539 {
1540  auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
1541  return IsLayerConfigurationSupported(layer->GetBackendId(),
1542  connectableLayer,
1543  dataType,
1544  outReasonIfUnsupported,
1545  modelOptions);
1546 }
1547 
1548 bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
1549  const IConnectableLayer& connectableLayer,
1550  Optional<DataType> dataType,
1551  std::string& outReasonIfUnsupported,
1552  const ModelOptions& modelOptions)
1553 {
1554  return IsLayerConfigurationSupported(backendId,
1555  connectableLayer,
1556  dataType,
1557  outReasonIfUnsupported,
1558  modelOptions);
1559 }
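
The two overloads taking ModelOptions let callers thread backend-specific knobs through to GetLayerSupport. A sketch of building one; "FastMathEnabled" is a documented CpuAcc/GpuAcc option, named here purely as an illustration rather than anything defined in this file:

#include <armnn/BackendOptions.hpp>

armnn::ModelOptions MakeSketchModelOptions()
{
    // ModelOptions is just std::vector<BackendOptions>; each entry pairs a
    // backend id with a list of name/value options for that backend.
    armnn::BackendOptions cpuAccOptions("CpuAcc", {{"FastMathEnabled", true}});
    return {cpuAccOptions};
}
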
1560 
1561 std::unique_ptr<IWorkload> IWorkloadFactory::CreateWorkload(LayerType type,
1562  const QueueDescriptor& descriptor,
1563  const WorkloadInfo& info) const
1564 {
1565  switch(type)
1566  {
1567  case LayerType::Activation :
1568  {
1569  auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
1570  return CreateActivation(*activationQueueDescriptor, info);
1571  }
1572  case LayerType::Addition :
1573  {
1574  auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
1575  return CreateAddition(*additionQueueDescriptor, info);
1576  }
1577  case LayerType::ArgMinMax :
1578  {
1579  auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
1580  return CreateArgMinMax(*argMinMaxQueueDescriptor, info);
1581  }
1582  case LayerType::BatchNormalization:
1583  {
1584  auto batchNormQueueDescriptor = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
1585  return CreateBatchNormalization(*batchNormQueueDescriptor, info);
1586  }
1587  case LayerType::BatchToSpaceNd:
1588  {
1589  auto batchToSpaceNdQueueDescriptor
1590  = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
1591  return CreateBatchToSpaceNd(*batchToSpaceNdQueueDescriptor, info);
1592  }
1593  case LayerType::Cast :
1594  {
1595  auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
1596  return CreateCast(*castQueueDescriptor, info);
1597  }
1598  case LayerType::ChannelShuffle:
1599  {
1600  auto channelShuffleQueueDescriptor
1601  = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
1602  return CreateChannelShuffle(*channelShuffleQueueDescriptor, info);
1603  }
1604  case LayerType::Comparison :
1605  {
1606  auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
1607  return CreateComparison(*comparisonQueueDescriptor, info);
1608  }
1609  case LayerType::Concat :
1610  {
1611  auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
1612  return CreateConcat(*concatQueueDescriptor, info);
1613  }
1614  case LayerType::Constant :
1615  {
1616  auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
1617  return CreateConstant(*constantQueueDescriptor, info);
1618  }
1619  case LayerType::ConvertFp16ToFp32:
1620  {
1621  auto convertFp16ToFp32QueueDescriptor
1622  = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
1623  return CreateConvertFp16ToFp32(*convertFp16ToFp32QueueDescriptor, info);
1624  }
1625  case LayerType::ConvertFp32ToFp16:
1626  {
1627  auto convertFp32ToFp16QueueDescriptor
1628  = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
1629  return CreateConvertFp32ToFp16(*convertFp32ToFp16QueueDescriptor, info);
1630  }
1631  case LayerType::Convolution2d:
1632  {
1633  auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
1634  return CreateConvolution2d(*convolution2dQueueDescriptor, info);
1635  }
1636  case LayerType::Convolution3d:
1637  {
1638  auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
1639  return CreateConvolution3d(*convolution3dQueueDescriptor, info);
1640  }
1641  case LayerType::Debug:
1642  {
1643  auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
1644  return CreateDebug(*debugQueueDescriptor, info);
1645  }
1646  case LayerType::DepthToSpace:
1647  {
1648  auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
1649  return CreateDepthToSpace(*depthToSpaceQueueDescriptor, info);
1650  }
1651  case LayerType::DepthwiseConvolution2d:
1652  {
1653  auto depthwiseConvolution2DQueueDescriptor
1654  = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
1655  return CreateDepthwiseConvolution2d(*depthwiseConvolution2DQueueDescriptor, info);
1656  }
1657  case LayerType::Dequantize:
1658  {
1659  auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
1660  return CreateDequantize(*dequantizeQueueDescriptor, info);
1661  }
1662  case LayerType::DetectionPostProcess:
1663  {
1664  auto detectionPostProcessQueueDescriptor
1665  = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
1666  return CreateDetectionPostProcess(*detectionPostProcessQueueDescriptor, info);
1667  }
1668  case LayerType::Division:
1669  {
1670  auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
1671  return CreateDivision(*divisionQueueDescriptor, info);
1672  }
1673  case LayerType::ElementwiseUnary:
1674  {
1675  auto elementwiseUnaryQueueDescriptor
1676  = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
1677  return CreateElementwiseUnary(*elementwiseUnaryQueueDescriptor, info);
1678 
1679  }
1680  case LayerType::FakeQuantization:
1681  {
1682  auto fakeQuantizationQueueDescriptor
1683  = PolymorphicDowncast<const FakeQuantizationQueueDescriptor*>(&descriptor);
1684  return CreateFakeQuantization(*fakeQuantizationQueueDescriptor, info);
1685  }
1686  case LayerType::Fill:
1687  {
1688  auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
1689  return CreateFill(*fillQueueDescriptor, info);
1690  }
1691  case LayerType::Floor:
1692  {
1693  auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
1694  return CreateFloor(*floorQueueDescriptor, info);
1695  }
1696  case LayerType::FullyConnected:
1697  {
1698  auto fullyConnectedQueueDescriptor
1699  = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
1700  return CreateFullyConnected(*fullyConnectedQueueDescriptor, info);
1701  }
1702  case LayerType::Gather:
1703  {
1704  auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
1705  return CreateGather(*gatherQueueDescriptor, info);
1706  }
1707  case LayerType::Input:
1708  {
1709  auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
1710  return CreateInput(*inputQueueDescriptor, info);
1711  }
1712  case LayerType::InstanceNormalization:
1713  {
1714  auto instanceNormalizationQueueDescriptor
1715  = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
1716  return CreateInstanceNormalization(*instanceNormalizationQueueDescriptor, info);
1717  }
1718  case LayerType::L2Normalization:
1719  {
1720  auto l2NormalizationQueueDescriptor
1721  = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
1722  return CreateL2Normalization(*l2NormalizationQueueDescriptor, info);
1723  }
1724  case LayerType::LogicalBinary:
1725  {
1726  auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
1727  return CreateLogicalBinary(*logicalBinaryQueueDescriptor, info);
1728  }
1729  case LayerType::LogSoftmax:
1730  {
1731  auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
1732  return CreateLogSoftmax(*logSoftmaxQueueDescriptor, info);
1733  }
1734  case LayerType::Lstm:
1735  {
1736  auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
1737  return CreateLstm(*lstmQueueDescriptor, info);
1738  }
1739  case LayerType::Maximum:
1740  {
1741  auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
1742  return CreateMaximum(*maximumQueueDescriptor, info);
1743  }
1744  case LayerType::Mean:
1745  {
1746  auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
1747  return CreateMean(*meanQueueDescriptor, info);
1748  }
1749  case LayerType::MemCopy:
1750  {
1751  auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
1752  return CreateMemCopy(*memCopyQueueDescriptor, info);
1753  }
1754  case LayerType::MemImport:
1755  {
1756  auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
1757  return CreateMemImport(*memImportQueueDescriptor, info);
1758  }
1759  case LayerType::Minimum:
1760  {
1761  auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
1762  return CreateMinimum(*minimumQueueDescriptor, info);
1763  }
1764  case LayerType::Multiplication:
1765  {
1766  auto multiplicationQueueDescriptor
1767  = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
1768  return CreateMultiplication(*multiplicationQueueDescriptor, info);
1769  }
1770  case LayerType::Normalization:
1771  {
1772  auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
1773  return CreateNormalization(*normalizationQueueDescriptor, info);
1774  }
1775  case LayerType::Output:
1776  {
1777  auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
1778  return CreateOutput(*outputQueueDescriptor, info);
1779  }
1780  case LayerType::Pad:
1781  {
1782  auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
1783  return CreatePad(*padQueueDescriptor, info);
1784  }
1785  case LayerType::Permute:
1786  {
1787  auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
1788  return CreatePermute(*permuteQueueDescriptor, info);
1789  }
1790  case LayerType::Pooling2d:
1791  {
1792  auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
1793  return CreatePooling2d(*pooling2dQueueDescriptor, info);
1794  }
1795  case LayerType::Pooling3d:
1796  {
1797  auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
1798  return CreatePooling3d(*pooling3dQueueDescriptor, info);
1799  }
1800  case LayerType::PreCompiled:
1801  {
1802  auto preCompiledQueueDescriptor = PolymorphicDowncast<const PreCompiledQueueDescriptor*>(&descriptor);
1803  return CreatePreCompiled(*preCompiledQueueDescriptor, info);
1804  }
1805  case LayerType::Prelu:
1806  {
1807  auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
1808  return CreatePrelu(*preluQueueDescriptor, info);
1809  }
1810  case LayerType::QLstm:
1811  {
1812  auto qlstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
1813  return CreateQLstm(*qlstmQueueDescriptor, info);
1814  }
1815  case LayerType::Quantize:
1816  {
1817  auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
1818  return CreateQuantize(*quantizeQueueDescriptor, info);
1819  }
1820  case LayerType::Rank:
1821  {
1822  auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
1823  return CreateRank(*rankQueueDescriptor, info);
1824  }
1825  case LayerType::Reduce:
1826  {
1827  auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
1828  return CreateReduce(*reduceQueueDescriptor, info);
1829  }
1830  case LayerType::Reshape:
1831  {
1832  auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
1833  return CreateReshape(*reshapeQueueDescriptor, info);
1834  }
1835  case LayerType::Resize:
1836  {
1837  auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
1838  return CreateResize(*resizeQueueDescriptor, info);
1839  }
1840  case LayerType::Shape:
1841  {
1842  auto shapeQueueDescriptor = PolymorphicDowncast<const ShapeQueueDescriptor*>(&descriptor);
1843  return CreateShape(*shapeQueueDescriptor, info);
1844  }
1845  case LayerType::Slice:
1846  {
1847  auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
1848  return CreateSlice(*sliceQueueDescriptor, info);
1849  }
1850  case LayerType::Softmax:
1851  {
1852  auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
1853  return CreateSoftmax(*softmaxQueueDescriptor, info);
1854  }
1855  case LayerType::SpaceToBatchNd:
1856  {
1857  auto spaceToBatchNdQueueDescriptor
1858  = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
1859  return CreateSpaceToBatchNd(*spaceToBatchNdQueueDescriptor, info);
1860  }
1861  case LayerType::SpaceToDepth:
1862  {
1863  auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
1864  return CreateSpaceToDepth(*spaceToDepthQueueDescriptor, info);
1865  }
1866  case LayerType::Splitter:
1867  {
1868  auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
1869  return CreateSplitter(*splitterQueueDescriptor, info);
1870  }
1871  case LayerType::Stack:
1872  {
1873  auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
1874  return CreateStack(*stackQueueDescriptor, info);
1875  }
1876  case LayerType::StridedSlice:
1877  {
1878  auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
1879  return CreateStridedSlice(*stridedSliceQueueDescriptor, info);
1880  }
1881  case LayerType::Subtraction:
1882  {
1883  auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
1884  return CreateSubtraction(*subtractionQueueDescriptor, info);
1885  }
1886  case LayerType::Transpose:
1887  {
1888  auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
1889  return CreateTranspose(*transposeQueueDescriptor, info);
1890  }
1891  case LayerType::TransposeConvolution2d:
1892  {
1893  auto transposeConvolution2dQueueDescriptor
1894  = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
1895  return CreateTransposeConvolution2d(*transposeConvolution2dQueueDescriptor, info);
1896  }
1897  case LayerType::UnidirectionalSequenceLstm:
1898  {
1899  auto unidirectionalSequenceLstmQueueDescriptor
1900  = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
1901  return CreateUnidirectionalSequenceLstm(*unidirectionalSequenceLstmQueueDescriptor, info);
1902  }
1903  default:
1904  return nullptr;
1905  }
1906 }
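
Every case in CreateWorkload follows the same two-step shape: downcast the polymorphic QueueDescriptor to the concrete type named by the LayerType tag, then forward to the matching virtual Create function, with nullptr for unknown tags. A self-contained sketch of that tag-plus-downcast dispatch, using plain static_cast where the file uses PolymorphicDowncast:

#include <memory>

enum class Kind { Add, Mul };

struct BaseDesc { virtual ~BaseDesc() = default; };
struct AddDesc : BaseDesc { int bias = 0; };
struct MulDesc : BaseDesc { int factor = 1; };
struct Work { int result = 0; };

std::unique_ptr<Work> CreateSketchWork(Kind kind, const BaseDesc& desc)
{
    switch (kind)
    {
        case Kind::Add:
        {
            // Safe only because the tag and the dynamic type travel together,
            // which is exactly the invariant the switch above relies on.
            auto addDesc = static_cast<const AddDesc*>(&desc);
            return std::make_unique<Work>(Work{addDesc->bias + 1});
        }
        case Kind::Mul:
        {
            auto mulDesc = static_cast<const MulDesc*>(&desc);
            return std::make_unique<Work>(Work{mulDesc->factor * 2});
        }
        default:
            return nullptr; // unknown tags yield no workload, as above
    }
}
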
1908 
1909 std::unique_ptr<IWorkload> IWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& /*descriptor*/,
1910  const WorkloadInfo& /*info*/) const
1911 {
1912  return std::unique_ptr<IWorkload>();
1913 }
1914 
1915 std::unique_ptr<IWorkload> IWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& /*descriptor*/,
1916  const WorkloadInfo& /*info*/) const
1917 {
1918  return std::unique_ptr<IWorkload>();
1919 }
1920 
1921 std::unique_ptr<IWorkload> IWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& /*descriptor*/,
1922  const WorkloadInfo& /*info*/) const
1923 {
1924  return std::unique_ptr<IWorkload>();
1925 }
1926 
1927 std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchNormalization(
1928  const BatchNormalizationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
1929 {
1930  return std::unique_ptr<IWorkload>();
1931 }
1932 
1933 std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& /*desc*/,
1934  const WorkloadInfo& /*Info*/) const
1935 {
1936  return std::unique_ptr<IWorkload>();
1937 }
1938 
1939 std::unique_ptr<IWorkload> IWorkloadFactory::CreateCast(const CastQueueDescriptor& /*descriptor*/,
1940  const WorkloadInfo& /*info*/) const
1941 {
1942  return std::unique_ptr<IWorkload>();
1943 }
1944 
1945 std::unique_ptr<IWorkload> IWorkloadFactory::CreateChannelShuffle(const ChannelShuffleQueueDescriptor& /*descriptor*/,
1946  const WorkloadInfo& /*info*/) const
1947 {
1948  return std::unique_ptr<IWorkload>();
1949 }
1950 
1951 std::unique_ptr<IWorkload> IWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& /*descriptor*/,
1952  const WorkloadInfo& /*info*/) const
1953 {
1954  return std::unique_ptr<IWorkload>();
1955 }
1956 
1957 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& /*descriptor*/,
1958  const WorkloadInfo& /*info*/) const
1959 {
1960  return std::unique_ptr<IWorkload>();
1961 }
1962 
1963 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& /*descriptor*/,
1964  const WorkloadInfo& /*info*/) const
1965 {
1966  return std::unique_ptr<IWorkload>();
1967 }
1968 
1969 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& /*desc*/,
1970  const WorkloadInfo& /*info*/) const
1971 {
1972  return std::unique_ptr<IWorkload>();
1973 }
1974 
1975 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& /*desc*/,
1976  const WorkloadInfo& /*info*/) const
1977 {
1978  return std::unique_ptr<IWorkload>();
1979 }
1980 
1981 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& /*descriptor*/,
1982  const WorkloadInfo& /*info*/) const
1983 {
1984  return std::unique_ptr<IWorkload>();
1985 }
1986 
1987 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution3d(const Convolution3dQueueDescriptor& /*descriptor*/,
1988  const WorkloadInfo& /*info*/) const
1989 {
1990  return std::unique_ptr<IWorkload>();
1991 }
1992 
1993 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDebug(const DebugQueueDescriptor& /*descriptor*/,
1994  const WorkloadInfo& /*info*/) const
1995 {
1996  return std::unique_ptr<IWorkload>();
1997 }
1998 
1999 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& /*descriptor*/,
2000  const WorkloadInfo& /*info*/) const
2001 {
2002  return std::unique_ptr<IWorkload>();
2003 }
2004 
2005 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthwiseConvolution2d(
2006  const DepthwiseConvolution2dQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
2007 {
2008  return std::unique_ptr<IWorkload>();
2009 }
2010 
2011 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDequantize(
2012  const DequantizeQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
2013 {
2014  return std::unique_ptr<IWorkload>();
2015 }
2016 
2017 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDetectionPostProcess(
2018  const DetectionPostProcessQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
2019 {
2020  return std::unique_ptr<IWorkload>();
2021 }
2022 
2023 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& /*descriptor*/,
2024  const WorkloadInfo& /*info*/) const
2025 {
2026  return std::unique_ptr<IWorkload>();
2027 }
2028 
2029 std::unique_ptr<IWorkload> IWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& /*desc*/,
2030  const WorkloadInfo& /*info*/) const
2031 {
2032  return std::unique_ptr<IWorkload>();
2033 }
2034 
2035 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& /*desc*/,
2036  const WorkloadInfo& /*info*/) const
2037 {
2038  return std::unique_ptr<IWorkload>();
2039 }
2040 
2041 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFill(const FillQueueDescriptor& /*descriptor*/,
2042  const WorkloadInfo& /*info*/) const
2043 {
2044  return std::unique_ptr<IWorkload>();
2045 }
2046 
2047 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFloor(const FloorQueueDescriptor& /*descriptor*/,
2048  const WorkloadInfo& /*info*/) const
2049 {
2050  return std::unique_ptr<IWorkload>();
2051 }
2052 
2053 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& /*descriptor*/,
2054  const WorkloadInfo& /*info*/) const
2055 {
2056  return std::unique_ptr<IWorkload>();
2057 }
2058 
2059 std::unique_ptr<IWorkload> IWorkloadFactory::CreateGather(const GatherQueueDescriptor& /*descriptor*/,
2060  const WorkloadInfo& /*info*/) const
2061 {
2062  return std::unique_ptr<IWorkload>();
2063 }
2064 
2065 std::unique_ptr<IWorkload> IWorkloadFactory::CreateInstanceNormalization(
2066  const InstanceNormalizationQueueDescriptor& /*descriptor*/,
2067  const WorkloadInfo& /*info*/) const
2068 {
2069  return std::unique_ptr<IWorkload>();
2070 }
2071 
2072 std::unique_ptr<IWorkload> IWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& /*desc*/,
2073  const WorkloadInfo& /*info*/) const
2074 {
2075  return std::unique_ptr<IWorkload>();
2076 }
2077 
2078 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogicalBinary(const LogicalBinaryQueueDescriptor& /*desc*/,
2079  const WorkloadInfo& /*info*/) const
2080 {
2081  return std::unique_ptr<IWorkload>();
2082 }
2083 
2084 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogicalUnary(const ElementwiseUnaryQueueDescriptor& /*desc*/,
2085  const WorkloadInfo& /*info*/) const
2086 {
2087  return std::unique_ptr<IWorkload>();
2088 }
2089 
2090 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& /*descriptor*/,
2091  const WorkloadInfo& /*info*/) const
2092 {
2093  return std::unique_ptr<IWorkload>();
2094 }
2095 
2096 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLstm(const LstmQueueDescriptor& /*descriptor*/,
2097  const WorkloadInfo& /*info*/) const
2098 {
2099  return std::unique_ptr<IWorkload>();
2100 }
2101 
2102 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& /*descriptor*/,
2103  const WorkloadInfo& /*info*/) const
2104 {
2105  return std::unique_ptr<IWorkload>();
2106 }
2107 
2108 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMean(const MeanQueueDescriptor& /*descriptor*/,
2109  const WorkloadInfo& /*Info*/) const
2110 {
2111  return std::unique_ptr<IWorkload>();
2112 }
2113 
2114 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& /*descriptor*/,
2115  const WorkloadInfo& /*info*/) const
2116 {
2117  return std::unique_ptr<IWorkload>();
2118 }
2119 
2120 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& /*descriptor*/,
2121  const WorkloadInfo& /*info*/) const
2122 {
2123  return std::unique_ptr<IWorkload>();
2124 }
2125 
2126 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerge(const MergeQueueDescriptor& /*descriptor*/,
2127  const WorkloadInfo& /*info*/) const
2128 {
2129  return std::unique_ptr<IWorkload>();
2130 }
2131 
2132 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& /*descriptor*/,
2133  const WorkloadInfo& /*info*/) const
2134 {
2135  return std::unique_ptr<IWorkload>();
2136 }
2137 
2138 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& /*descriptor*/,
2139  const WorkloadInfo& /*info*/) const
2140 {
2141  return std::unique_ptr<IWorkload>();
2142 }
2143 
2144 std::unique_ptr<IWorkload> IWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& /*descriptor*/,
2145  const WorkloadInfo& /*info*/) const
2146 {
2147  return std::unique_ptr<IWorkload>();
2148 }
2149 
2150 std::unique_ptr<IWorkload> IWorkloadFactory::CreateOutput(const OutputQueueDescriptor& /*descriptor*/,
2151  const WorkloadInfo& /*info*/) const
2152 {
2153  return std::unique_ptr<IWorkload>();
2154 }
2155 
2156 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePad(const PadQueueDescriptor& /*descriptor*/,
2157  const WorkloadInfo& /*Info*/) const
2158 {
2159  return std::unique_ptr<IWorkload>();
2160 }
2161 
2162 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& /*descriptor*/,
2163  const WorkloadInfo& /*info*/) const
2164 {
2165  return std::unique_ptr<IWorkload>();
2166 }
2167 
2168 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& /*descriptor*/,
2169  const WorkloadInfo& /*info*/) const
2170 {
2171  return std::unique_ptr<IWorkload>();
2172 }
2173 
2174 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePooling3d(const Pooling3dQueueDescriptor& /*descriptor*/,
2175  const WorkloadInfo& /*info*/) const
2176 {
2177  return std::unique_ptr<IWorkload>();
2178 }
2179 
2180 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& /*descriptor*/,
2181  const WorkloadInfo& /*info*/) const
2182 {
2183  return std::unique_ptr<IWorkload>();
2184 }
2185 
2186 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePrelu(const PreluQueueDescriptor &/*descriptor*/,
2187  const WorkloadInfo &/*info*/) const
2188 {
2189  return std::unique_ptr<IWorkload>();
2190 }
2191 
2192 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& /*descriptor*/,
2193  const WorkloadInfo& /*Info*/) const
2194 {
2195  return std::unique_ptr<IWorkload>();
2196 }
2197 
2198 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQLstm(const QLstmQueueDescriptor& /*descriptor*/,
2199  const WorkloadInfo& /*info*/) const
2200 {
2201  return std::unique_ptr<IWorkload>();
2202 }
2203 
2204 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& /*descriptor*/,
2205  const WorkloadInfo& /*info*/) const
2206 {
2207  return std::unique_ptr<IWorkload>();
2208 }
2209 std::unique_ptr<IWorkload> IWorkloadFactory::CreateRank(const RankQueueDescriptor& /*descriptor*/,
2210  const WorkloadInfo& /*info*/) const
2211 {
2212  return std::unique_ptr<IWorkload>();
2213 }
2214 
2215 std::unique_ptr<IWorkload> IWorkloadFactory::CreateReduce(const ReduceQueueDescriptor& /*descriptor*/,
2216  const WorkloadInfo& /*info*/) const
2217 {
2218  return std::unique_ptr<IWorkload>();
2219 }
2220 
2221 std::unique_ptr<IWorkload> IWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& /*descriptor*/,
2222  const WorkloadInfo& /*info*/) const
2223 {
2224  return std::unique_ptr<IWorkload>();
2225 }
2226 
2227 std::unique_ptr<IWorkload> IWorkloadFactory::CreateResize(const ResizeQueueDescriptor& /*descriptor*/,
2228  const WorkloadInfo& /*info*/) const
2229 {
2230  return std::unique_ptr<IWorkload>();
2231 }
2232 
2233 std::unique_ptr<IWorkload> IWorkloadFactory::CreateShape(const ShapeQueueDescriptor& /*descriptor*/,
2234  const WorkloadInfo& /*info*/) const
2235 {
2236  return std::unique_ptr<IWorkload>();
2237 }
2238 
2239 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSlice(const SliceQueueDescriptor& /*descriptor*/,
2240  const WorkloadInfo& /*info*/) const
2241 {
2242  return std::unique_ptr<IWorkload>();
2243 }
2244 
2245 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& /*descriptor*/,
2246  const WorkloadInfo& /*info*/) const
2247 {
2248  return std::unique_ptr<IWorkload>();
2249 }
2250 
2251 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& /*descriptor*/,
2252  const WorkloadInfo& /*info*/) const
2253 {
2254  return std::unique_ptr<IWorkload>();
2255 }
2256 
2257 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& /*descriptor*/,
2258  const WorkloadInfo& /*info*/) const
2259 {
2260  return std::unique_ptr<IWorkload>();
2261 }
2262 
2263 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& /*descriptor*/,
2264  const WorkloadInfo& /*info*/) const
2265 {
2266  return std::unique_ptr<IWorkload>();
2267 }
2268 
2269 std::unique_ptr<IWorkload> IWorkloadFactory::CreateStack(const StackQueueDescriptor& /*descriptor*/,
2270  const WorkloadInfo& /*info*/) const
2271 {
2272  return std::unique_ptr<IWorkload>();
2273 }
2274 
2275 std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& /*descriptor*/,
2276  const WorkloadInfo& /*info*/) const
2277 {
2278  return std::unique_ptr<IWorkload>();
2279 }
2280 
2281 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& /*descriptor*/,
2282  const WorkloadInfo& /*info*/) const
2283 {
2284  return std::unique_ptr<IWorkload>();
2285 }
2286 
2287 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSwitch(const SwitchQueueDescriptor& /*descriptor*/,
2288  const WorkloadInfo& /*info*/) const
2289 {
2290  return std::unique_ptr<IWorkload>();
2291 }
2292 
2293 std::unique_ptr<IWorkload> IWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& /*descriptor*/,
2294  const WorkloadInfo& /*info*/) const
2295 {
2296  return std::unique_ptr<IWorkload>();
2297 }
2298 
2299 std::unique_ptr<IWorkload> IWorkloadFactory::CreateTransposeConvolution2d(
2300  const TransposeConvolution2dQueueDescriptor& /*descriptor*/,
2301  const WorkloadInfo& /*info*/) const
2302 {
2303  return std::unique_ptr<IWorkload>();
2304 }
2305 
2306 std::unique_ptr<IWorkload> IWorkloadFactory::CreateUnidirectionalSequenceLstm(
2307  const UnidirectionalSequenceLstmQueueDescriptor& /*descriptor*/,
2308  const WorkloadInfo& /*info*/) const
2309 {
2310  return std::unique_ptr<IWorkload>();
2311 }
2312 
2313 std::unique_ptr<IWorkload> IWorkloadFactory::CreateInput(
2314  const InputQueueDescriptor& /*descriptor*/,
2315  const WorkloadInfo& /*info*/) const
2316 {
2317  return std::unique_ptr<IWorkload>();
2318 }
2319 
2320 } // namespace armnn
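
Since every default Create* implementation above returns an empty unique_ptr, callers can treat a null workload as "this factory does not provide that workload". A minimal caller-side sketch, with header paths assumed for this release:

#include <armnn/backends/WorkloadData.hpp>    // ActivationQueueDescriptor; path assumed
#include <armnn/backends/WorkloadFactory.hpp> // IWorkloadFactory; path assumed
#include <armnn/backends/WorkloadInfo.hpp>    // WorkloadInfo; path assumed

void SketchNullWorkloadCheck(const armnn::IWorkloadFactory& factory,
                             const armnn::ActivationQueueDescriptor& descriptor,
                             const armnn::WorkloadInfo& info)
{
    // A factory that does not override CreateActivation falls through to the
    // default above and hands back a null pointer.
    auto workload = factory.CreateWorkload(armnn::LayerType::Activation, descriptor, info);
    if (!workload)
    {
        // Treat as unsupported: pick another backend or report the layer.
    }
}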