ArmNN
 22.11
WorkloadFactory.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include <Layer.hpp>
7 #include <LayersFwd.hpp>
8 
9 #include <armnn/Types.hpp>
12 #include <armnn/BackendHelper.hpp>
16 
19 
20 #include <sstream>
21 
22 namespace armnn
23 {
24 
25 namespace
26 {
27 using LayerList = std::list<Layer*>;
28 using Iterator = LayerList::const_iterator; // Const so pointers in the list can't be modified externally.
29 
30 const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
31 {
32  if (!type)
33  {
34  return info;
35  }
36 
37  return TensorInfo(info.GetShape(),
38  type.value(),
39  info.GetQuantizationScale(),
40  info.GetQuantizationOffset(),
41  info.IsConstant());
42 }
43 
44 } // anonymous namespace
45 
47 {
48  if (!weightsType)
49  {
50  return weightsType;
51  }
52 
53  switch(weightsType.value())
54  {
58  return weightsType;
64  default:
65  ARMNN_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
66  }
67  return armnn::EmptyOptional();
68 }
69 
70 
71 bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
72  const IConnectableLayer& connectableLayer,
73  Optional<DataType> dataType,
74  std::string& outReasonIfUnsupported,
75  const ModelOptions& modelOptions)
76 {
77  Optional<std::string&> reason = outReasonIfUnsupported;
78  bool result;
79  const Layer& layer = *(PolymorphicDowncast<const Layer*>(&connectableLayer));
80 
81  auto const& backendRegistry = BackendRegistryInstance();
82  if (!backendRegistry.IsBackendRegistered(backendId))
83  {
84  std::stringstream ss;
85  ss << connectableLayer.GetName() << " is not supported on " << backendId
86  << " because this backend is not registered.";
87 
88  outReasonIfUnsupported = ss.str();
89  return false;
90  }
91 
92  auto backendFactory = backendRegistry.GetFactory(backendId);
93  auto backendObject = backendFactory();
94  auto layerSupportObject = LayerSupportHandle(backendObject->GetLayerSupport(modelOptions), backendId);
95 
96  switch(layer.GetType())
97  {
99  {
100  auto cLayer = PolymorphicDowncast<const ActivationLayer*>(&layer);
101  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
102  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
103  result = layerSupportObject.IsActivationSupported(
104  OverrideDataType(input, dataType),
105  OverrideDataType(output, dataType),
106  cLayer->GetParameters(),
107  reason);
108  break;
109  }
110  case LayerType::Addition:
111  {
112  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
113  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
114  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
115  result = layerSupportObject.IsAdditionSupported(
116  OverrideDataType(input0, dataType),
117  OverrideDataType(input1, dataType),
118  OverrideDataType(output, dataType),
119  reason);
120  break;
121  }
123  {
124  auto cLayer = PolymorphicDowncast<const ArgMinMaxLayer*>(&layer);
125  const ArgMinMaxDescriptor& descriptor = cLayer->GetParameters();
126 
127  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
128  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
129  result = layerSupportObject.IsArgMinMaxSupported(
130  OverrideDataType(input, dataType),
131  OverrideDataType(output, DataType::Signed32),
132  descriptor,
133  reason);
134  break;
135  }
137  {
138  auto cLayer = PolymorphicDowncast<const BatchMatMulLayer*>(&layer);
139  const BatchMatMulDescriptor& descriptor = cLayer->GetParameters();
140 
141  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
142  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
143  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
144  result = layerSupportObject.IsBatchMatMulSupported(
145  OverrideDataType(input0, dataType),
146  OverrideDataType(input1, dataType),
147  OverrideDataType(output, dataType),
148  descriptor,
149  reason);
150  break;
151  }
153  {
154  auto cLayer = PolymorphicDowncast<const BatchNormalizationLayer*>(&layer);
155  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
156  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
157  const TensorInfo& mean = cLayer->m_Mean->GetTensorInfo();
158  const TensorInfo& var = cLayer->m_Variance->GetTensorInfo();
159  const TensorInfo& beta = cLayer->m_Beta->GetTensorInfo();
160  const TensorInfo& gamma = cLayer->m_Gamma->GetTensorInfo();
161  result = layerSupportObject.IsBatchNormalizationSupported(
162  OverrideDataType(input, dataType),
163  OverrideDataType(output, dataType),
164  OverrideDataType(mean, dataType),
165  OverrideDataType(var, dataType),
166  OverrideDataType(beta, dataType),
167  OverrideDataType(gamma, dataType),
168  cLayer->GetParameters(),
169  reason);
170  break;
171  }
173  {
174  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
175  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
176  auto cLayer = PolymorphicDowncast<const BatchToSpaceNdLayer*>(&layer);
177 
178  result = layerSupportObject.IsBatchToSpaceNdSupported(OverrideDataType(input, dataType),
179  OverrideDataType(output, dataType),
180  cLayer->GetParameters(),
181  reason);
182  break;
183  }
184  case LayerType::Cast:
185  {
186  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
187  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
188 
189  result = layerSupportObject.IsCastSupported(OverrideDataType(input, dataType),
190  OverrideDataType(output, dataType),
191  reason);
192  break;
193  }
195  {
196  auto cLayer = PolymorphicDowncast<const ChannelShuffleLayer*>(&layer);
197 
198  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
199  const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
200 
201  const ChannelShuffleDescriptor descriptor = cLayer->GetParameters();
202 
203  result = layerSupportObject.IsChannelShuffleSupported(OverrideDataType(input, dataType),
204  OverrideDataType(output, dataType),
205  descriptor,
206  reason);
207  break;
208  }
210  {
211  auto cLayer = PolymorphicDowncast<const ComparisonLayer*>(&layer);
212 
213  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
214  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
215  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
216 
217  result = layerSupportObject.IsComparisonSupported(OverrideDataType(input0, dataType),
218  OverrideDataType(input1, dataType),
219  OverrideDataType(output, DataType::Boolean),
220  cLayer->GetParameters(),
221  reason);
222  break;
223  }
224  case LayerType::Constant:
225  {
226  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
227  result = layerSupportObject.IsConstantSupported(OverrideDataType(output, dataType), reason);
228  break;
229  }
231  {
232  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
233  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
234  result = layerSupportObject.IsConvertBf16ToFp32Supported(input, output, reason);
235  break;
236  }
238  {
239  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
240  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
241  result = layerSupportObject.IsConvertFp16ToFp32Supported(input, output, reason);
242  break;
243  }
245  {
246  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
247  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
248  result = layerSupportObject.IsConvertFp32ToBf16Supported(input, output, reason);
249  break;
250  }
252  {
253  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
254  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
255  result = layerSupportObject.IsConvertFp32ToFp16Supported(input, output, reason);
256  break;
257  }
259  {
260  auto cLayer = PolymorphicDowncast<const Convolution2dLayer*>(&layer);
261 
262  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
263  dataType);
264  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
265  ARMNN_ASSERT_MSG(layer.GetInputSlot(1).GetConnection(),
266  "Convolution2dLayer: Weights should be connected as a Constant Layer.");
267  const TensorInfo weights = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
268  dataType);
269 
270  const Convolution2dDescriptor& descriptor = cLayer->GetParameters();
271 
272  // Construct optional biases object based on the value of m_BiasEnabled
273  Optional<TensorInfo> biases;
274  if (descriptor.m_BiasEnabled)
275  {
276  ARMNN_ASSERT_MSG(layer.GetInputSlot(2).GetConnection(),
277  "Convolution2dLayer: Bias should be connected as a Constant Layer.");
278  biases = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
279  GetBiasTypeFromWeightsType(dataType));
280  }
281 
282  result = layerSupportObject.IsConvolution2dSupported(
283  input,
284  output,
285  descriptor,
286  weights,
287  biases,
288  reason);
289  break;
290  }
292  {
293  auto cLayer = PolymorphicDowncast<const Convolution3dLayer*>(&layer);
294 
295  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
296  dataType);
297  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
298 
299  ARMNN_ASSERT_MSG(layer.GetInputSlot(1).GetConnection(),
300  "Convolution3dLayer: Weights should be connected as a Constant Layer.");
301  const TensorInfo weights = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
302  dataType);
303 
304  const Convolution3dDescriptor& descriptor = cLayer->GetParameters();
305 
306  // Construct optional biases object based on the value of m_BiasEnabled
307  Optional<TensorInfo> biases;
308  if (descriptor.m_BiasEnabled)
309  {
310  biases = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
311  GetBiasTypeFromWeightsType(dataType));
312  }
313 
314  result = layerSupportObject.IsConvolution3dSupported(
315  input,
316  output,
317  descriptor,
318  weights,
319  biases,
320  reason);
321  break;
322  }
323  case LayerType::Debug:
324  {
325  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
326  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
327 
328  result = layerSupportObject.IsDebugSupported(OverrideDataType(input, dataType),
329  OverrideDataType(output, dataType),
330  reason);
331  break;
332  }
334  {
335  auto cLayer = PolymorphicDowncast<const DepthToSpaceLayer*>(&layer);
336 
337  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
338  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
339 
340  result = layerSupportObject.IsDepthToSpaceSupported(OverrideDataType(input, dataType),
341  OverrideDataType(output, dataType),
342  cLayer->GetParameters(),
343  reason);
344  break;
345  }
347  {
348  auto cLayer = PolymorphicDowncast<const DepthwiseConvolution2dLayer*>(&layer);
349  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
350  dataType);
351  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
352  const TensorInfo& weights = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
353  dataType);
354 
355  ARMNN_ASSERT(cLayer->GetInputSlot(1).GetConnection() != nullptr);
356 
357  const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();
358 
359  // Construct optional biases object based on the value of m_BiasEnabled
360  Optional<TensorInfo> biases;
361  if (descriptor.m_BiasEnabled)
362  {
363  biases = OverrideDataType(cLayer->GetInputSlot(2).GetConnection()->GetTensorInfo(),
364  GetBiasTypeFromWeightsType(dataType));
365  }
366 
367  result = layerSupportObject.IsDepthwiseConvolutionSupported(input,
368  output,
369  descriptor,
370  weights,
371  biases,
372  reason);
373  break;
374  }
376  {
377  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
378  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
379 
380  result = layerSupportObject.IsDequantizeSupported(input,
381  OverrideDataType(output, dataType),
382  reason);
383  break;
384  }
386  {
387  auto cLayer = PolymorphicDowncast<const DetectionPostProcessLayer*>(&layer);
388  const TensorInfo& boxEncodings = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
389  const TensorInfo& scores = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
390  const TensorInfo& anchors = cLayer->m_Anchors->GetTensorInfo();
391 
392  const TensorInfo& detectionBoxes = layer.GetOutputSlot(0).GetTensorInfo();
393  const TensorInfo& detectionClasses = layer.GetOutputSlot(1).GetTensorInfo();
394  const TensorInfo& detectionScores = layer.GetOutputSlot(2).GetTensorInfo();
395  const TensorInfo& numDetections = layer.GetOutputSlot(3).GetTensorInfo();
396 
397  const DetectionPostProcessDescriptor& descriptor = cLayer->GetParameters();
398  result = layerSupportObject.IsDetectionPostProcessSupported(boxEncodings,
399  scores,
400  anchors,
401  detectionBoxes,
402  detectionClasses,
403  detectionScores,
404  numDetections,
405  descriptor,
406  reason);
407  break;
408  }
410  {
411  auto cLayer = PolymorphicDowncast<const ElementwiseUnaryLayer*>(&layer);
412 
413  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
414  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
415 
416  result = layerSupportObject.IsElementwiseUnarySupported(OverrideDataType(input, dataType),
417  OverrideDataType(output, dataType),
418  cLayer->GetParameters(),
419  reason);
420  break;
421  }
422  case LayerType::Fill:
423  {
424  auto cLayer = PolymorphicDowncast<const FillLayer*>(&layer);
425  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
426  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
427  const FillDescriptor& descriptor = cLayer->GetParameters();
428 
429  result = layerSupportObject.IsFillSupported(
430  OverrideDataType(input, dataType),
431  OverrideDataType(output, dataType),
432  descriptor,
433  reason);
434  break;
435  }
437  {
438  auto cLayer = PolymorphicDowncast<const FakeQuantizationLayer*>(&layer);
439  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
440  result = layerSupportObject.IsFakeQuantizationSupported(OverrideDataType(input, dataType),
441  cLayer->GetParameters(),
442  reason);
443  break;
444  }
445  case LayerType::Floor:
446  {
447  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
448  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
449  result = layerSupportObject.IsFloorSupported(OverrideDataType(input, dataType),
450  OverrideDataType(output, dataType),
451  reason);
452  break;
453  }
455  {
456  auto cLayer = PolymorphicDowncast<const FullyConnectedLayer*>(&layer);
457  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
458  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
459 
460  const FullyConnectedDescriptor& descriptor = cLayer->GetParameters();
461  TensorInfo weightsInfo;
462  const TensorInfo* weightsInfoPtr = nullptr;
463 
464  weightsInfo = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(), dataType);
465  weightsInfoPtr = &weightsInfo;
466 
467  TensorInfo biasInfo;
468  const TensorInfo* biasInfoPtr = nullptr;
469  static const TensorInfo dummyBFloat16Bias(TensorShape({1,1,1,1}), DataType::BFloat16);
470  static const TensorInfo dummyFloat16Bias(TensorShape({1,1,1,1}), DataType::Float16);
471  static const TensorInfo dummyFloat32Bias(TensorShape({1,1,1,1}), DataType::Float32);
472  static const TensorInfo dummyQA8Bias(TensorShape({1,1,1,1}), DataType::Signed32);
473 
474  if (descriptor.m_BiasEnabled)
475  {
476  biasInfo = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(), dataType);
477  biasInfoPtr = &biasInfo;
478  }
479  else
480  {
481  // If biases are not enabled pass a dummy tensorinfo for the validation
482  switch(input.GetDataType())
483  {
484  case DataType::BFloat16:
485  {
486  biasInfoPtr = &dummyBFloat16Bias;
487  break;
488  }
489  case DataType::Float16:
490  {
491  biasInfoPtr = &dummyFloat16Bias;
492  break;
493  }
494  case DataType::Float32:
495  {
496  biasInfoPtr = &dummyFloat32Bias;
497  break;
498  }
499  case DataType::QAsymmU8:
500  case DataType::QAsymmS8:
501  case DataType::QSymmS8:
502  case DataType::QSymmS16:
503  {
504  biasInfoPtr = &dummyQA8Bias;
505  break;
506  }
507  default:
508  {
509  ARMNN_ASSERT_MSG(false, "Unexpected bias type");
510  }
511  }
512  }
513  result = layerSupportObject.IsFullyConnectedSupported(
514  OverrideDataType(input, dataType),
515  OverrideDataType(output, dataType),
516  *weightsInfoPtr,
517  *biasInfoPtr,
518  descriptor,
519  reason);
520  break;
521  }
522  case LayerType::Gather:
523  {
524  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
525  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
526  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
527  auto cLayer = PolymorphicDowncast<const GatherLayer*>(&layer);
528  const GatherDescriptor& descriptor = cLayer->GetParameters();
529  result = layerSupportObject.IsGatherSupported(OverrideDataType(input0, dataType),
530  input1,
531  OverrideDataType(output, dataType),
532  descriptor,
533  reason);
534  break;
535  }
536  case LayerType::GatherNd:
537  {
538  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
539  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
540  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
541  result = layerSupportObject.IsGatherNdSupported(OverrideDataType(input0, dataType),
542  input1,
543  OverrideDataType(output, dataType),
544  reason);
545  break;
546  }
547  case LayerType::Input:
548  {
549  const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo();
550  result = layerSupportObject.IsInputSupported(OverrideDataType(input, dataType), reason);
551  break;
552  }
554  {
555  auto cLayer = PolymorphicDowncast<const InstanceNormalizationLayer*>(&layer);
556  const InstanceNormalizationDescriptor& descriptor = cLayer->GetParameters();
557 
558  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
559  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
560 
561  result = layerSupportObject.IsInstanceNormalizationSupported(
562  OverrideDataType(input, dataType),
563  OverrideDataType(output, dataType),
564  descriptor,
565  reason);
566  break;
567  }
569  {
570  auto cLayer = PolymorphicDowncast<const L2NormalizationLayer*>(&layer);
571  const L2NormalizationDescriptor& descriptor = cLayer->GetParameters();
572 
573  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
574  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
575 
576  result = layerSupportObject.IsL2NormalizationSupported(
577  OverrideDataType(input, dataType),
578  OverrideDataType(output, dataType),
579  descriptor,
580  reason);
581  break;
582  }
584  {
585  auto cLayer = PolymorphicDowncast<const LogicalBinaryLayer*>(&layer);
586 
587  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
588  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
589  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
590 
591  result = layerSupportObject.IsLogicalBinarySupported(input0,
592  input1,
593  output,
594  cLayer->GetParameters(),
595  reason);
596  break;
597  }
599  {
600  auto cLayer = PolymorphicDowncast<const LogSoftmaxLayer*>(&layer);
601 
602  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
603  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
604 
605  result = layerSupportObject.IsLogSoftmaxSupported(OverrideDataType(input, dataType),
606  OverrideDataType(output, dataType),
607  cLayer->GetParameters(),
608  reason);
609  break;
610  }
611  case LayerType::Lstm:
612  {
613  auto cLayer = PolymorphicDowncast<const LstmLayer*>(&layer);
614  const LstmDescriptor& descriptor = cLayer->GetParameters();
615 
616  // All inputs.
617  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
618  dataType);
619  const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
620  dataType);
621  const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
622  dataType);
623  // All outputs
624  const TensorInfo& scratchBuffer = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
625  const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
626  const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
627  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(3).GetTensorInfo(), dataType);
628 
629  // Basic parameters
630  const TensorInfo& inputToForgetWeights
631  = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
632  const TensorInfo& inputToCellWeights
633  = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
634  const TensorInfo& inputToOutputWeights
635  = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
636  const TensorInfo& recurrentToForgetWeights
637  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
638  const TensorInfo& recurrentToCellWeights
639  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
640  const TensorInfo& recurrentToOutputWeights
641  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
642  const TensorInfo& forgetGateBias
643  = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
644  const TensorInfo& cellBias
645  = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
646  const TensorInfo& outputGateBias
647  = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);
648 
649  LstmInputParamsInfo paramsInfo;
650 
651  paramsInfo.m_InputToForgetWeights = &inputToForgetWeights;
652  paramsInfo.m_InputToCellWeights = &inputToCellWeights;
653  paramsInfo.m_InputToOutputWeights = &inputToOutputWeights;
654  paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
655  paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights;
656  paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
657  paramsInfo.m_ForgetGateBias = &forgetGateBias;
658  paramsInfo.m_CellBias = &cellBias;
659  paramsInfo.m_OutputGateBias = &outputGateBias;
660 
661 
662  // Optional parameters
663  TensorInfo optInputToInputWeights;
664  TensorInfo optRecurrentToInputWeights;
665  TensorInfo optCellToInputWeights;
666  TensorInfo optInputGateBias;
667  TensorInfo optProjectionWeights;
668  TensorInfo optProjectionBias;
669  TensorInfo optCellToForgetWeights;
670  TensorInfo optCellToOutputWeights;
671  TensorInfo optInputLayerNormWeights;
672  TensorInfo optForgetLayerNormWeights;
673  TensorInfo optCellLayerNormWeights;
674  TensorInfo optOutputLayerNormWeights;
675 
676  if(!descriptor.m_CifgEnabled)
677  {
678  optInputToInputWeights =
679  OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
680  paramsInfo.m_InputToInputWeights = &optInputToInputWeights;
681 
682  optRecurrentToInputWeights =
683  OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
684  paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
685  optInputGateBias =
686  OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
687  paramsInfo.m_InputGateBias = &optInputGateBias;
688  }
689 
690  if(descriptor.m_ProjectionEnabled)
691  {
692  optProjectionWeights =
693  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
694  paramsInfo.m_ProjectionWeights = &optProjectionWeights;
695  if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
696  {
697  optProjectionBias =
698  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
699  paramsInfo.m_ProjectionBias = &optProjectionBias;
700  }
701  }
702 
703  if(descriptor.m_PeepholeEnabled)
704  {
705  if(!descriptor.m_CifgEnabled)
706  {
707  optCellToInputWeights =
708  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
709  dataType);
710  paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
711  }
712  optCellToForgetWeights =
713  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
714  paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
715  optCellToOutputWeights =
716  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
717  paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
718  }
719 
720  if(descriptor.m_LayerNormEnabled)
721  {
722  if (!descriptor.m_CifgEnabled)
723  {
724  optInputLayerNormWeights = OverrideDataType(
725  cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
726  paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
727  }
728 
729  optForgetLayerNormWeights = OverrideDataType(
730  cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
731  paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;
732 
733  optCellLayerNormWeights = OverrideDataType(
734  cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
735  paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;
736 
737  optOutputLayerNormWeights = OverrideDataType(
738  cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
739  paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
740  }
741 
742  result = layerSupportObject.IsLstmSupported(
743  input,
744  outputStateIn,
745  cellStateIn,
746  scratchBuffer,
747  outputStateOut,
748  cellStateOut,
749  output,
750  descriptor,
751  paramsInfo,
752  reason);
753  break;
754  }
755  case LayerType::Maximum:
756  {
757  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
758  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
759  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
760 
761  result = layerSupportObject.IsMaximumSupported(OverrideDataType(input0, dataType),
762  OverrideDataType(input1, dataType),
763  OverrideDataType(output, dataType),
764  reason);
765  break;
766  }
767  case LayerType::MemCopy:
768  {
769  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
770  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
771 
772  result = layerSupportObject.IsMemCopySupported(OverrideDataType(input, dataType),
773  OverrideDataType(output, dataType),
774  reason);
775  break;
776  }
778  {
779  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
780  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
781 
782  result = layerSupportObject.IsMemImportSupported(OverrideDataType(input, dataType),
783  OverrideDataType(output, dataType),
784  reason);
785  break;
786  }
787  case LayerType::Merge:
788  {
789  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
790  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
791  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
792 
793  result = layerSupportObject.IsMergeSupported(OverrideDataType(input0, dataType),
794  OverrideDataType(input1, dataType),
795  OverrideDataType(output, dataType),
796  reason);
797  break;
798  }
799  case LayerType::Concat:
800  {
801  auto cLayer = PolymorphicDowncast<const ConcatLayer*>(&layer);
802 
803  // Get vector of all inputs.
804  auto getTensorInfo = [&dataType](const InputSlot& slot)
805  {
806  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
807  };
808 
809  auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo);
810  auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo);
811  std::vector<TensorInfo> inputs(beginI, endI);
812 
813  auto getTensorInfoPtr = [](const TensorInfo& info)
814  {
815  return &info;
816  };
817 
818  auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
819  auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
820  std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
821 
822  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
823 
824  result = layerSupportObject.IsConcatSupported(inputPtrs, output, cLayer->GetParameters(), reason);
825 
826 
827  break;
828  }
830  {
831  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
832  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
833  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
834  result = layerSupportObject.IsMultiplicationSupported(
835  OverrideDataType(input0, dataType),
836  OverrideDataType(input1, dataType),
837  OverrideDataType(output, dataType),
838  reason);
839  break;
840  }
842  {
843  auto cLayer = PolymorphicDowncast<const NormalizationLayer*>(&layer);
844  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
845  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
846  result = layerSupportObject.IsNormalizationSupported(OverrideDataType(input, dataType),
847  OverrideDataType(output, dataType),
848  cLayer->GetParameters(),
849  reason);
850  break;
851  }
// Output layer: only an input connection to validate (its output slot is the network boundary).
852  case LayerType::Output:
853  {
854  const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
855  result = layerSupportObject.IsOutputSupported(OverrideDataType(output, dataType), reason);
856  break;
857  }
// Permute: single input/output plus the PermuteDescriptor from the layer.
858  case LayerType::Permute:
859  {
860  auto cLayer = PolymorphicDowncast<const PermuteLayer*>(&layer);
861  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
862  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
863  result = layerSupportObject.IsPermuteSupported(OverrideDataType(input, dataType),
864  OverrideDataType(output, dataType),
865  cLayer->GetParameters(),
866  reason);
867  break;
868  }
// Pad: single input/output plus the PadDescriptor from the layer.
869  case LayerType::Pad:
870  {
871  auto cLayer = PolymorphicDowncast<const PadLayer*>(&layer);
872  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
873  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
874  result = layerSupportObject.IsPadSupported(
875  OverrideDataType(input, dataType),
876  OverrideDataType(output, dataType),
877  cLayer->GetParameters(),
878  reason);
879  break;
880  }
// NOTE(review): original line 881 dropped by extraction; the Pooling2dLayer
// downcast below implies it was "case LayerType::Pooling2d:".
882  {
883  auto cLayer = PolymorphicDowncast<const Pooling2dLayer*>(&layer);
884  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
885  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
886  result = layerSupportObject.IsPooling2dSupported(OverrideDataType(input, dataType),
887  OverrideDataType(output, dataType),
888  cLayer->GetParameters(),
889  reason);
890  break;
891  }
// NOTE(review): original line 892 dropped by extraction; presumably "case LayerType::Pooling3d:".
893  {
894  auto cLayer = PolymorphicDowncast<const Pooling3dLayer*>(&layer);
895  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
896  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
897  result = layerSupportObject.IsPooling3dSupported(OverrideDataType(input, dataType),
898  OverrideDataType(output, dataType),
899  cLayer->GetParameters(),
900  reason);
901  break;
902  }
// NOTE(review): original line 903 dropped by extraction; presumably "case LayerType::PreCompiled:".
// PreCompiled only validates its input — the output was fixed when the blob was compiled.
904  {
905  auto cLayer = PolymorphicDowncast<const PreCompiledLayer*>(&layer);
906  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
907  result = layerSupportObject.IsPreCompiledSupported(OverrideDataType(input, dataType),
908  cLayer->GetParameters(),
909  reason);
910  break;
911  }
// Quantize: note the tensors are passed without OverrideDataType — the float-in /
// quantized-out types are intrinsic to the layer and must not be overridden.
912  case LayerType::Quantize:
913  {
914  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
915  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
916  result = layerSupportObject.IsQuantizeSupported(input, output, reason);
917  break;
918  }
// QLstm: gathers the three input/three output tensors plus every weight/bias tensor
// into an LstmInputParamsInfo before querying backend support. Optional parameter
// groups (CIFG, projection, peephole, layer-norm) are only populated when the
// descriptor enables them, mirroring how the workload will consume them.
919  case LayerType::QLstm:
920  {
921  auto cLayer = PolymorphicDowncast<const QLstmLayer*>(&layer);
922  const QLstmDescriptor& descriptor = cLayer->GetParameters();
923 
924  // Inputs
925  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
926  const TensorInfo& previousOutputIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
927  const TensorInfo& previousCellStateIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();
928 
929  // Outputs
930  const TensorInfo& outputStateOut = layer.GetOutputSlot(0).GetTensorInfo();
931  const TensorInfo& cellStateOut = layer.GetOutputSlot(1).GetTensorInfo();
932  const TensorInfo& output = layer.GetOutputSlot(2).GetTensorInfo();
933 
934  // Lstm parameters
935  LstmInputParamsInfo paramsInfo;
936 
937  // Basic parameters
938  ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToForgetWeights.get() != nullptr);
939  ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToCellWeights.get() != nullptr);
940  ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToOutputWeights.get() != nullptr);
941  paramsInfo.m_InputToForgetWeights = &cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo();
942  paramsInfo.m_InputToCellWeights = &cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo();
943  paramsInfo.m_InputToOutputWeights = &cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo();
944 
945  paramsInfo.m_RecurrentToForgetWeights =
946  &cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo();
947  paramsInfo.m_RecurrentToCellWeights =
948  &cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo();
949  paramsInfo.m_RecurrentToOutputWeights =
950  &cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo();
951 
952  paramsInfo.m_ForgetGateBias = &cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo();
953  paramsInfo.m_CellBias = &cLayer->m_BasicParameters.m_CellBias->GetTensorInfo();
954  paramsInfo.m_OutputGateBias = &cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo();
955 
// CIFG disabled => the input-gate tensors exist and must be supplied.
956  if(!descriptor.m_CifgEnabled)
957  {
958  paramsInfo.m_InputToInputWeights = &cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo();
959  paramsInfo.m_RecurrentToInputWeights =
960  &cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo();
961  paramsInfo.m_InputGateBias = &cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo();
962  }
963 
964  if(descriptor.m_ProjectionEnabled)
965  {
966  paramsInfo.m_ProjectionWeights = &cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo();
967 
968  // Projection bias is optional even if projection is enabled
969  if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
970  {
971  paramsInfo.m_ProjectionBias = &cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo();
972  }
973  }
974 
975  if(descriptor.m_PeepholeEnabled)
976  {
// Cell-to-input peephole weights only exist when CIFG is also disabled.
977  if (!descriptor.m_CifgEnabled)
978  {
979  paramsInfo.m_CellToInputWeights =
980  &cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo();
981  }
982 
983  paramsInfo.m_CellToForgetWeights =
984  &cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo();
985  paramsInfo.m_CellToOutputWeights = &cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo();
986  }
987 
988  if(descriptor.m_LayerNormEnabled)
989  {
// Input layer-norm weights likewise only exist without CIFG.
990  if (!descriptor.m_CifgEnabled)
991  {
992  paramsInfo.m_InputLayerNormWeights =
993  &cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo();
994  }
995 
996  paramsInfo.m_ForgetLayerNormWeights =
997  &cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo();
998  paramsInfo.m_CellLayerNormWeights =
999  &cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo();
1000  paramsInfo.m_OutputLayerNormWeights =
1001  &cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo();
1002  }
1003 
1004  result = layerSupportObject.IsQLstmSupported(input,
1005  previousOutputIn,
1006  previousCellStateIn,
1007  outputStateOut,
1008  cellStateOut,
1009  output,
1010  descriptor,
1011  paramsInfo,
1012  reason);
1013  break;
1014  }
// NOTE(review): original line 1015 dropped by extraction; the QuantizedLstmLayer
// downcast below implies it was "case LayerType::QuantizedLstm:".
// QuantizedLstm has no descriptor — all twelve weight/bias tensors are mandatory.
1016  {
1017  auto cLayer = PolymorphicDowncast<const QuantizedLstmLayer*>(&layer);
1018 
1019  // Inputs
1020  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1021  const TensorInfo& previousCellStateIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1022  const TensorInfo& previousOutputIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();
1023 
1024  // Outputs
1025  const TensorInfo& cellStateOut = layer.GetOutputSlot(0).GetTensorInfo();
1026  const TensorInfo& output = layer.GetOutputSlot(1).GetTensorInfo();
1027 
1028  // QuantizedLstm parameters
1029  QuantizedLstmInputParamsInfo paramsInfo;
1030 
1031  paramsInfo.m_InputToInputWeights =
1032  &cLayer->m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo();
1033  paramsInfo.m_InputToForgetWeights =
1034  &cLayer->m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo();
1035  paramsInfo.m_InputToCellWeights =
1036  &cLayer->m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo();
1037  paramsInfo.m_InputToOutputWeights =
1038  &cLayer->m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo();
1039 
1040  paramsInfo.m_RecurrentToInputWeights =
1041  &cLayer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo();
1042  paramsInfo.m_RecurrentToForgetWeights =
1043  &cLayer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo();
1044  paramsInfo.m_RecurrentToCellWeights =
1045  &cLayer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo();
1046  paramsInfo.m_RecurrentToOutputWeights =
1047  &cLayer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo();
1048 
1049  paramsInfo.m_InputGateBias =
1050  &cLayer->m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo();
1051  paramsInfo.m_ForgetGateBias =
1052  &cLayer->m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo();
1053  paramsInfo.m_CellBias =
1054  &cLayer->m_QuantizedLstmParameters.m_CellBias->GetTensorInfo();
// NOTE(review): stray double semicolon at the end of the next line — a harmless
// empty statement, but worth removing in the upstream source.
1055  paramsInfo.m_OutputGateBias =
1056  &cLayer->m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo();;
1057 
1058  result = layerSupportObject.IsQuantizedLstmSupported(input,
1059  previousCellStateIn,
1060  previousOutputIn,
1061  cellStateOut,
1062  output,
1063  paramsInfo,
1064  reason);
1065  break;
1066  }
// Division: binary element-wise op — two inputs, one output, no descriptor.
1067  case LayerType::Division:
1068  {
1069  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1070  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1071  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1072  result = layerSupportObject.IsDivisionSupported(
1073  OverrideDataType(input0, dataType),
1074  OverrideDataType(input1, dataType),
1075  OverrideDataType(output, dataType),
1076  reason);
1077  break;
1078  }
// Rank: single input/output, no descriptor.
1079  case LayerType::Rank:
1080  {
1081  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1082  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1083  result = layerSupportObject.IsRankSupported(OverrideDataType(input, dataType),
1084  OverrideDataType(output, dataType),
1085  reason);
1086  break;
1087  }
// Reshape: single input/output plus the ReshapeDescriptor (target shape).
1088  case LayerType::Reshape:
1089  {
1090  auto cLayer = PolymorphicDowncast<const ReshapeLayer*>(&layer);
1091  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1092  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1093  result = layerSupportObject.IsReshapeSupported(OverrideDataType(input, dataType),
1094  OverrideDataType(output, dataType),
1095  cLayer->GetParameters(),
1096  reason);
1097  break;
1098  }
// Resize / Shape / Slice / Softmax: the standard one-input/one-output pattern,
// with the layer's descriptor forwarded where the support query takes one.
1099  case LayerType::Resize:
1100  {
1101  auto cLayer = PolymorphicDowncast<const ResizeLayer*>(&layer);
1102  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1103  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1104  result = layerSupportObject.IsResizeSupported(OverrideDataType(input, dataType),
1105  OverrideDataType(output, dataType),
1106  cLayer->GetParameters(),
1107  reason);
1108  break;
1109  }
1110  case LayerType::Shape:
1111  {
1112  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1113  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1114 
1115  result = layerSupportObject.IsShapeSupported(OverrideDataType(input, dataType),
1116  OverrideDataType(output, dataType),
1117  reason);
1118  break;
1119  }
1120  case LayerType::Slice:
1121  {
1122  auto cLayer = PolymorphicDowncast<const SliceLayer*>(&layer);
1123 
1124  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1125  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1126 
1127  result = layerSupportObject.IsSliceSupported(OverrideDataType(input, dataType),
1128  OverrideDataType(output, dataType),
1129  cLayer->GetParameters(),
1130  reason);
1131  break;
1132  }
1133  case LayerType::Softmax:
1134  {
1135  auto cLayer = PolymorphicDowncast<const SoftmaxLayer*>(&layer);
1136  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1137  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1138  result = layerSupportObject.IsSoftmaxSupported(OverrideDataType(input, dataType),
1139  OverrideDataType(output, dataType),
1140  cLayer->GetParameters(),
1141  reason);
1142  break;
1143  }
// NOTE(review): original line 1144 dropped by extraction; the SpaceToBatchNdLayer
// downcast below implies it was "case LayerType::SpaceToBatchNd:".
1145  {
1146  auto cLayer = PolymorphicDowncast<const SpaceToBatchNdLayer*>(&layer);
1147  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1148  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1149  result = layerSupportObject.IsSpaceToBatchNdSupported(OverrideDataType(input, dataType),
1150  OverrideDataType(output, dataType),
1151  cLayer->GetParameters(),
1152  reason);
1153  break;
1154  }
// NOTE(review): original line 1155 dropped by extraction; presumably "case LayerType::SpaceToDepth:".
1156  {
1157  auto cLayer = PolymorphicDowncast<const SpaceToDepthLayer*>(&layer);
1158 
1159  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1160  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1161 
1162  result = layerSupportObject.IsSpaceToDepthSupported(OverrideDataType(input, dataType),
1163  OverrideDataType(output, dataType),
1164  cLayer->GetParameters(),
1165  reason);
1166  break;
1167  }
// Splitter: one input fans out to N outputs; all output TensorInfos are collected
// (with the dataType override applied) and passed by reference_wrapper.
1168  case LayerType::Splitter:
1169  {
1170  auto cLayer = PolymorphicDowncast<const SplitterLayer*>(&layer);
1171  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1172 
1173  // Get vector of all outputs.
1174  auto getTensorInfo = [&dataType](const OutputSlot& slot)
1175  {
1176  return OverrideDataType(slot.GetTensorInfo(), dataType);
1177  };
1178  auto beginI = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfo);
1179  auto endI = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfo);
1180  std::vector<TensorInfo> outputs(beginI, endI);
1181 
1182  const std::vector<std::reference_wrapper<TensorInfo>> outputPtrs(outputs.begin(), outputs.end());
1183 
1184  result = layerSupportObject.IsSplitterSupported(OverrideDataType(input, dataType),
1185  outputPtrs,
1186  cLayer->GetParameters(),
1187  reason);
1188  break;
1189  }
// Stack: N inputs, one output. Inputs are materialised into a local vector first
// so the pointer vector below stays valid for the duration of the call.
1190  case LayerType::Stack:
1191  {
1192  auto cLayer = PolymorphicDowncast<const StackLayer*>(&layer);
1193 
1194  // Get vector of all inputs.
1195  auto getTensorInfo = [&dataType](const InputSlot& slot)
1196  {
1197  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
1198  };
1199  auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo);
1200  auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo);
1201  std::vector<TensorInfo> inputs(beginI, endI);
1202 
1203  auto getTensorInfoPtr = [](const TensorInfo& info)
1204  {
1205  return &info;
1206  };
1207  auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
1208  auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
1209  std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
1210 
1211  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1212 
1213  result = layerSupportObject.IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason);
1214 
1215  break;
1216  }
// StandIn: arbitrary input and output counts; both sides are materialised and then
// converted to pointer vectors backed by the local storage above them.
1217  case LayerType::StandIn:
1218  {
1219  auto cLayer = PolymorphicDowncast<const StandInLayer*>(&layer);
1220 
1221  // Get vector of all inputs.
1222  auto getTensorInfoIn = [&dataType](const InputSlot& slot)
1223  {
1224  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
1225  };
1226  auto getTensorInfoOut = [&dataType](const OutputSlot& slot)
1227  {
1228  return OverrideDataType(slot.GetTensorInfo(), dataType);
1229  };
1230  auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfoIn);
1231  auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfoIn);
1232  std::vector<TensorInfo> inputs(beginI, endI);
1233 
1234  auto beginO = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfoOut);
1235  auto endO = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfoOut);
1236  std::vector<TensorInfo> outputs(beginO, endO);
1237 
1238 
1239  auto getTensorInfoPtr = [](const TensorInfo& info)
1240  {
1241  return &info;
1242  };
1243  auto beginPtrI = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
1244  auto endPtrI = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
1245  std::vector<const TensorInfo*> inputPtrs(beginPtrI, endPtrI);
1246 
1247  auto beginPtrO = MakeTransformIterator(outputs.begin(), getTensorInfoPtr);
1248  auto endPtrO = MakeTransformIterator(outputs.end(), getTensorInfoPtr);
1249  std::vector<const TensorInfo*> outputPtrs(beginPtrO, endPtrO);
1250 
1251 
1252  result = layerSupportObject.IsStandInSupported(inputPtrs,
1253  outputPtrs,
1254  cLayer->GetParameters(),
1255  reason);
1256  break;
1257  }
// NOTE(review): original line 1258 dropped by extraction; the StridedSliceLayer
// downcast below implies it was "case LayerType::StridedSlice:".
1259  {
1260  auto cLayer = PolymorphicDowncast<const StridedSliceLayer*>(&layer);
1261  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1262  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1263  result = layerSupportObject.IsStridedSliceSupported(OverrideDataType(input, dataType),
1264  OverrideDataType(output, dataType),
1265  cLayer->GetParameters(),
1266  reason);
1267  break;
1268  }
// NOTE(review): original line 1269 dropped by extraction; the IsSubtractionSupported
// call below implies it was "case LayerType::Subtraction:".
1270  {
1271  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1272  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1273  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1274  result = layerSupportObject.IsSubtractionSupported(
1275  OverrideDataType(input0, dataType),
1276  OverrideDataType(input1, dataType),
1277  OverrideDataType(output, dataType),
1278  reason);
1279  break;
1280  }
// Switch: two inputs, two outputs — all four tensors are validated.
1281  case LayerType::Switch:
1282  {
1283  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1284  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1285  const TensorInfo& output0 = layer.GetOutputSlot(0).GetTensorInfo();
1286  const TensorInfo& output1 = layer.GetOutputSlot(1).GetTensorInfo();
1287  result = layerSupportObject.IsSwitchSupported(OverrideDataType(input0, dataType),
1288  OverrideDataType(input1, dataType),
1289  OverrideDataType(output0, dataType),
1290  OverrideDataType(output1, dataType),
1291  reason);
1292  break;
1293  }
// Mean: single input/output plus the MeanDescriptor (axes / keepDims).
1294  case LayerType::Mean:
1295  {
1296  auto cLayer = PolymorphicDowncast<const MeanLayer*>(&layer);
1297  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1298  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1299  result = layerSupportObject.IsMeanSupported(
1300  OverrideDataType(input, dataType),
1301  OverrideDataType(output, dataType),
1302  cLayer->GetParameters(),
1303  reason);
1304  break;
1305  }
// Minimum: binary element-wise op, no descriptor.
1306  case LayerType::Minimum:
1307  {
1308  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1309  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1310  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1311  result = layerSupportObject.IsMinimumSupported(OverrideDataType(input0, dataType),
1312  OverrideDataType(input1, dataType),
1313  OverrideDataType(output, dataType),
1314  reason);
1315  break;
1316  }
// Prelu: second input slot carries the alpha (slope) tensor.
1317  case LayerType::Prelu:
1318  {
1319  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1320  const TensorInfo& alpha = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1321  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1322  result = layerSupportObject.IsPreluSupported(OverrideDataType(input, dataType),
1323  OverrideDataType(alpha, dataType),
1324  OverrideDataType(output, dataType),
1325  reason);
1326  break;
1327  }
// Transpose: single input/output plus the TransposeDescriptor.
1328  case LayerType::Transpose:
1329  {
1330  auto cLayer = PolymorphicDowncast<const TransposeLayer*>(&layer);
1331  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1332  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1333  result = layerSupportObject.IsTransposeSupported(OverrideDataType(input, dataType),
1334  OverrideDataType(output, dataType),
1335  cLayer->GetParameters(),
1336  reason);
1337  break;
1338  }
// NOTE(review): original line 1339 dropped by extraction; the
// TransposeConvolution2dLayer downcast below implies it was
// "case LayerType::TransposeConvolution2d:".
1340  {
1341  auto cLayer = PolymorphicDowncast<const TransposeConvolution2dLayer*>(&layer);
1342 
1343  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
1344  dataType);
1345  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
1346 
1347  const TransposeConvolution2dDescriptor& descriptor = cLayer->GetParameters();
1348 
// Bias is optional; when present its data type is derived from the (possibly
// overridden) weights type via GetBiasTypeFromWeightsType.
1349  Optional<TensorInfo> biases;
1350  if (descriptor.m_BiasEnabled)
1351  {
1352  ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
1353  biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
1354  GetBiasTypeFromWeightsType(dataType));
1355  }
1356 
1357  ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
1358  const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);
1359 
1360  result = layerSupportObject.IsTransposeConvolution2dSupported(input,
1361  output,
1362  descriptor,
1363  weights,
1364  biases,
1365  reason);
1366 
1367  break;
1368  }
// Reduce: single input/output plus the ReduceDescriptor.
1369  case LayerType::Reduce:
1370  {
1371  auto cLayer = PolymorphicDowncast<const ReduceLayer*>(&layer);
1372  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1373  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1374 
1375  result = layerSupportObject.IsReduceSupported(OverrideDataType(input, dataType),
1376  OverrideDataType(output, dataType),
1377  cLayer->GetParameters(),
1378  reason);
1379  break;
1380  }
// NOTE(review): original line 1381 dropped by extraction; the
// UnidirectionalSequenceLstmLayer downcast below implies it was
// "case LayerType::UnidirectionalSequenceLstm:".
// Unlike the QLstm case above, the dataType override is applied to every tensor here,
// and the overridden weight TensorInfos are kept in named locals (opt*) so the
// pointers stored in paramsInfo remain valid until IsUnidirectionalSequenceLstmSupported returns.
1382  {
1383  auto cLayer = PolymorphicDowncast<const UnidirectionalSequenceLstmLayer*>(&layer);
1384  const UnidirectionalSequenceLstmDescriptor& descriptor = cLayer->GetParameters();
1385 
1386  // All inputs.
1387  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
1388  dataType);
1389  const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
1390  dataType);
1391  const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
1392  dataType);
1393  // Outputs
1394  const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
1395  const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
1396  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
1397 
1398  // Basic parameters
1399  const TensorInfo& inputToForgetWeights
1400  = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
1401  const TensorInfo& inputToCellWeights
1402  = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
1403  const TensorInfo& inputToOutputWeights
1404  = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
1405  const TensorInfo& recurrentToForgetWeights
1406  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
1407  const TensorInfo& recurrentToCellWeights
1408  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
1409  const TensorInfo& recurrentToOutputWeights
1410  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
1411  const TensorInfo& forgetGateBias
1412  = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
1413  const TensorInfo& cellBias
1414  = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
1415  const TensorInfo& outputGateBias
1416  = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);
1417 
1418  LstmInputParamsInfo paramsInfo;
1419 
1420  paramsInfo.m_InputToForgetWeights = &inputToForgetWeights;
1421  paramsInfo.m_InputToCellWeights = &inputToCellWeights;
1422  paramsInfo.m_InputToOutputWeights = &inputToOutputWeights;
1423  paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1424  paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights;
1425  paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1426  paramsInfo.m_ForgetGateBias = &forgetGateBias;
1427  paramsInfo.m_CellBias = &cellBias;
1428  paramsInfo.m_OutputGateBias = &outputGateBias;
1429 
// Optional tensors are declared up-front (default-constructed) and only filled in
// when the corresponding descriptor flag enables them.
1430  // Optional parameters
1431  TensorInfo optInputToInputWeights;
1432  TensorInfo optRecurrentToInputWeights;
1433  TensorInfo optCellToInputWeights;
1434  TensorInfo optInputGateBias;
1435  TensorInfo optProjectionWeights;
1436  TensorInfo optProjectionBias;
1437  TensorInfo optCellToForgetWeights;
1438  TensorInfo optCellToOutputWeights;
1439  TensorInfo optInputLayerNormWeights;
1440  TensorInfo optForgetLayerNormWeights;
1441  TensorInfo optCellLayerNormWeights;
1442  TensorInfo optOutputLayerNormWeights;
1443 
1444  if(!descriptor.m_CifgEnabled)
1445  {
1446  optInputToInputWeights =
1447  OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
1448  paramsInfo.m_InputToInputWeights = &optInputToInputWeights;
1449 
1450  optRecurrentToInputWeights =
1451  OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
1452  paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
1453  optInputGateBias =
1454  OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
1455  paramsInfo.m_InputGateBias = &optInputGateBias;
1456  }
1457 
1458  if(descriptor.m_ProjectionEnabled)
1459  {
1460  optProjectionWeights =
1461  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
1462  paramsInfo.m_ProjectionWeights = &optProjectionWeights;
// Projection bias is optional even when projection itself is enabled.
1463  if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
1464  {
1465  optProjectionBias =
1466  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
1467  paramsInfo.m_ProjectionBias = &optProjectionBias;
1468  }
1469  }
1470 
1471  if(descriptor.m_PeepholeEnabled)
1472  {
1473  if(!descriptor.m_CifgEnabled)
1474  {
1475  optCellToInputWeights =
1476  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
1477  dataType);
1478  paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
1479  }
1480  optCellToForgetWeights =
1481  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
1482  paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
1483  optCellToOutputWeights =
1484  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
1485  paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
1486  }
1487 
1488  if(descriptor.m_LayerNormEnabled)
1489  {
1490  if (!descriptor.m_CifgEnabled)
1491  {
1492  optInputLayerNormWeights = OverrideDataType(
1493  cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
1494  paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
1495  }
1496 
1497  optForgetLayerNormWeights = OverrideDataType(
1498  cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
1499  paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;
1500 
1501  optCellLayerNormWeights = OverrideDataType(
1502  cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
1503  paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;
1504 
1505  optOutputLayerNormWeights = OverrideDataType(
1506  cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
1507  paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
1508  }
1509 
1510  result = layerSupportObject.IsUnidirectionalSequenceLstmSupported(input,
1511  outputStateIn,
1512  cellStateIn,
1513  outputStateOut,
1514  cellStateOut,
1515  output,
1516  descriptor,
1517  paramsInfo,
1518  reason);
1519  break;
1520  }
// Fallback for any LayerType this switch does not handle: assert in debug builds,
// report "unsupported" with a reason in release builds.
1521  default:
1522  {
1523  ARMNN_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
1524  reason.value() = "Unrecognised layer type";
1525  result = false;
1526  break;
1527  }
// End of the per-layer switch and of IsLayerConfigurationSupported (opened above this chunk).
1528  }
1529  return result;
1530 }
1531 
// NOTE(review): the doc extraction dropped this overload's first line (original 1532);
// given the use of 'backendId' below it was likely
// "bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId," — confirm against the repo.
// Convenience overload: delegates to IsLayerConfigurationSupported with default ModelOptions.
1533  const IConnectableLayer& connectableLayer,
1534  Optional<DataType> dataType,
1535  std::string& outReasonIfUnsupported)
1536 {
1537  return IsLayerConfigurationSupported(backendId, connectableLayer, dataType, outReasonIfUnsupported);
1538 }
1539 
// NOTE(review): first line dropped by extraction (original 1540); likely
// "bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer," — confirm.
// Overload without an explicit backend: reads the backend id off the layer itself.
1541  Optional<DataType> dataType,
1542  std::string& outReasonIfUnsupported)
1543 {
1544  auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
1545  return IsLayerConfigurationSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
1546 }
1547 
1548 // TODO merge with defaulted modelOptions above
// NOTE(review): first line dropped by extraction (original 1549); likely
// "bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer," — confirm.
// Same as the overload above but forwards caller-supplied ModelOptions.
1550  Optional<DataType> dataType,
1551  std::string& outReasonIfUnsupported,
1552  const ModelOptions& modelOptions)
1553 {
1554  auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
1555  return IsLayerConfigurationSupported(layer->GetBackendId(),
1556  connectableLayer,
1557  dataType,
1558  outReasonIfUnsupported,
1559  modelOptions);
1560 }
1561 
// NOTE(review): first line dropped by extraction (original 1562); given 'backendId'
// below it was likely "bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId," — confirm.
// Fully-specified overload: explicit backend and ModelOptions, straight pass-through.
1563  const IConnectableLayer& connectableLayer,
1564  Optional<DataType> dataType,
1565  std::string& outReasonIfUnsupported,
1566  const ModelOptions& modelOptions)
1567 {
1568  return IsLayerConfigurationSupported(backendId,
1569  connectableLayer,
1570  dataType,
1571  outReasonIfUnsupported,
1572  modelOptions);
1573 }
1575 std::unique_ptr<IWorkload> IWorkloadFactory::CreateWorkload(LayerType type,
1576  const QueueDescriptor& descriptor,
1577  const WorkloadInfo& info) const
1578 {
1579  switch(type)
1580  {
1581  case LayerType::Activation :
1582  {
1583  auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
1584  return CreateActivation(*activationQueueDescriptor, info);
1585  }
1586  case LayerType::Addition :
1587  {
1588  auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
1589  return CreateAddition(*additionQueueDescriptor, info);
1590  }
1591  case LayerType::ArgMinMax :
1592  {
1593  auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
1594  return CreateArgMinMax(*argMinMaxQueueDescriptor, info);
1595  }
1597  {
1598  auto batchNormQueueDescriptor = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
1599  return CreateBatchNormalization(*batchNormQueueDescriptor, info);
1600  }
1602  {
1603  auto batchToSpaceNdQueueDescriptor
1604  = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
1605  return CreateBatchToSpaceNd(*batchToSpaceNdQueueDescriptor, info);
1606  }
1607  case LayerType::Cast :
1608  {
1609  auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
1610  return CreateCast(*castQueueDescriptor, info);
1611  }
1613  {
1614  auto channelShuffleQueueDescriptor
1615  = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
1616  return CreateChannelShuffle(*channelShuffleQueueDescriptor, info);
1617  }
1618  case LayerType::Comparison :
1619  {
1620  auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
1621  return CreateComparison(*comparisonQueueDescriptor, info);
1622  }
1623  case LayerType::Concat :
1624  {
1625  auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
1626  return CreateConcat(*concatQueueDescriptor, info);
1627  }
1628  case LayerType::Constant :
1629  {
1630  auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
1631  return CreateConstant(*constantQueueDescriptor, info);
1632  }
1634  {
1635  auto convertBf16ToFp32QueueDescriptor
1636  = PolymorphicDowncast<const ConvertBf16ToFp32QueueDescriptor*>(&descriptor);
1637  return CreateConvertBf16ToFp32(*convertBf16ToFp32QueueDescriptor, info);
1638  }
1640  {
1641  auto convertFp16ToFp32QueueDescriptor
1642  = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
1643  return CreateConvertFp16ToFp32(*convertFp16ToFp32QueueDescriptor, info);
1644  }
1646  {
1647  auto convertFp32ToBf16QueueDescriptor
1648  = PolymorphicDowncast<const ConvertFp32ToBf16QueueDescriptor*>(&descriptor);
1649  return CreateConvertFp32ToBf16(*convertFp32ToBf16QueueDescriptor, info);
1650  }
1652  {
1653  auto convertFp32ToFp16QueueDescriptor
1654  = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
1655  return CreateConvertFp32ToFp16(*convertFp32ToFp16QueueDescriptor, info);
1656  }
1658  {
1659  auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
1660  return CreateConvolution2d(*convolution2dQueueDescriptor, info);
1661  }
1663  {
1664  auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
1665  return CreateConvolution3d(*convolution3dQueueDescriptor, info);
1666  }
1667  case LayerType::Debug:
1668  {
1669  auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
1670  return CreateDebug(*debugQueueDescriptor, info);
1671  }
1673  {
1674  auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
1675  return CreateDepthToSpace(*depthToSpaceQueueDescriptor, info);
1676  }
1678  {
1679  auto depthwiseConvolution2DQueueDescriptor
1680  = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
1681  return CreateDepthwiseConvolution2d(*depthwiseConvolution2DQueueDescriptor, info);
1682  }
1683  case LayerType::Dequantize:
1684  {
1685  auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
1686  return CreateDequantize(*dequantizeQueueDescriptor, info);
1687  }
1689  {
1690  auto detectionPostProcessQueueDescriptor
1691  = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
1692  return CreateDetectionPostProcess(*detectionPostProcessQueueDescriptor, info);
1693  }
1694  case LayerType::Division:
1695  {
1696  auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
1697  return CreateDivision(*divisionQueueDescriptor, info);
1698  }
1700  {
1701  auto elementwiseUnaryQueueDescriptor
1702  = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
1703  return CreateElementwiseUnary(*elementwiseUnaryQueueDescriptor, info);
1704 
1705  }
1707  {
1708  auto fakeQuantizationQueueDescriptor
1709  = PolymorphicDowncast<const FakeQuantizationQueueDescriptor*>(&descriptor);
1710  return CreateFakeQuantization(*fakeQuantizationQueueDescriptor, info);
1711  }
1712  case LayerType::Fill:
1713  {
1714  auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
1715  return CreateFill(*fillQueueDescriptor, info);
1716  }
1717  case LayerType::Floor:
1718  {
1719  auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
1720  return CreateFloor(*floorQueueDescriptor, info);
1721  }
1723  {
1724  auto fullyConnectedQueueDescriptor
1725  = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
1726  return CreateFullyConnected(*fullyConnectedQueueDescriptor, info);
1727  }
1728  case LayerType::Gather:
1729  {
1730  auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
1731  return CreateGather(*gatherQueueDescriptor, info);
1732  }
1733  case LayerType::Input:
1734  {
1735  auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
1736  return CreateInput(*inputQueueDescriptor, info);
1737  }
1739  {
1740  auto instanceNormalizationQueueDescriptor
1741  = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
1742  return CreateInstanceNormalization(*instanceNormalizationQueueDescriptor, info);
1743  }
1745  {
1746  auto l2NormalizationQueueDescriptor
1747  = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
1748  return CreateL2Normalization(*l2NormalizationQueueDescriptor, info);
1749  }
1751  {
1752  auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
1753  return CreateLogicalBinary(*logicalBinaryQueueDescriptor, info);
1754  }
1755  case LayerType::LogSoftmax:
1756  {
1757  auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
1758  return CreateLogSoftmax(*logSoftmaxQueueDescriptor, info);
1759  }
1760  case LayerType::Lstm:
1761  {
1762  auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
1763  return CreateLstm(*lstmQueueDescriptor, info);
1764  }
1765  case LayerType::Maximum:
1766  {
1767  auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
1768  return CreateMaximum(*maximumQueueDescriptor, info);
1769  }
1770  case LayerType::Mean:
1771  {
1772  auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
1773  return CreateMean(*meanQueueDescriptor, info);
1774  }
1775  case LayerType::MemCopy:
1776  {
1777  auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
1778  return CreateMemCopy(*memCopyQueueDescriptor, info);
1779  }
1780  case LayerType::MemImport:
1781  {
1782  auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
1783  return CreateMemImport(*memImportQueueDescriptor, info);
1784  }
1785  case LayerType::Minimum:
1786  {
1787  auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
1788  return CreateMinimum(*minimumQueueDescriptor, info);
1789  }
1791  {
1792  auto multiplicationQueueDescriptor
1793  = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
1794  return CreateMultiplication(*multiplicationQueueDescriptor, info);
1795  }
1797  {
1798  auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
1799  return CreateNormalization(*normalizationQueueDescriptor, info);
1800  }
1801  case LayerType::Output:
1802  {
1803  auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
1804  return CreateOutput(*outputQueueDescriptor, info);
1805  }
1806  case LayerType::Pad:
1807  {
1808  auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
1809  return CreatePad(*padQueueDescriptor, info);
1810  }
1811  case LayerType::Permute:
1812  {
1813  auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
1814  return CreatePermute(*permuteQueueDescriptor, info);
1815  }
1816  case LayerType::Pooling2d:
1817  {
1818  auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
1819  return CreatePooling2d(*pooling2dQueueDescriptor, info);
1820  }
1821  case LayerType::Pooling3d:
1822  {
1823  auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
1824  return CreatePooling3d(*pooling3dQueueDescriptor, info);
1825  }
1827  {
1828  auto preCompiledQueueDescriptor = PolymorphicDowncast<const PreCompiledQueueDescriptor*>(&descriptor);
1829  return CreatePreCompiled(*preCompiledQueueDescriptor, info);
1830  }
1831  case LayerType::Prelu:
1832  {
1833  auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
1834  return CreatePrelu(*preluQueueDescriptor, info);
1835  }
1836  case LayerType::QLstm:
1837  {
1838  auto qlstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
1839  return CreateQLstm(*qlstmQueueDescriptor, info);
1840  }
1841  case LayerType::Quantize:
1842  {
1843  auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
1844  return CreateQuantize(*quantizeQueueDescriptor, info);
1845  }
1846  case LayerType::Rank:
1847  {
1848  auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
1849  return CreateRank(*rankQueueDescriptor, info);
1850  }
1851  case LayerType::Reduce:
1852  {
1853  auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
1854  return CreateReduce(*reduceQueueDescriptor, info);
1855  }
1856  case LayerType::Reshape:
1857  {
1858  auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
1859  return CreateReshape(*reshapeQueueDescriptor, info);
1860  }
1861  case LayerType::Resize:
1862  {
1863  auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
1864  return CreateResize(*resizeQueueDescriptor, info);
1865  }
1866  case LayerType::Shape:
1867  {
1868  auto shapeQueueDescriptor = PolymorphicDowncast<const ShapeQueueDescriptor*>(&descriptor);
1869  return CreateShape(*shapeQueueDescriptor, info);
1870  }
1871  case LayerType::Slice:
1872  {
1873  auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
1874  return CreateSlice(*sliceQueueDescriptor, info);
1875  }
1876  case LayerType::Softmax:
1877  {
1878  auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
1879  return CreateSoftmax(*softmaxQueueDescriptor, info);
1880  }
1882  {
1883  auto spaceToBatchNdQueueDescriptor
1884  = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
1885  return CreateSpaceToBatchNd(*spaceToBatchNdQueueDescriptor, info);
1886  }
1888  {
1889  auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
1890  return CreateSpaceToDepth(*spaceToDepthQueueDescriptor, info);
1891  }
1892  case LayerType::Splitter:
1893  {
1894  auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
1895  return CreateSplitter(*splitterQueueDescriptor, info);
1896  }
1897  case LayerType::Stack:
1898  {
1899  auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
1900  return CreateStack(*stackQueueDescriptor, info);
1901  }
1903  {
1904  auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
1905  return CreateStridedSlice(*stridedSliceQueueDescriptor, info);
1906  }
1908  {
1909  auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
1910  return CreateSubtraction(*subtractionQueueDescriptor, info);
1911  }
1912  case LayerType::Transpose:
1913  {
1914  auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
1915  return CreateTranspose(*transposeQueueDescriptor, info);
1916  }
1918  {
1919  auto transposeConvolution2dQueueDescriptor
1920  = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
1921  return CreateTransposeConvolution2d(*transposeConvolution2dQueueDescriptor, info);
1922  }
1924  {
1925  auto unidirectionalSequenceLstmQueueDescriptor
1926  = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
1927  return CreateUnidirectionalSequenceLstm(*unidirectionalSequenceLstmQueueDescriptor, info);
1928  }
1929  default:
1930  return nullptr;
1931  }
1932 }
1934 
1935 std::unique_ptr<IWorkload> IWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& /*descriptor*/,
1936  const WorkloadInfo& /*info*/) const
1937 {
1938  return std::unique_ptr<IWorkload>();
1939 }
1940 
1941 std::unique_ptr<IWorkload> IWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& /*descriptor*/,
1942  const WorkloadInfo& /*info*/) const
1943 {
1944  return std::unique_ptr<IWorkload>();
1945 }
1946 
1947 std::unique_ptr<IWorkload> IWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& /*descriptor*/,
1948  const WorkloadInfo& /*info*/) const
1949 {
1950  return std::unique_ptr<IWorkload>();
1951 }
1952 
1953 std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchNormalization(
1954  const BatchNormalizationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
1955 {
1956  return std::unique_ptr<IWorkload>();
1957 }
1958 
1959 std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& /*desc*/,
1960  const WorkloadInfo& /*Info*/) const
1961 {
1962  return std::unique_ptr<IWorkload>();
1963 }
1964 
1965 std::unique_ptr<IWorkload> IWorkloadFactory::CreateCast(const CastQueueDescriptor& /*descriptor*/,
1966  const WorkloadInfo& /*info*/) const
1967 {
1968  return std::unique_ptr<IWorkload>();
1969 }
1970 
1971 std::unique_ptr<IWorkload> IWorkloadFactory::CreateChannelShuffle(const ChannelShuffleQueueDescriptor& /*descriptor*/,
1972  const WorkloadInfo& /*info*/) const
1973 {
1974  return std::unique_ptr<IWorkload>();
1975 }
1976 
1977 std::unique_ptr<IWorkload> IWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& /*descriptor*/,
1978  const WorkloadInfo& /*info*/) const
1979 {
1980  return std::unique_ptr<IWorkload>();
1981 }
1982 
1983 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& /*descriptor*/,
1984  const WorkloadInfo& /*info*/) const
1985 {
1986  return std::unique_ptr<IWorkload>();
1987 }
1988 
1989 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& /*descriptor*/,
1990  const WorkloadInfo& /*info*/) const
1991 {
1992  return std::unique_ptr<IWorkload>();
1993 }
1994 
1995 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor& /*desc*/,
1996  const WorkloadInfo& /*info*/) const
1997 {
1998  return std::unique_ptr<IWorkload>();
1999 }
2000 
2001 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& /*desc*/,
2002  const WorkloadInfo& /*info*/) const
2003 {
2004  return std::unique_ptr<IWorkload>();
2005 }
2006 
2007 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToBf16(const ConvertFp32ToBf16QueueDescriptor& /*desc*/,
2008  const WorkloadInfo& /*info*/) const
2009 {
2010  return std::unique_ptr<IWorkload>();
2011 }
2012 
2013 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& /*desc*/,
2014  const WorkloadInfo& /*info*/) const
2015 {
2016  return std::unique_ptr<IWorkload>();
2017 }
2018 
2019 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& /*descriptor*/,
2020  const WorkloadInfo& /*info*/) const
2021 {
2022  return std::unique_ptr<IWorkload>();
2023 }
2024 
2025 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution3d(const Convolution3dQueueDescriptor& /*descriptor*/,
2026  const WorkloadInfo& /*info*/) const
2027 {
2028  return std::unique_ptr<IWorkload>();
2029 }
2030 
2031 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDebug(const DebugQueueDescriptor& /*descriptor*/,
2032  const WorkloadInfo& /*info*/) const
2033 {
2034  return std::unique_ptr<IWorkload>();
2035 }
2036 
2037 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& /*descriptor*/,
2038  const WorkloadInfo& /*info*/) const
2039 {
2040  return std::unique_ptr<IWorkload>();
2041 }
2042 
2043 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthwiseConvolution2d(
2044  const DepthwiseConvolution2dQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
2045 {
2046  return std::unique_ptr<IWorkload>();
2047 }
2048 
2049 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDequantize(
2050  const DequantizeQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
2051 {
2052  return std::unique_ptr<IWorkload>();
2053 }
2054 
2055 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDetectionPostProcess(
2056  const DetectionPostProcessQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
2057 {
2058  return std::unique_ptr<IWorkload>();
2059 }
2060 
2061 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& /*descriptor*/,
2062  const WorkloadInfo& /*info*/) const
2063 {
2064  return std::unique_ptr<IWorkload>();
2065 }
2066 
2067 std::unique_ptr<IWorkload> IWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& /*desc*/,
2068  const WorkloadInfo& /*info*/) const
2069 {
2070  return std::unique_ptr<IWorkload>();
2071 }
2072 
2073 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& /*desc*/,
2074  const WorkloadInfo& /*info*/) const
2075 {
2076  return std::unique_ptr<IWorkload>();
2077 }
2078 
2079 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFill(const FillQueueDescriptor& /*descriptor*/,
2080  const WorkloadInfo& /*info*/) const
2081 {
2082  return std::unique_ptr<IWorkload>();
2083 }
2084 
2085 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFloor(const FloorQueueDescriptor& /*descriptor*/,
2086  const WorkloadInfo& /*info*/) const
2087 {
2088  return std::unique_ptr<IWorkload>();
2089 }
2090 
2091 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& /*descriptor*/,
2092  const WorkloadInfo& /*info*/) const
2093 {
2094  return std::unique_ptr<IWorkload>();
2095 }
2096 
2097 std::unique_ptr<IWorkload> IWorkloadFactory::CreateGather(const GatherQueueDescriptor& /*descriptor*/,
2098  const WorkloadInfo& /*info*/) const
2099 {
2100  return std::unique_ptr<IWorkload>();
2101 }
2102 
2103 std::unique_ptr<IWorkload> IWorkloadFactory::CreateInstanceNormalization(
2104  const InstanceNormalizationQueueDescriptor& /*descriptor*/,
2105  const WorkloadInfo& /*info*/) const
2106 {
2107  return std::unique_ptr<IWorkload>();
2108 }
2109 
2110 std::unique_ptr<IWorkload> IWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& /*desc*/,
2111  const WorkloadInfo& /*info*/) const
2112 {
2113  return std::unique_ptr<IWorkload>();
2114 }
2115 
2116 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogicalBinary(const LogicalBinaryQueueDescriptor& /*desc*/,
2117  const WorkloadInfo& /*info*/) const
2118 {
2119  return std::unique_ptr<IWorkload>();
2120 }
2121 
2122 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogicalUnary(const ElementwiseUnaryQueueDescriptor& /*desc*/,
2123  const WorkloadInfo& /*info*/) const
2124 {
2125  return std::unique_ptr<IWorkload>();
2126 }
2127 
2128 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& /*descriptor*/,
2129  const WorkloadInfo& /*info*/) const
2130 {
2131  return std::unique_ptr<IWorkload>();
2132 }
2133 
2134 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLstm(const LstmQueueDescriptor& /*descriptor*/,
2135  const WorkloadInfo& /*info*/) const
2136 {
2137  return std::unique_ptr<IWorkload>();
2138 }
2139 
2140 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& /*descriptor*/,
2141  const WorkloadInfo& /*info*/) const
2142 {
2143  return std::unique_ptr<IWorkload>();
2144 }
2145 
2146 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMean(const MeanQueueDescriptor& /*descriptor*/,
2147  const WorkloadInfo& /*Info*/) const
2148 {
2149  return std::unique_ptr<IWorkload>();
2150 }
2151 
2152 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& /*descriptor*/,
2153  const WorkloadInfo& /*info*/) const
2154 {
2155  return std::unique_ptr<IWorkload>();
2156 }
2157 
2158 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& /*descriptor*/,
2159  const WorkloadInfo& /*info*/) const
2160 {
2161  return std::unique_ptr<IWorkload>();
2162 }
2163 
2164 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerge(const MergeQueueDescriptor& /*descriptor*/,
2165  const WorkloadInfo& /*info*/) const
2166 {
2167  return std::unique_ptr<IWorkload>();
2168 }
2169 
2170 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& /*descriptor*/,
2171  const WorkloadInfo& /*info*/) const
2172 {
2173  return std::unique_ptr<IWorkload>();
2174 }
2175 
2176 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& /*descriptor*/,
2177  const WorkloadInfo& /*info*/) const
2178 {
2179  return std::unique_ptr<IWorkload>();
2180 }
2181 
2182 std::unique_ptr<IWorkload> IWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& /*descriptor*/,
2183  const WorkloadInfo& /*info*/) const
2184 {
2185  return std::unique_ptr<IWorkload>();
2186 }
2187 
2188 std::unique_ptr<IWorkload> IWorkloadFactory::CreateOutput(const OutputQueueDescriptor& /*descriptor*/,
2189  const WorkloadInfo& /*info*/) const
2190 {
2191  return std::unique_ptr<IWorkload>();
2192 }
2193 
2194 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePad(const PadQueueDescriptor& /*descriptor*/,
2195  const WorkloadInfo& /*Info*/) const
2196 {
2197  return std::unique_ptr<IWorkload>();
2198 }
2199 
2200 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& /*descriptor*/,
2201  const WorkloadInfo& /*info*/) const
2202 {
2203  return std::unique_ptr<IWorkload>();
2204 }
2205 
2206 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& /*descriptor*/,
2207  const WorkloadInfo& /*info*/) const
2208 {
2209  return std::unique_ptr<IWorkload>();
2210 }
2211 
2212 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePooling3d(const Pooling3dQueueDescriptor& /*descriptor*/,
2213  const WorkloadInfo& /*info*/) const
2214 {
2215  return std::unique_ptr<IWorkload>();
2216 }
2217 
2218 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& /*descriptor*/,
2219  const WorkloadInfo& /*info*/) const
2220 {
2221  return std::unique_ptr<IWorkload>();
2222 }
2223 
2224 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePrelu(const PreluQueueDescriptor &/*descriptor*/,
2225  const WorkloadInfo &/*info*/) const
2226 {
2227  return std::unique_ptr<IWorkload>();
2228 }
2229 
2230 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& /*descriptor*/,
2231  const WorkloadInfo& /*Info*/) const
2232 {
2233  return std::unique_ptr<IWorkload>();
2234 }
2235 
2236 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQLstm(const QLstmQueueDescriptor& /*descriptor*/,
2237  const WorkloadInfo& /*info*/) const
2238 {
2239  return std::unique_ptr<IWorkload>();
2240 }
2241 
2242 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& /*descriptor*/,
2243  const WorkloadInfo& /*info*/) const
2244 {
2245  return std::unique_ptr<IWorkload>();
2246 }
2247 std::unique_ptr<IWorkload> IWorkloadFactory::CreateRank(const RankQueueDescriptor& /*descriptor*/,
2248  const WorkloadInfo& /*info*/) const
2249 {
2250  return std::unique_ptr<IWorkload>();
2251 }
2252 
2253 std::unique_ptr<IWorkload> IWorkloadFactory::CreateReduce(const ReduceQueueDescriptor& /*descriptor*/,
2254  const WorkloadInfo& /*info*/) const
2255 {
2256  return std::unique_ptr<IWorkload>();
2257 }
2258 
2259 std::unique_ptr<IWorkload> IWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& /*descriptor*/,
2260  const WorkloadInfo& /*info*/) const
2261 {
2262  return std::unique_ptr<IWorkload>();
2263 }
2264 
2265 std::unique_ptr<IWorkload> IWorkloadFactory::CreateResize(const ResizeQueueDescriptor& /*descriptor*/,
2266  const WorkloadInfo& /*info*/) const
2267 {
2268  return std::unique_ptr<IWorkload>();
2269 }
2270 
2271 std::unique_ptr<IWorkload> IWorkloadFactory::CreateShape(const ShapeQueueDescriptor& /*descriptor*/,
2272  const WorkloadInfo& /*info*/) const
2273 {
2274  return std::unique_ptr<IWorkload>();
2275 }
2276 
2277 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSlice(const SliceQueueDescriptor& /*descriptor*/,
2278  const WorkloadInfo& /*info*/) const
2279 {
2280  return std::unique_ptr<IWorkload>();
2281 }
2282 
2283 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& /*descriptor*/,
2284  const WorkloadInfo& /*info*/) const
2285 {
2286  return std::unique_ptr<IWorkload>();
2287 }
2288 
2289 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& /*descriptor*/,
2290  const WorkloadInfo& /*info*/) const
2291 {
2292  return std::unique_ptr<IWorkload>();
2293 }
2294 
2295 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& /*descriptor*/,
2296  const WorkloadInfo& /*info*/) const
2297 {
2298  return std::unique_ptr<IWorkload>();
2299 }
2300 
2301 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& /*descriptor*/,
2302  const WorkloadInfo& /*info*/) const
2303 {
2304  return std::unique_ptr<IWorkload>();
2305 }
2306 
2307 std::unique_ptr<IWorkload> IWorkloadFactory::CreateStack(const StackQueueDescriptor& /*descriptor*/,
2308  const WorkloadInfo& /*info*/) const
2309 {
2310  return std::unique_ptr<IWorkload>();
2311 }
2312 
2313 std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& /*descriptor*/,
2314  const WorkloadInfo& /*info*/) const
2315 {
2316  return std::unique_ptr<IWorkload>();
2317 }
2318 
2319 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& /*descriptor*/,
2320  const WorkloadInfo& /*info*/) const
2321 {
2322  return std::unique_ptr<IWorkload>();
2323 }
2324 
2325 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSwitch(const SwitchQueueDescriptor& /*descriptor*/,
2326  const WorkloadInfo& /*info*/) const
2327 {
2328  return std::unique_ptr<IWorkload>();
2329 }
2330 
2331 std::unique_ptr<IWorkload> IWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& /*descriptor*/,
2332  const WorkloadInfo& /*info*/) const
2333 {
2334  return std::unique_ptr<IWorkload>();
2335 }
2336 
2337 std::unique_ptr<IWorkload> IWorkloadFactory::CreateTransposeConvolution2d(
2338  const TransposeConvolution2dQueueDescriptor& /*descriptor*/,
2339  const WorkloadInfo& /*info*/) const
2340 {
2341  return std::unique_ptr<IWorkload>();
2342 }
2343 
2344 std::unique_ptr<IWorkload> IWorkloadFactory::CreateUnidirectionalSequenceLstm(
2345  const UnidirectionalSequenceLstmQueueDescriptor& /*descriptor*/,
2346  const WorkloadInfo& /*info*/) const
2347 {
2348  return std::unique_ptr<IWorkload>();
2349 }
2350 
2351 std::unique_ptr<IWorkload> IWorkloadFactory::CreateInput(
2352  const InputQueueDescriptor& /*descriptor*/,
2353  const WorkloadInfo& /*info*/) const
2354 {
2355  return std::unique_ptr<IWorkload>();
2356 }
2357 
 2358 } // namespace armnn
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:68
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
std::vector< BackendOptions > ModelOptions
constexpr TransformIterator< Function, Iterator > MakeTransformIterator(Iterator i, Function f)
BackendRegistry & BackendRegistryInstance()
Copyright (c) 2021 ARM Limited and Contributors.
armnn::Optional< armnn::DataType > GetBiasTypeFromWeightsType(armnn::Optional< armnn::DataType > weightsType)
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
LstmDescriptor UnidirectionalSequenceLstmDescriptor
static bool IsLayerSupported(const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
Contains information about TensorInfos of a layer.
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const
Depthwise Convolution 2D layer workload data.
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below...
Definition: Types.hpp:468