ArmNN 23.05 — WorkloadFactory.cpp (Doxygen source listing of this file)
1 //
2 // Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include <Layer.hpp>
7 #include <LayersFwd.hpp>
8 
9 #include <armnn/Types.hpp>
12 #include <armnn/BackendHelper.hpp>
16 
18 
19 #include <sstream>
20 
21 namespace armnn
22 {
23 
24 namespace
25 {
26 using LayerList = std::list<Layer*>;
27 using Iterator = LayerList::const_iterator; // Const so pointers in the list can't be modified externally.
28 
29 const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
30 {
31  if (!type)
32  {
33  return info;
34  }
35 
36  return TensorInfo(info.GetShape(),
37  type.value(),
38  info.GetQuantizationScale(),
39  info.GetQuantizationOffset(),
40  info.IsConstant());
41 }
42 
43 } // anonymous namespace
44 
46 {
47  if (!weightsType)
48  {
49  return weightsType;
50  }
51 
52  switch(weightsType.value())
53  {
57  return weightsType;
63  default:
64  ARMNN_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
65  }
66  return armnn::EmptyOptional();
67 }
68 
69 
70 bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
71  const IConnectableLayer& connectableLayer,
72  Optional<DataType> dataType,
73  std::string& outReasonIfUnsupported,
74  const ModelOptions& modelOptions)
75 {
76  Optional<std::string&> reason = outReasonIfUnsupported;
77  bool result;
78  const Layer& layer = *(PolymorphicDowncast<const Layer*>(&connectableLayer));
79 
80  auto const& backendRegistry = BackendRegistryInstance();
81  if (!backendRegistry.IsBackendRegistered(backendId))
82  {
83  std::stringstream ss;
84  ss << connectableLayer.GetName() << " is not supported on " << backendId
85  << " because this backend is not registered.";
86 
87  outReasonIfUnsupported = ss.str();
88  return false;
89  }
90 
91  auto backendFactory = backendRegistry.GetFactory(backendId);
92  auto backendObject = backendFactory();
93  auto layerSupport = backendObject->GetLayerSupport(modelOptions);
94  auto layerSupportObject = LayerSupportHandle(layerSupport, backendId);
95 
96  switch(layer.GetType())
97  {
99  {
100  auto cLayer = PolymorphicDowncast<const ActivationLayer*>(&layer);
101  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
102  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
103  result = layerSupportObject.IsActivationSupported(
104  OverrideDataType(input, dataType),
105  OverrideDataType(output, dataType),
106  cLayer->GetParameters(),
107  reason);
108  break;
109  }
110  case LayerType::Addition:
111  {
113  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
114  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
115  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
116  result = layerSupportObject.IsAdditionSupported(
117  OverrideDataType(input0, dataType),
118  OverrideDataType(input1, dataType),
119  OverrideDataType(output, dataType),
120  reason);
122  break;
123  }
125  {
126  auto cLayer = PolymorphicDowncast<const ArgMinMaxLayer*>(&layer);
127  const ArgMinMaxDescriptor& descriptor = cLayer->GetParameters();
128 
129  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
130  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
131  result = layerSupportObject.IsArgMinMaxSupported(
132  OverrideDataType(input, dataType),
133  OverrideDataType(output, DataType::Signed32),
134  descriptor,
135  reason);
136  break;
137  }
139  {
140  auto cLayer = PolymorphicDowncast<const BatchMatMulLayer*>(&layer);
141  const BatchMatMulDescriptor& descriptor = cLayer->GetParameters();
142 
143  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
144  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
145  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
146  result = layerSupportObject.IsBatchMatMulSupported(
147  OverrideDataType(input0, dataType),
148  OverrideDataType(input1, dataType),
149  OverrideDataType(output, dataType),
150  descriptor,
151  reason);
152  break;
153  }
155  {
156  auto cLayer = PolymorphicDowncast<const BatchNormalizationLayer*>(&layer);
157  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
158  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
159  const TensorInfo& mean = cLayer->m_Mean->GetTensorInfo();
160  const TensorInfo& var = cLayer->m_Variance->GetTensorInfo();
161  const TensorInfo& beta = cLayer->m_Beta->GetTensorInfo();
162  const TensorInfo& gamma = cLayer->m_Gamma->GetTensorInfo();
163  result = layerSupportObject.IsBatchNormalizationSupported(
164  OverrideDataType(input, dataType),
165  OverrideDataType(output, dataType),
166  OverrideDataType(mean, dataType),
167  OverrideDataType(var, dataType),
168  OverrideDataType(beta, dataType),
169  OverrideDataType(gamma, dataType),
170  cLayer->GetParameters(),
171  reason);
172  break;
173  }
175  {
176  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
177  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
178  auto cLayer = PolymorphicDowncast<const BatchToSpaceNdLayer*>(&layer);
179 
180  result = layerSupportObject.IsBatchToSpaceNdSupported(OverrideDataType(input, dataType),
181  OverrideDataType(output, dataType),
182  cLayer->GetParameters(),
183  reason);
184  break;
185  }
186  case LayerType::Cast:
187  {
188  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
189  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
190 
191  result = layerSupportObject.IsCastSupported(OverrideDataType(input, dataType),
192  OverrideDataType(output, dataType),
193  reason);
194  break;
195  }
197  {
198  auto cLayer = PolymorphicDowncast<const ChannelShuffleLayer*>(&layer);
199 
200  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
201  const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
202 
203  const ChannelShuffleDescriptor descriptor = cLayer->GetParameters();
204 
205  result = layerSupportObject.IsChannelShuffleSupported(OverrideDataType(input, dataType),
206  OverrideDataType(output, dataType),
207  descriptor,
208  reason);
209  break;
210  }
212  {
213  auto cLayer = PolymorphicDowncast<const ComparisonLayer*>(&layer);
214 
215  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
216  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
217  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
218 
219  result = layerSupportObject.IsComparisonSupported(OverrideDataType(input0, dataType),
220  OverrideDataType(input1, dataType),
221  OverrideDataType(output, DataType::Boolean),
222  cLayer->GetParameters(),
223  reason);
224  break;
225  }
226  case LayerType::Constant:
227  {
228  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
229  result = layerSupportObject.IsConstantSupported(OverrideDataType(output, dataType), reason);
230  break;
231  }
233  {
234  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
235  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
236  result = layerSupportObject.IsConvertFp16ToFp32Supported(input, output, reason);
237  break;
238  }
240  {
241  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
242  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
243  result = layerSupportObject.IsConvertFp32ToFp16Supported(input, output, reason);
244  break;
245  }
247  {
248  auto cLayer = PolymorphicDowncast<const Convolution2dLayer*>(&layer);
249 
250  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
251  dataType);
252  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
253  ARMNN_ASSERT_MSG(layer.GetInputSlot(1).GetConnection(),
254  "Convolution2dLayer: Weights should be connected as a Constant Layer.");
255  const TensorInfo weights = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
256  dataType);
257 
258  const Convolution2dDescriptor& descriptor = cLayer->GetParameters();
259 
260  // Construct optional biases object based on the value of m_BiasEnabled
261  Optional<TensorInfo> biases;
262  if (descriptor.m_BiasEnabled)
263  {
264  ARMNN_ASSERT_MSG(layer.GetInputSlot(2).GetConnection(),
265  "Convolution2dLayer: Bias should be connected as a Constant Layer.");
266  biases = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
267  GetBiasTypeFromWeightsType(dataType));
268  }
269 
270  result = layerSupportObject.IsConvolution2dSupported(
271  input,
272  output,
273  descriptor,
274  weights,
275  biases,
276  reason);
277  break;
278  }
280  {
281  auto cLayer = PolymorphicDowncast<const Convolution3dLayer*>(&layer);
282 
283  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
284  dataType);
285  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
286 
287  ARMNN_ASSERT_MSG(layer.GetInputSlot(1).GetConnection(),
288  "Convolution3dLayer: Weights should be connected as a Constant Layer.");
289  const TensorInfo weights = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
290  dataType);
291 
292  const Convolution3dDescriptor& descriptor = cLayer->GetParameters();
293 
294  // Construct optional biases object based on the value of m_BiasEnabled
295  Optional<TensorInfo> biases;
296  if (descriptor.m_BiasEnabled)
297  {
298  biases = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
299  GetBiasTypeFromWeightsType(dataType));
300  }
301 
302  result = layerSupportObject.IsConvolution3dSupported(
303  input,
304  output,
305  descriptor,
306  weights,
307  biases,
308  reason);
309  break;
310  }
311  case LayerType::Debug:
312  {
313  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
314  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
315 
316  result = layerSupportObject.IsDebugSupported(OverrideDataType(input, dataType),
317  OverrideDataType(output, dataType),
318  reason);
319  break;
320  }
322  {
323  auto cLayer = PolymorphicDowncast<const DepthToSpaceLayer*>(&layer);
324 
325  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
326  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
327 
328  result = layerSupportObject.IsDepthToSpaceSupported(OverrideDataType(input, dataType),
329  OverrideDataType(output, dataType),
330  cLayer->GetParameters(),
331  reason);
332  break;
333  }
335  {
336  auto cLayer = PolymorphicDowncast<const DepthwiseConvolution2dLayer*>(&layer);
337  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
338  dataType);
339  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
340  const TensorInfo& weights = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
341  dataType);
342 
343  ARMNN_ASSERT(cLayer->GetInputSlot(1).GetConnection() != nullptr);
344 
345  const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();
346 
347  // Construct optional biases object based on the value of m_BiasEnabled
348  Optional<TensorInfo> biases;
349  if (descriptor.m_BiasEnabled)
350  {
351  biases = OverrideDataType(cLayer->GetInputSlot(2).GetConnection()->GetTensorInfo(),
352  GetBiasTypeFromWeightsType(dataType));
353  }
354 
355  result = layerSupportObject.IsDepthwiseConvolutionSupported(input,
356  output,
357  descriptor,
358  weights,
359  biases,
360  reason);
361  break;
362  }
364  {
365  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
366  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
367 
368  result = layerSupportObject.IsDequantizeSupported(input,
369  OverrideDataType(output, dataType),
370  reason);
371  break;
372  }
374  {
375  auto cLayer = PolymorphicDowncast<const DetectionPostProcessLayer*>(&layer);
376  const TensorInfo& boxEncodings = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
377  const TensorInfo& scores = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
378  const TensorInfo& anchors = cLayer->m_Anchors->GetTensorInfo();
379 
380  const TensorInfo& detectionBoxes = layer.GetOutputSlot(0).GetTensorInfo();
381  const TensorInfo& detectionClasses = layer.GetOutputSlot(1).GetTensorInfo();
382  const TensorInfo& detectionScores = layer.GetOutputSlot(2).GetTensorInfo();
383  const TensorInfo& numDetections = layer.GetOutputSlot(3).GetTensorInfo();
384 
385  const DetectionPostProcessDescriptor& descriptor = cLayer->GetParameters();
386  result = layerSupportObject.IsDetectionPostProcessSupported(boxEncodings,
387  scores,
388  anchors,
389  detectionBoxes,
390  detectionClasses,
391  detectionScores,
392  numDetections,
393  descriptor,
394  reason);
395  break;
396  }
398  {
399  auto cLayer = PolymorphicDowncast<const ElementwiseBinaryLayer*>(&layer);
400 
401  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
402  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
403  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
404  std::vector<TensorInfo> infos = { OverrideDataType(input0, dataType),
405  OverrideDataType(input1, dataType),
406  OverrideDataType(output, dataType) };
407  result = layerSupport->IsLayerSupported(LayerType::ElementwiseBinary,
408  infos,
409  cLayer->GetParameters(),
410  EmptyOptional(),
411  EmptyOptional(),
412  reason);
413  break;
414  }
416  {
417  auto cLayer = PolymorphicDowncast<const ElementwiseUnaryLayer*>(&layer);
418 
419  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
420  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
421 
422  result = layerSupportObject.IsElementwiseUnarySupported(OverrideDataType(input, dataType),
423  OverrideDataType(output, dataType),
424  cLayer->GetParameters(),
425  reason);
426  break;
427  }
428  case LayerType::Fill:
429  {
430  auto cLayer = PolymorphicDowncast<const FillLayer*>(&layer);
431  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
432  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
433  const FillDescriptor& descriptor = cLayer->GetParameters();
434 
435  result = layerSupportObject.IsFillSupported(
436  OverrideDataType(input, dataType),
437  OverrideDataType(output, dataType),
438  descriptor,
439  reason);
440  break;
441  }
443  {
444  auto cLayer = PolymorphicDowncast<const FakeQuantizationLayer*>(&layer);
445  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
446  result = layerSupportObject.IsFakeQuantizationSupported(OverrideDataType(input, dataType),
447  cLayer->GetParameters(),
448  reason);
449  break;
450  }
451  case LayerType::Floor:
452  {
453  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
454  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
455  result = layerSupportObject.IsFloorSupported(OverrideDataType(input, dataType),
456  OverrideDataType(output, dataType),
457  reason);
458  break;
459  }
461  {
462  auto cLayer = PolymorphicDowncast<const FullyConnectedLayer*>(&layer);
463  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
464  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
465 
466  const FullyConnectedDescriptor& descriptor = cLayer->GetParameters();
467  TensorInfo weightsInfo;
468  const TensorInfo* weightsInfoPtr = nullptr;
469 
470  weightsInfo = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(), dataType);
471  weightsInfoPtr = &weightsInfo;
472 
473  TensorInfo biasInfo;
474  const TensorInfo* biasInfoPtr = nullptr;
475  static const TensorInfo dummyBFloat16Bias(TensorShape({1,1,1,1}), DataType::BFloat16);
476  static const TensorInfo dummyFloat16Bias(TensorShape({1,1,1,1}), DataType::Float16);
477  static const TensorInfo dummyFloat32Bias(TensorShape({1,1,1,1}), DataType::Float32);
478  static const TensorInfo dummyQA8Bias(TensorShape({1,1,1,1}), DataType::Signed32);
479 
480  if (descriptor.m_BiasEnabled)
481  {
482  biasInfo = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(), dataType);
483  biasInfoPtr = &biasInfo;
484  }
485  else
486  {
487  // If biases are not enabled pass a dummy tensorinfo for the validation
488  switch(input.GetDataType())
489  {
490  case DataType::BFloat16:
491  {
492  biasInfoPtr = &dummyBFloat16Bias;
493  break;
494  }
495  case DataType::Float16:
496  {
497  biasInfoPtr = &dummyFloat16Bias;
498  break;
499  }
500  case DataType::Float32:
501  {
502  biasInfoPtr = &dummyFloat32Bias;
503  break;
504  }
505  case DataType::QAsymmU8:
506  case DataType::QAsymmS8:
507  case DataType::QSymmS8:
508  case DataType::QSymmS16:
509  {
510  biasInfoPtr = &dummyQA8Bias;
511  break;
512  }
513  default:
514  {
515  ARMNN_ASSERT_MSG(false, "Unexpected bias type");
516  }
517  }
518  }
519  result = layerSupportObject.IsFullyConnectedSupported(
520  OverrideDataType(input, dataType),
521  OverrideDataType(output, dataType),
522  *weightsInfoPtr,
523  *biasInfoPtr,
524  descriptor,
525  reason);
526  break;
527  }
528  case LayerType::Gather:
529  {
530  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
531  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
532  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
533  auto cLayer = PolymorphicDowncast<const GatherLayer*>(&layer);
534  const GatherDescriptor& descriptor = cLayer->GetParameters();
535  result = layerSupportObject.IsGatherSupported(OverrideDataType(input0, dataType),
536  input1,
537  OverrideDataType(output, dataType),
538  descriptor,
539  reason);
540  break;
541  }
542  case LayerType::GatherNd:
543  {
544  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
545  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
546  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
547  result = layerSupportObject.IsGatherNdSupported(OverrideDataType(input0, dataType),
548  input1,
549  OverrideDataType(output, dataType),
550  reason);
551  break;
552  }
553  case LayerType::Input:
554  {
555  const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo();
556  result = layerSupportObject.IsInputSupported(OverrideDataType(input, dataType), reason);
557  break;
558  }
560  {
561  auto cLayer = PolymorphicDowncast<const InstanceNormalizationLayer*>(&layer);
562  const InstanceNormalizationDescriptor& descriptor = cLayer->GetParameters();
563 
564  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
565  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
566 
567  result = layerSupportObject.IsInstanceNormalizationSupported(
568  OverrideDataType(input, dataType),
569  OverrideDataType(output, dataType),
570  descriptor,
571  reason);
572  break;
573  }
575  {
576  auto cLayer = PolymorphicDowncast<const L2NormalizationLayer*>(&layer);
577  const L2NormalizationDescriptor& descriptor = cLayer->GetParameters();
578 
579  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
580  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
581 
582  result = layerSupportObject.IsL2NormalizationSupported(
583  OverrideDataType(input, dataType),
584  OverrideDataType(output, dataType),
585  descriptor,
586  reason);
587  break;
588  }
590  {
591  auto cLayer = PolymorphicDowncast<const LogicalBinaryLayer*>(&layer);
592 
593  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
594  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
595  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
596 
597  result = layerSupportObject.IsLogicalBinarySupported(input0,
598  input1,
599  output,
600  cLayer->GetParameters(),
601  reason);
602  break;
603  }
605  {
606  auto cLayer = PolymorphicDowncast<const LogSoftmaxLayer*>(&layer);
607 
608  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
609  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
610 
611  result = layerSupportObject.IsLogSoftmaxSupported(OverrideDataType(input, dataType),
612  OverrideDataType(output, dataType),
613  cLayer->GetParameters(),
614  reason);
615  break;
616  }
617  case LayerType::Lstm:
618  {
619  auto cLayer = PolymorphicDowncast<const LstmLayer*>(&layer);
620  const LstmDescriptor& descriptor = cLayer->GetParameters();
621 
622  // All inputs.
623  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
624  dataType);
625  const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
626  dataType);
627  const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
628  dataType);
629  // All outputs
630  const TensorInfo& scratchBuffer = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
631  const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
632  const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
633  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(3).GetTensorInfo(), dataType);
634 
635  // Basic parameters
636  const TensorInfo& inputToForgetWeights
637  = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
638  const TensorInfo& inputToCellWeights
639  = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
640  const TensorInfo& inputToOutputWeights
641  = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
642  const TensorInfo& recurrentToForgetWeights
643  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
644  const TensorInfo& recurrentToCellWeights
645  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
646  const TensorInfo& recurrentToOutputWeights
647  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
648  const TensorInfo& forgetGateBias
649  = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
650  const TensorInfo& cellBias
651  = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
652  const TensorInfo& outputGateBias
653  = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);
654 
655  LstmInputParamsInfo paramsInfo;
656 
657  paramsInfo.m_InputToForgetWeights = &inputToForgetWeights;
658  paramsInfo.m_InputToCellWeights = &inputToCellWeights;
659  paramsInfo.m_InputToOutputWeights = &inputToOutputWeights;
660  paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
661  paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights;
662  paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
663  paramsInfo.m_ForgetGateBias = &forgetGateBias;
664  paramsInfo.m_CellBias = &cellBias;
665  paramsInfo.m_OutputGateBias = &outputGateBias;
666 
667 
668  // Optional parameters
669  TensorInfo optInputToInputWeights;
670  TensorInfo optRecurrentToInputWeights;
671  TensorInfo optCellToInputWeights;
672  TensorInfo optInputGateBias;
673  TensorInfo optProjectionWeights;
674  TensorInfo optProjectionBias;
675  TensorInfo optCellToForgetWeights;
676  TensorInfo optCellToOutputWeights;
677  TensorInfo optInputLayerNormWeights;
678  TensorInfo optForgetLayerNormWeights;
679  TensorInfo optCellLayerNormWeights;
680  TensorInfo optOutputLayerNormWeights;
681 
682  if(!descriptor.m_CifgEnabled)
683  {
684  optInputToInputWeights =
685  OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
686  paramsInfo.m_InputToInputWeights = &optInputToInputWeights;
687 
688  optRecurrentToInputWeights =
689  OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
690  paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
691  optInputGateBias =
692  OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
693  paramsInfo.m_InputGateBias = &optInputGateBias;
694  }
695 
696  if(descriptor.m_ProjectionEnabled)
697  {
698  optProjectionWeights =
699  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
700  paramsInfo.m_ProjectionWeights = &optProjectionWeights;
701  if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
702  {
703  optProjectionBias =
704  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
705  paramsInfo.m_ProjectionBias = &optProjectionBias;
706  }
707  }
708 
709  if(descriptor.m_PeepholeEnabled)
710  {
711  if(!descriptor.m_CifgEnabled)
712  {
713  optCellToInputWeights =
714  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
715  dataType);
716  paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
717  }
718  optCellToForgetWeights =
719  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
720  paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
721  optCellToOutputWeights =
722  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
723  paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
724  }
725 
726  if(descriptor.m_LayerNormEnabled)
727  {
728  if (!descriptor.m_CifgEnabled)
729  {
730  optInputLayerNormWeights = OverrideDataType(
731  cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
732  paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
733  }
734 
735  optForgetLayerNormWeights = OverrideDataType(
736  cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
737  paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;
738 
739  optCellLayerNormWeights = OverrideDataType(
740  cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
741  paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;
742 
743  optOutputLayerNormWeights = OverrideDataType(
744  cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
745  paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
746  }
747 
748  result = layerSupportObject.IsLstmSupported(
749  input,
750  outputStateIn,
751  cellStateIn,
752  scratchBuffer,
753  outputStateOut,
754  cellStateOut,
755  output,
756  descriptor,
757  paramsInfo,
758  reason);
759  break;
760  }
761  case LayerType::Maximum:
762  {
764  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
765  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
766  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
767 
768  result = layerSupportObject.IsMaximumSupported(OverrideDataType(input0, dataType),
769  OverrideDataType(input1, dataType),
770  OverrideDataType(output, dataType),
771  reason);
773  break;
774  }
775  case LayerType::MemCopy:
776  {
777  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
778  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
779 
780  result = layerSupportObject.IsMemCopySupported(OverrideDataType(input, dataType),
781  OverrideDataType(output, dataType),
782  reason);
783  break;
784  }
786  {
787  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
788  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
789 
790  result = layerSupportObject.IsMemImportSupported(OverrideDataType(input, dataType),
791  OverrideDataType(output, dataType),
792  reason);
793  break;
794  }
795  case LayerType::Merge:
796  {
797  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
798  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
799  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
800 
801  result = layerSupportObject.IsMergeSupported(OverrideDataType(input0, dataType),
802  OverrideDataType(input1, dataType),
803  OverrideDataType(output, dataType),
804  reason);
805  break;
806  }
807  case LayerType::Concat:
808  {
809  auto cLayer = PolymorphicDowncast<const ConcatLayer*>(&layer);
810 
811  // Get vector of all inputs.
812  auto getTensorInfo = [&dataType](const InputSlot& slot)
813  {
814  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
815  };
816 
817  auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo);
818  auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo);
819  std::vector<TensorInfo> inputs(beginI, endI);
820 
821  auto getTensorInfoPtr = [](const TensorInfo& info)
822  {
823  return &info;
824  };
825 
826  auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
827  auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
828  std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
829 
830  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
831 
832  result = layerSupportObject.IsConcatSupported(inputPtrs, output, cLayer->GetParameters(), reason);
833 
834 
835  break;
836  }
838  {
840  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
841  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
842  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
843  result = layerSupportObject.IsMultiplicationSupported(
844  OverrideDataType(input0, dataType),
845  OverrideDataType(input1, dataType),
846  OverrideDataType(output, dataType),
847  reason);
849  break;
850  }
852  {
853  auto cLayer = PolymorphicDowncast<const NormalizationLayer*>(&layer);
854  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
855  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
856  result = layerSupportObject.IsNormalizationSupported(OverrideDataType(input, dataType),
857  OverrideDataType(output, dataType),
858  cLayer->GetParameters(),
859  reason);
860  break;
861  }
862  case LayerType::Output:
863  {
864  const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
865  result = layerSupportObject.IsOutputSupported(OverrideDataType(output, dataType), reason);
866  break;
867  }
868  case LayerType::Permute:
869  {
870  auto cLayer = PolymorphicDowncast<const PermuteLayer*>(&layer);
871  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
872  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
873  result = layerSupportObject.IsPermuteSupported(OverrideDataType(input, dataType),
874  OverrideDataType(output, dataType),
875  cLayer->GetParameters(),
876  reason);
877  break;
878  }
879  case LayerType::Pad:
880  {
881  auto cLayer = PolymorphicDowncast<const PadLayer*>(&layer);
882  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
883  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
884  result = layerSupportObject.IsPadSupported(
885  OverrideDataType(input, dataType),
886  OverrideDataType(output, dataType),
887  cLayer->GetParameters(),
888  reason);
889  break;
890  }
892  {
893  auto cLayer = PolymorphicDowncast<const Pooling2dLayer*>(&layer);
894  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
895  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
896  result = layerSupportObject.IsPooling2dSupported(OverrideDataType(input, dataType),
897  OverrideDataType(output, dataType),
898  cLayer->GetParameters(),
899  reason);
900  break;
901  }
903  {
904  auto cLayer = PolymorphicDowncast<const Pooling3dLayer*>(&layer);
905  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
906  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
907  result = layerSupportObject.IsPooling3dSupported(OverrideDataType(input, dataType),
908  OverrideDataType(output, dataType),
909  cLayer->GetParameters(),
910  reason);
911  break;
912  }
914  {
915  auto cLayer = PolymorphicDowncast<const PreCompiledLayer*>(&layer);
916  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
917  result = layerSupportObject.IsPreCompiledSupported(OverrideDataType(input, dataType),
918  cLayer->GetParameters(),
919  reason);
920  break;
921  }
922  case LayerType::Quantize:
923  {
924  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
925  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
926  result = layerSupportObject.IsQuantizeSupported(input, output, reason);
927  break;
928  }
929  case LayerType::QLstm:
930  {
931  auto cLayer = PolymorphicDowncast<const QLstmLayer*>(&layer);
932  const QLstmDescriptor& descriptor = cLayer->GetParameters();
933 
934  // Inputs
935  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
936  const TensorInfo& previousOutputIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
937  const TensorInfo& previousCellStateIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();
938 
939  // Outputs
940  const TensorInfo& outputStateOut = layer.GetOutputSlot(0).GetTensorInfo();
941  const TensorInfo& cellStateOut = layer.GetOutputSlot(1).GetTensorInfo();
942  const TensorInfo& output = layer.GetOutputSlot(2).GetTensorInfo();
943 
944  // Lstm parameters
945  LstmInputParamsInfo paramsInfo;
946 
947  // Basic parameters
948  ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToForgetWeights.get() != nullptr);
949  ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToCellWeights.get() != nullptr);
950  ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToOutputWeights.get() != nullptr);
951  paramsInfo.m_InputToForgetWeights = &cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo();
952  paramsInfo.m_InputToCellWeights = &cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo();
953  paramsInfo.m_InputToOutputWeights = &cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo();
954 
955  paramsInfo.m_RecurrentToForgetWeights =
956  &cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo();
957  paramsInfo.m_RecurrentToCellWeights =
958  &cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo();
959  paramsInfo.m_RecurrentToOutputWeights =
960  &cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo();
961 
962  paramsInfo.m_ForgetGateBias = &cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo();
963  paramsInfo.m_CellBias = &cLayer->m_BasicParameters.m_CellBias->GetTensorInfo();
964  paramsInfo.m_OutputGateBias = &cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo();
965 
966  if(!descriptor.m_CifgEnabled)
967  {
968  paramsInfo.m_InputToInputWeights = &cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo();
969  paramsInfo.m_RecurrentToInputWeights =
970  &cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo();
971  paramsInfo.m_InputGateBias = &cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo();
972  }
973 
974  if(descriptor.m_ProjectionEnabled)
975  {
976  paramsInfo.m_ProjectionWeights = &cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo();
977 
978  // Projection bias is optional even if projection is enabled
979  if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
980  {
981  paramsInfo.m_ProjectionBias = &cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo();
982  }
983  }
984 
985  if(descriptor.m_PeepholeEnabled)
986  {
987  if (!descriptor.m_CifgEnabled)
988  {
989  paramsInfo.m_CellToInputWeights =
990  &cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo();
991  }
992 
993  paramsInfo.m_CellToForgetWeights =
994  &cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo();
995  paramsInfo.m_CellToOutputWeights = &cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo();
996  }
997 
998  if(descriptor.m_LayerNormEnabled)
999  {
1000  if (!descriptor.m_CifgEnabled)
1001  {
1002  paramsInfo.m_InputLayerNormWeights =
1003  &cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo();
1004  }
1005 
1006  paramsInfo.m_ForgetLayerNormWeights =
1007  &cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo();
1008  paramsInfo.m_CellLayerNormWeights =
1009  &cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo();
1010  paramsInfo.m_OutputLayerNormWeights =
1011  &cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo();
1012  }
1013 
1014  result = layerSupportObject.IsQLstmSupported(input,
1015  previousOutputIn,
1016  previousCellStateIn,
1017  outputStateOut,
1018  cellStateOut,
1019  output,
1020  descriptor,
1021  paramsInfo,
1022  reason);
1023  break;
1024  }
1026  {
1027  auto cLayer = PolymorphicDowncast<const QuantizedLstmLayer*>(&layer);
1028 
1029  // Inputs
1030  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1031  const TensorInfo& previousCellStateIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1032  const TensorInfo& previousOutputIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();
1033 
1034  // Outputs
1035  const TensorInfo& cellStateOut = layer.GetOutputSlot(0).GetTensorInfo();
1036  const TensorInfo& output = layer.GetOutputSlot(1).GetTensorInfo();
1037 
1038  // QuantizedLstm parameters
1039  QuantizedLstmInputParamsInfo paramsInfo;
1040 
1041  paramsInfo.m_InputToInputWeights =
1042  &cLayer->m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo();
1043  paramsInfo.m_InputToForgetWeights =
1044  &cLayer->m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo();
1045  paramsInfo.m_InputToCellWeights =
1046  &cLayer->m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo();
1047  paramsInfo.m_InputToOutputWeights =
1048  &cLayer->m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo();
1049 
1050  paramsInfo.m_RecurrentToInputWeights =
1051  &cLayer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo();
1052  paramsInfo.m_RecurrentToForgetWeights =
1053  &cLayer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo();
1054  paramsInfo.m_RecurrentToCellWeights =
1055  &cLayer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo();
1056  paramsInfo.m_RecurrentToOutputWeights =
1057  &cLayer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo();
1058 
1059  paramsInfo.m_InputGateBias =
1060  &cLayer->m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo();
1061  paramsInfo.m_ForgetGateBias =
1062  &cLayer->m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo();
1063  paramsInfo.m_CellBias =
1064  &cLayer->m_QuantizedLstmParameters.m_CellBias->GetTensorInfo();
1065  paramsInfo.m_OutputGateBias =
1066  &cLayer->m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo();;
1067 
1068  result = layerSupportObject.IsQuantizedLstmSupported(input,
1069  previousCellStateIn,
1070  previousOutputIn,
1071  cellStateOut,
1072  output,
1073  paramsInfo,
1074  reason);
1075  break;
1076  }
1077  case LayerType::Division:
1078  {
1080  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1081  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1082  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1083  result = layerSupportObject.IsDivisionSupported(
1084  OverrideDataType(input0, dataType),
1085  OverrideDataType(input1, dataType),
1086  OverrideDataType(output, dataType),
1087  reason);
1089  break;
1090  }
1091  case LayerType::Rank:
1092  {
1093  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1094  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1095  result = layerSupportObject.IsRankSupported(OverrideDataType(input, dataType),
1096  OverrideDataType(output, dataType),
1097  reason);
1098  break;
1099  }
1100  case LayerType::Reshape:
1101  {
1102  auto cLayer = PolymorphicDowncast<const ReshapeLayer*>(&layer);
1103  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1104  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1105  result = layerSupportObject.IsReshapeSupported(OverrideDataType(input, dataType),
1106  OverrideDataType(output, dataType),
1107  cLayer->GetParameters(),
1108  reason);
1109  break;
1110  }
1111  case LayerType::Resize:
1112  {
1113  auto cLayer = PolymorphicDowncast<const ResizeLayer*>(&layer);
1114  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1115  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1116  result = layerSupportObject.IsResizeSupported(OverrideDataType(input, dataType),
1117  OverrideDataType(output, dataType),
1118  cLayer->GetParameters(),
1119  reason);
1120  break;
1121  }
1122  case LayerType::Shape:
1123  {
1124  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1125  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1126 
1127  result = layerSupportObject.IsShapeSupported(OverrideDataType(input, dataType),
1128  OverrideDataType(output, dataType),
1129  reason);
1130  break;
1131  }
1132  case LayerType::Slice:
1133  {
1134  auto cLayer = PolymorphicDowncast<const SliceLayer*>(&layer);
1135 
1136  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1137  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1138 
1139  result = layerSupportObject.IsSliceSupported(OverrideDataType(input, dataType),
1140  OverrideDataType(output, dataType),
1141  cLayer->GetParameters(),
1142  reason);
1143  break;
1144  }
1145  case LayerType::Softmax:
1146  {
1147  auto cLayer = PolymorphicDowncast<const SoftmaxLayer*>(&layer);
1148  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1149  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1150  result = layerSupportObject.IsSoftmaxSupported(OverrideDataType(input, dataType),
1151  OverrideDataType(output, dataType),
1152  cLayer->GetParameters(),
1153  reason);
1154  break;
1155  }
1157  {
1158  auto cLayer = PolymorphicDowncast<const SpaceToBatchNdLayer*>(&layer);
1159  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1160  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1161  result = layerSupportObject.IsSpaceToBatchNdSupported(OverrideDataType(input, dataType),
1162  OverrideDataType(output, dataType),
1163  cLayer->GetParameters(),
1164  reason);
1165  break;
1166  }
1168  {
1169  auto cLayer = PolymorphicDowncast<const SpaceToDepthLayer*>(&layer);
1170 
1171  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1172  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1173 
1174  result = layerSupportObject.IsSpaceToDepthSupported(OverrideDataType(input, dataType),
1175  OverrideDataType(output, dataType),
1176  cLayer->GetParameters(),
1177  reason);
1178  break;
1179  }
1180  case LayerType::Splitter:
1181  {
1182  auto cLayer = PolymorphicDowncast<const SplitterLayer*>(&layer);
1183  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1184 
1185  // Get vector of all outputs.
1186  auto getTensorInfo = [&dataType](const OutputSlot& slot)
1187  {
1188  return OverrideDataType(slot.GetTensorInfo(), dataType);
1189  };
1190  auto beginI = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfo);
1191  auto endI = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfo);
1192  std::vector<TensorInfo> outputs(beginI, endI);
1193 
1194  const std::vector<std::reference_wrapper<TensorInfo>> outputPtrs(outputs.begin(), outputs.end());
1195 
1196  result = layerSupportObject.IsSplitterSupported(OverrideDataType(input, dataType),
1197  outputPtrs,
1198  cLayer->GetParameters(),
1199  reason);
1200  break;
1201  }
1202  case LayerType::Stack:
1203  {
1204  auto cLayer = PolymorphicDowncast<const StackLayer*>(&layer);
1205 
1206  // Get vector of all inputs.
1207  auto getTensorInfo = [&dataType](const InputSlot& slot)
1208  {
1209  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
1210  };
1211  auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo);
1212  auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo);
1213  std::vector<TensorInfo> inputs(beginI, endI);
1214 
1215  auto getTensorInfoPtr = [](const TensorInfo& info)
1216  {
1217  return &info;
1218  };
1219  auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
1220  auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
1221  std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
1222 
1223  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1224 
1225  result = layerSupportObject.IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason);
1226 
1227  break;
1228  }
1229  case LayerType::StandIn:
1230  {
1231  auto cLayer = PolymorphicDowncast<const StandInLayer*>(&layer);
1232 
1233  // Get vector of all inputs.
1234  auto getTensorInfoIn = [&dataType](const InputSlot& slot)
1235  {
1236  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
1237  };
1238  auto getTensorInfoOut = [&dataType](const OutputSlot& slot)
1239  {
1240  return OverrideDataType(slot.GetTensorInfo(), dataType);
1241  };
1242  auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfoIn);
1243  auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfoIn);
1244  std::vector<TensorInfo> inputs(beginI, endI);
1245 
1246  auto beginO = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfoOut);
1247  auto endO = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfoOut);
1248  std::vector<TensorInfo> outputs(beginO, endO);
1249 
1250 
1251  auto getTensorInfoPtr = [](const TensorInfo& info)
1252  {
1253  return &info;
1254  };
1255  auto beginPtrI = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
1256  auto endPtrI = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
1257  std::vector<const TensorInfo*> inputPtrs(beginPtrI, endPtrI);
1258 
1259  auto beginPtrO = MakeTransformIterator(outputs.begin(), getTensorInfoPtr);
1260  auto endPtrO = MakeTransformIterator(outputs.end(), getTensorInfoPtr);
1261  std::vector<const TensorInfo*> outputPtrs(beginPtrO, endPtrO);
1262 
1263 
1264  result = layerSupportObject.IsStandInSupported(inputPtrs,
1265  outputPtrs,
1266  cLayer->GetParameters(),
1267  reason);
1268  break;
1269  }
1271  {
1272  auto cLayer = PolymorphicDowncast<const StridedSliceLayer*>(&layer);
1273  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1274  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1275  result = layerSupportObject.IsStridedSliceSupported(OverrideDataType(input, dataType),
1276  OverrideDataType(output, dataType),
1277  cLayer->GetParameters(),
1278  reason);
1279  break;
1280  }
1282  {
1284  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1285  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1286  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1287  result = layerSupportObject.IsSubtractionSupported(
1288  OverrideDataType(input0, dataType),
1289  OverrideDataType(input1, dataType),
1290  OverrideDataType(output, dataType),
1291  reason);
1293  break;
1294  }
1295  case LayerType::Switch:
1296  {
1297  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1298  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1299  const TensorInfo& output0 = layer.GetOutputSlot(0).GetTensorInfo();
1300  const TensorInfo& output1 = layer.GetOutputSlot(1).GetTensorInfo();
1301  result = layerSupportObject.IsSwitchSupported(OverrideDataType(input0, dataType),
1302  OverrideDataType(input1, dataType),
1303  OverrideDataType(output0, dataType),
1304  OverrideDataType(output1, dataType),
1305  reason);
1306  break;
1307  }
1308  case LayerType::Mean:
1309  {
1310  auto cLayer = PolymorphicDowncast<const MeanLayer*>(&layer);
1311  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1312  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1313  result = layerSupportObject.IsMeanSupported(
1314  OverrideDataType(input, dataType),
1315  OverrideDataType(output, dataType),
1316  cLayer->GetParameters(),
1317  reason);
1318  break;
1319  }
1320  case LayerType::Minimum:
1321  {
1323  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1324  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1325  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1326  result = layerSupportObject.IsMinimumSupported(OverrideDataType(input0, dataType),
1327  OverrideDataType(input1, dataType),
1328  OverrideDataType(output, dataType),
1329  reason);
1331  break;
1332  }
1333  case LayerType::Prelu:
1334  {
1335  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1336  const TensorInfo& alpha = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1337  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1338  result = layerSupportObject.IsPreluSupported(OverrideDataType(input, dataType),
1339  OverrideDataType(alpha, dataType),
1340  OverrideDataType(output, dataType),
1341  reason);
1342  break;
1343  }
1344  case LayerType::Transpose:
1345  {
1346  auto cLayer = PolymorphicDowncast<const TransposeLayer*>(&layer);
1347  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1348  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1349  result = layerSupportObject.IsTransposeSupported(OverrideDataType(input, dataType),
1350  OverrideDataType(output, dataType),
1351  cLayer->GetParameters(),
1352  reason);
1353  break;
1354  }
1356  {
1357  auto cLayer = PolymorphicDowncast<const TransposeConvolution2dLayer*>(&layer);
1358 
1359  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
1360  dataType);
1361  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
1362 
1363  const TransposeConvolution2dDescriptor& descriptor = cLayer->GetParameters();
1364 
1365  Optional<TensorInfo> biases;
1366  if (descriptor.m_BiasEnabled)
1367  {
1368  ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
1369  biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
1370  GetBiasTypeFromWeightsType(dataType));
1371  }
1372 
1373  ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
1374  const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);
1375 
1376  result = layerSupportObject.IsTransposeConvolution2dSupported(input,
1377  output,
1378  descriptor,
1379  weights,
1380  biases,
1381  reason);
1382 
1383  break;
1384  }
1385  case LayerType::Reduce:
1386  {
1387  auto cLayer = PolymorphicDowncast<const ReduceLayer*>(&layer);
1388  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1389  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1390 
1391  result = layerSupportObject.IsReduceSupported(OverrideDataType(input, dataType),
1392  OverrideDataType(output, dataType),
1393  cLayer->GetParameters(),
1394  reason);
1395  break;
1396  }
1398  {
1399  auto cLayer = PolymorphicDowncast<const UnidirectionalSequenceLstmLayer*>(&layer);
1400  const UnidirectionalSequenceLstmDescriptor& descriptor = cLayer->GetParameters();
1401 
1402  // All inputs.
1403  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
1404  dataType);
1405  const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
1406  dataType);
1407  const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
1408  dataType);
1409  // Outputs
1410  const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
1411  const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
1412  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
1413 
1414  // Basic parameters
1415  const TensorInfo& inputToForgetWeights
1416  = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
1417  const TensorInfo& inputToCellWeights
1418  = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
1419  const TensorInfo& inputToOutputWeights
1420  = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
1421  const TensorInfo& recurrentToForgetWeights
1422  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
1423  const TensorInfo& recurrentToCellWeights
1424  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
1425  const TensorInfo& recurrentToOutputWeights
1426  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
1427  const TensorInfo& forgetGateBias
1428  = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
1429  const TensorInfo& cellBias
1430  = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
1431  const TensorInfo& outputGateBias
1432  = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);
1433 
1434  LstmInputParamsInfo paramsInfo;
1435 
1436  paramsInfo.m_InputToForgetWeights = &inputToForgetWeights;
1437  paramsInfo.m_InputToCellWeights = &inputToCellWeights;
1438  paramsInfo.m_InputToOutputWeights = &inputToOutputWeights;
1439  paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1440  paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights;
1441  paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1442  paramsInfo.m_ForgetGateBias = &forgetGateBias;
1443  paramsInfo.m_CellBias = &cellBias;
1444  paramsInfo.m_OutputGateBias = &outputGateBias;
1445 
1446  // Optional parameters
1447  TensorInfo optInputToInputWeights;
1448  TensorInfo optRecurrentToInputWeights;
1449  TensorInfo optCellToInputWeights;
1450  TensorInfo optInputGateBias;
1451  TensorInfo optProjectionWeights;
1452  TensorInfo optProjectionBias;
1453  TensorInfo optCellToForgetWeights;
1454  TensorInfo optCellToOutputWeights;
1455  TensorInfo optInputLayerNormWeights;
1456  TensorInfo optForgetLayerNormWeights;
1457  TensorInfo optCellLayerNormWeights;
1458  TensorInfo optOutputLayerNormWeights;
1459 
1460  if(!descriptor.m_CifgEnabled)
1461  {
1462  optInputToInputWeights =
1463  OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
1464  paramsInfo.m_InputToInputWeights = &optInputToInputWeights;
1465 
1466  optRecurrentToInputWeights =
1467  OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
1468  paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
1469  optInputGateBias =
1470  OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
1471  paramsInfo.m_InputGateBias = &optInputGateBias;
1472  }
1473 
1474  if(descriptor.m_ProjectionEnabled)
1475  {
1476  optProjectionWeights =
1477  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
1478  paramsInfo.m_ProjectionWeights = &optProjectionWeights;
1479  if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
1480  {
1481  optProjectionBias =
1482  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
1483  paramsInfo.m_ProjectionBias = &optProjectionBias;
1484  }
1485  }
1486 
1487  if(descriptor.m_PeepholeEnabled)
1488  {
1489  if(!descriptor.m_CifgEnabled)
1490  {
1491  optCellToInputWeights =
1492  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
1493  dataType);
1494  paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
1495  }
1496  optCellToForgetWeights =
1497  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
1498  paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
1499  optCellToOutputWeights =
1500  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
1501  paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
1502  }
1503 
1504  if(descriptor.m_LayerNormEnabled)
1505  {
1506  if (!descriptor.m_CifgEnabled)
1507  {
1508  optInputLayerNormWeights = OverrideDataType(
1509  cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
1510  paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
1511  }
1512 
1513  optForgetLayerNormWeights = OverrideDataType(
1514  cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
1515  paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;
1516 
1517  optCellLayerNormWeights = OverrideDataType(
1518  cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
1519  paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;
1520 
1521  optOutputLayerNormWeights = OverrideDataType(
1522  cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
1523  paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
1524  }
1525 
1526  result = layerSupportObject.IsUnidirectionalSequenceLstmSupported(input,
1527  outputStateIn,
1528  cellStateIn,
1529  outputStateOut,
1530  cellStateOut,
1531  output,
1532  descriptor,
1533  paramsInfo,
1534  reason);
1535  break;
1536  }
1537  default:
1538  {
1539  ARMNN_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
1540  reason.value() = "Unrecognised layer type";
1541  result = false;
1542  break;
1543  }
1544  }
1545  return result;
1546 }
1547 
// IsLayerSupported overload: backend explicitly specified, no ModelOptions.
// NOTE(review): the opening signature line of this definition (doxygen line
// 1548) is not visible in this rendering; from the body it forwards
// (backendId, connectableLayer, dataType, outReasonIfUnsupported) unchanged,
// so it is the IsLayerSupported(const BackendId&, ...) overload.
// Forwards straight to the private IsLayerConfigurationSupported helper;
// only four arguments are passed, so the helper's ModelOptions parameter
// takes its default value (see the five-parameter declaration in this file).
1549  const IConnectableLayer& connectableLayer,
1550  Optional<DataType> dataType,
1551  std::string& outReasonIfUnsupported)
1552 {
1553  return IsLayerConfigurationSupported(backendId, connectableLayer, dataType, outReasonIfUnsupported);
1554 }
1555 
// IsLayerSupported overload: no backend specified, no ModelOptions.
// NOTE(review): the opening signature line of this definition (doxygen line
// 1556) is not visible in this rendering; from the body it is the
// IsLayerSupported(const IConnectableLayer&, ...) overload.
// Downcasts the IConnectableLayer to the internal Layer type so the backend
// already assigned to the layer (layer->GetBackendId()) can be used, then
// delegates to IsLayerConfigurationSupported (ModelOptions left defaulted).
1557  Optional<DataType> dataType,
1558  std::string& outReasonIfUnsupported)
1559 {
1560  auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
1561  return IsLayerConfigurationSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
1562 }
1563 
// IsLayerSupported overload: no backend specified, with ModelOptions.
// NOTE(review): the opening signature line of this definition (doxygen line
// 1564) is not visible in this rendering; from the body it is the
// IsLayerSupported(const IConnectableLayer&, ..., const ModelOptions&)
// overload.
// Like the ModelOptions-free variant above it resolves the backend from the
// layer itself via PolymorphicDowncast + GetBackendId(), but additionally
// threads the caller-supplied ModelOptions through to the helper.
1565  Optional<DataType> dataType,
1566  std::string& outReasonIfUnsupported,
1567  const ModelOptions& modelOptions)
1568 {
1569  auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
1570  return IsLayerConfigurationSupported(layer->GetBackendId(),
1571  connectableLayer,
1572  dataType,
1573  outReasonIfUnsupported,
1574  modelOptions);
1575 }
1576 
// IsLayerSupported overload: backend explicitly specified, with ModelOptions.
// NOTE(review): the opening signature line of this definition (doxygen line
// 1577) is not visible in this rendering; from the body it forwards all five
// arguments unchanged, so it is the
// IsLayerSupported(const BackendId&, ..., const ModelOptions&) overload.
// Pure pass-through to IsLayerConfigurationSupported — no downcast needed
// because the caller already names the backend to query.
1578  const IConnectableLayer& connectableLayer,
1579  Optional<DataType> dataType,
1580  std::string& outReasonIfUnsupported,
1581  const ModelOptions& modelOptions)
1582 {
1583  return IsLayerConfigurationSupported(backendId,
1584  connectableLayer,
1585  dataType,
1586  outReasonIfUnsupported,
1587  modelOptions);
1588 }
1590 std::unique_ptr<IWorkload> IWorkloadFactory::CreateWorkload(LayerType type,
1591  const QueueDescriptor& descriptor,
1592  const WorkloadInfo& info) const
1593 {
1594  switch(type)
1595  {
1596  case LayerType::Activation :
1597  {
1598  auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
1599  return CreateActivation(*activationQueueDescriptor, info);
1600  }
1601  case LayerType::Addition :
1602  {
1603  auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
1604  return CreateAddition(*additionQueueDescriptor, info);
1605  }
1606  case LayerType::ArgMinMax :
1607  {
1608  auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
1609  return CreateArgMinMax(*argMinMaxQueueDescriptor, info);
1610  }
1612  {
1613  auto batchNormQueueDescriptor = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
1614  return CreateBatchNormalization(*batchNormQueueDescriptor, info);
1615  }
1617  {
1618  auto batchToSpaceNdQueueDescriptor
1619  = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
1620  return CreateBatchToSpaceNd(*batchToSpaceNdQueueDescriptor, info);
1621  }
1622  case LayerType::Cast :
1623  {
1624  auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
1625  return CreateCast(*castQueueDescriptor, info);
1626  }
1628  {
1629  auto channelShuffleQueueDescriptor
1630  = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
1631  return CreateChannelShuffle(*channelShuffleQueueDescriptor, info);
1632  }
1633  case LayerType::Comparison :
1634  {
1635  auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
1636  return CreateComparison(*comparisonQueueDescriptor, info);
1637  }
1638  case LayerType::Concat :
1639  {
1640  auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
1641  return CreateConcat(*concatQueueDescriptor, info);
1642  }
1643  case LayerType::Constant :
1644  {
1645  auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
1646  return CreateConstant(*constantQueueDescriptor, info);
1647  }
1649  {
1650  auto convertFp16ToFp32QueueDescriptor
1651  = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
1652  return CreateConvertFp16ToFp32(*convertFp16ToFp32QueueDescriptor, info);
1653  }
1655  {
1656  auto convertFp32ToFp16QueueDescriptor
1657  = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
1658  return CreateConvertFp32ToFp16(*convertFp32ToFp16QueueDescriptor, info);
1659  }
1661  {
1662  auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
1663  return CreateConvolution2d(*convolution2dQueueDescriptor, info);
1664  }
1666  {
1667  auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
1668  return CreateConvolution3d(*convolution3dQueueDescriptor, info);
1669  }
1670  case LayerType::Debug:
1671  {
1672  auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
1673  return CreateDebug(*debugQueueDescriptor, info);
1674  }
1676  {
1677  auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
1678  return CreateDepthToSpace(*depthToSpaceQueueDescriptor, info);
1679  }
1681  {
1682  auto depthwiseConvolution2DQueueDescriptor
1683  = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
1684  return CreateDepthwiseConvolution2d(*depthwiseConvolution2DQueueDescriptor, info);
1685  }
1686  case LayerType::Dequantize:
1687  {
1688  auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
1689  return CreateDequantize(*dequantizeQueueDescriptor, info);
1690  }
1692  {
1693  auto detectionPostProcessQueueDescriptor
1694  = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
1695  return CreateDetectionPostProcess(*detectionPostProcessQueueDescriptor, info);
1696  }
1697  case LayerType::Division:
1698  {
1699  auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
1700  return CreateDivision(*divisionQueueDescriptor, info);
1701  }
1703  {
1704  auto queueDescriptor = PolymorphicDowncast<const ElementwiseBinaryQueueDescriptor*>(&descriptor);
1705  return CreateWorkload(LayerType::ElementwiseBinary, *queueDescriptor, info);
1706  }
1708  {
1709  auto elementwiseUnaryQueueDescriptor
1710  = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
1711  return CreateElementwiseUnary(*elementwiseUnaryQueueDescriptor, info);
1712 
1713  }
1715  {
1716  auto fakeQuantizationQueueDescriptor
1717  = PolymorphicDowncast<const FakeQuantizationQueueDescriptor*>(&descriptor);
1718  return CreateFakeQuantization(*fakeQuantizationQueueDescriptor, info);
1719  }
1720  case LayerType::Fill:
1721  {
1722  auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
1723  return CreateFill(*fillQueueDescriptor, info);
1724  }
1725  case LayerType::Floor:
1726  {
1727  auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
1728  return CreateFloor(*floorQueueDescriptor, info);
1729  }
1731  {
1732  auto fullyConnectedQueueDescriptor
1733  = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
1734  return CreateFullyConnected(*fullyConnectedQueueDescriptor, info);
1735  }
1736  case LayerType::Gather:
1737  {
1738  auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
1739  return CreateGather(*gatherQueueDescriptor, info);
1740  }
1741  case LayerType::Input:
1742  {
1743  auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
1744  return CreateInput(*inputQueueDescriptor, info);
1745  }
1747  {
1748  auto instanceNormalizationQueueDescriptor
1749  = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
1750  return CreateInstanceNormalization(*instanceNormalizationQueueDescriptor, info);
1751  }
1753  {
1754  auto l2NormalizationQueueDescriptor
1755  = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
1756  return CreateL2Normalization(*l2NormalizationQueueDescriptor, info);
1757  }
1759  {
1760  auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
1761  return CreateLogicalBinary(*logicalBinaryQueueDescriptor, info);
1762  }
1763  case LayerType::LogSoftmax:
1764  {
1765  auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
1766  return CreateLogSoftmax(*logSoftmaxQueueDescriptor, info);
1767  }
1768  case LayerType::Lstm:
1769  {
1770  auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
1771  return CreateLstm(*lstmQueueDescriptor, info);
1772  }
1773  case LayerType::Maximum:
1774  {
1775  auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
1776  return CreateMaximum(*maximumQueueDescriptor, info);
1777  }
1778  case LayerType::Mean:
1779  {
1780  auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
1781  return CreateMean(*meanQueueDescriptor, info);
1782  }
1783  case LayerType::MemCopy:
1784  {
1785  auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
1786  return CreateMemCopy(*memCopyQueueDescriptor, info);
1787  }
1788  case LayerType::MemImport:
1789  {
1790  auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
1791  return CreateMemImport(*memImportQueueDescriptor, info);
1792  }
1793  case LayerType::Minimum:
1794  {
1795  auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
1796  return CreateMinimum(*minimumQueueDescriptor, info);
1797  }
1799  {
1800  auto multiplicationQueueDescriptor
1801  = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
1802  return CreateMultiplication(*multiplicationQueueDescriptor, info);
1803  }
1805  {
1806  auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
1807  return CreateNormalization(*normalizationQueueDescriptor, info);
1808  }
1809  case LayerType::Output:
1810  {
1811  auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
1812  return CreateOutput(*outputQueueDescriptor, info);
1813  }
1814  case LayerType::Pad:
1815  {
1816  auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
1817  return CreatePad(*padQueueDescriptor, info);
1818  }
1819  case LayerType::Permute:
1820  {
1821  auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
1822  return CreatePermute(*permuteQueueDescriptor, info);
1823  }
1824  case LayerType::Pooling2d:
1825  {
1826  auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
1827  return CreatePooling2d(*pooling2dQueueDescriptor, info);
1828  }
1829  case LayerType::Pooling3d:
1830  {
1831  auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
1832  return CreatePooling3d(*pooling3dQueueDescriptor, info);
1833  }
1835  {
1836  auto preCompiledQueueDescriptor = PolymorphicDowncast<const PreCompiledQueueDescriptor*>(&descriptor);
1837  return CreatePreCompiled(*preCompiledQueueDescriptor, info);
1838  }
1839  case LayerType::Prelu:
1840  {
1841  auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
1842  return CreatePrelu(*preluQueueDescriptor, info);
1843  }
1844  case LayerType::QLstm:
1845  {
1846  auto qlstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
1847  return CreateQLstm(*qlstmQueueDescriptor, info);
1848  }
1849  case LayerType::Quantize:
1850  {
1851  auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
1852  return CreateQuantize(*quantizeQueueDescriptor, info);
1853  }
1854  case LayerType::Rank:
1855  {
1856  auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
1857  return CreateRank(*rankQueueDescriptor, info);
1858  }
1859  case LayerType::Reduce:
1860  {
1861  auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
1862  return CreateReduce(*reduceQueueDescriptor, info);
1863  }
1864  case LayerType::Reshape:
1865  {
1866  auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
1867  return CreateReshape(*reshapeQueueDescriptor, info);
1868  }
1869  case LayerType::Resize:
1870  {
1871  auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
1872  return CreateResize(*resizeQueueDescriptor, info);
1873  }
1874  case LayerType::Shape:
1875  {
1876  auto shapeQueueDescriptor = PolymorphicDowncast<const ShapeQueueDescriptor*>(&descriptor);
1877  return CreateShape(*shapeQueueDescriptor, info);
1878  }
1879  case LayerType::Slice:
1880  {
1881  auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
1882  return CreateSlice(*sliceQueueDescriptor, info);
1883  }
1884  case LayerType::Softmax:
1885  {
1886  auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
1887  return CreateSoftmax(*softmaxQueueDescriptor, info);
1888  }
1890  {
1891  auto spaceToBatchNdQueueDescriptor
1892  = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
1893  return CreateSpaceToBatchNd(*spaceToBatchNdQueueDescriptor, info);
1894  }
1896  {
1897  auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
1898  return CreateSpaceToDepth(*spaceToDepthQueueDescriptor, info);
1899  }
1900  case LayerType::Splitter:
1901  {
1902  auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
1903  return CreateSplitter(*splitterQueueDescriptor, info);
1904  }
1905  case LayerType::Stack:
1906  {
1907  auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
1908  return CreateStack(*stackQueueDescriptor, info);
1909  }
1911  {
1912  auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
1913  return CreateStridedSlice(*stridedSliceQueueDescriptor, info);
1914  }
1916  {
1917  auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
1918  return CreateSubtraction(*subtractionQueueDescriptor, info);
1919  }
1920  case LayerType::Transpose:
1921  {
1922  auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
1923  return CreateTranspose(*transposeQueueDescriptor, info);
1924  }
1926  {
1927  auto transposeConvolution2dQueueDescriptor
1928  = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
1929  return CreateTransposeConvolution2d(*transposeConvolution2dQueueDescriptor, info);
1930  }
1932  {
1933  auto unidirectionalSequenceLstmQueueDescriptor
1934  = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
1935  return CreateUnidirectionalSequenceLstm(*unidirectionalSequenceLstmQueueDescriptor, info);
1936  }
1937  default:
1938  return nullptr;
1939  }
1940 }
1942 
1943 std::unique_ptr<IWorkload> IWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& /*descriptor*/,
1944  const WorkloadInfo& /*info*/) const
1945 {
1946  return std::unique_ptr<IWorkload>();
1947 }
1948 
1949 std::unique_ptr<IWorkload> IWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& /*descriptor*/,
1950  const WorkloadInfo& /*info*/) const
1951 {
1952  return std::unique_ptr<IWorkload>();
1953 }
1954 
1955 std::unique_ptr<IWorkload> IWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& /*descriptor*/,
1956  const WorkloadInfo& /*info*/) const
1957 {
1958  return std::unique_ptr<IWorkload>();
1959 }
1960 
1961 std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchNormalization(
1962  const BatchNormalizationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
1963 {
1964  return std::unique_ptr<IWorkload>();
1965 }
1966 
1967 std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& /*desc*/,
1968  const WorkloadInfo& /*Info*/) const
1969 {
1970  return std::unique_ptr<IWorkload>();
1971 }
1972 
1973 std::unique_ptr<IWorkload> IWorkloadFactory::CreateCast(const CastQueueDescriptor& /*descriptor*/,
1974  const WorkloadInfo& /*info*/) const
1975 {
1976  return std::unique_ptr<IWorkload>();
1977 }
1978 
1979 std::unique_ptr<IWorkload> IWorkloadFactory::CreateChannelShuffle(const ChannelShuffleQueueDescriptor& /*descriptor*/,
1980  const WorkloadInfo& /*info*/) const
1981 {
1982  return std::unique_ptr<IWorkload>();
1983 }
1984 
1985 std::unique_ptr<IWorkload> IWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& /*descriptor*/,
1986  const WorkloadInfo& /*info*/) const
1987 {
1988  return std::unique_ptr<IWorkload>();
1989 }
1990 
1991 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& /*descriptor*/,
1992  const WorkloadInfo& /*info*/) const
1993 {
1994  return std::unique_ptr<IWorkload>();
1995 }
1996 
1997 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& /*descriptor*/,
1998  const WorkloadInfo& /*info*/) const
1999 {
2000  return std::unique_ptr<IWorkload>();
2001 }
2002 
2003 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& /*desc*/,
2004  const WorkloadInfo& /*info*/) const
2005 {
2006  return std::unique_ptr<IWorkload>();
2007 }
2008 
2009 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& /*desc*/,
2010  const WorkloadInfo& /*info*/) const
2011 {
2012  return std::unique_ptr<IWorkload>();
2013 }
2014 
2015 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& /*descriptor*/,
2016  const WorkloadInfo& /*info*/) const
2017 {
2018  return std::unique_ptr<IWorkload>();
2019 }
2020 
2021 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution3d(const Convolution3dQueueDescriptor& /*descriptor*/,
2022  const WorkloadInfo& /*info*/) const
2023 {
2024  return std::unique_ptr<IWorkload>();
2025 }
2026 
2027 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDebug(const DebugQueueDescriptor& /*descriptor*/,
2028  const WorkloadInfo& /*info*/) const
2029 {
2030  return std::unique_ptr<IWorkload>();
2031 }
2032 
2033 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& /*descriptor*/,
2034  const WorkloadInfo& /*info*/) const
2035 {
2036  return std::unique_ptr<IWorkload>();
2037 }
2038 
2039 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthwiseConvolution2d(
2040  const DepthwiseConvolution2dQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
2041 {
2042  return std::unique_ptr<IWorkload>();
2043 }
2044 
2045 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDequantize(
2046  const DequantizeQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
2047 {
2048  return std::unique_ptr<IWorkload>();
2049 }
2050 
2051 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDetectionPostProcess(
2052  const DetectionPostProcessQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
2053 {
2054  return std::unique_ptr<IWorkload>();
2055 }
2056 
2057 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& /*descriptor*/,
2058  const WorkloadInfo& /*info*/) const
2059 {
2060  return std::unique_ptr<IWorkload>();
2061 }
2062 
2063 std::unique_ptr<IWorkload> IWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& /*desc*/,
2064  const WorkloadInfo& /*info*/) const
2065 {
2066  return std::unique_ptr<IWorkload>();
2067 }
2068 
2069 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& /*desc*/,
2070  const WorkloadInfo& /*info*/) const
2071 {
2072  return std::unique_ptr<IWorkload>();
2073 }
2074 
2075 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFill(const FillQueueDescriptor& /*descriptor*/,
2076  const WorkloadInfo& /*info*/) const
2077 {
2078  return std::unique_ptr<IWorkload>();
2079 }
2080 
2081 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFloor(const FloorQueueDescriptor& /*descriptor*/,
2082  const WorkloadInfo& /*info*/) const
2083 {
2084  return std::unique_ptr<IWorkload>();
2085 }
2086 
2087 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& /*descriptor*/,
2088  const WorkloadInfo& /*info*/) const
2089 {
2090  return std::unique_ptr<IWorkload>();
2091 }
2092 
2093 std::unique_ptr<IWorkload> IWorkloadFactory::CreateGather(const GatherQueueDescriptor& /*descriptor*/,
2094  const WorkloadInfo& /*info*/) const
2095 {
2096  return std::unique_ptr<IWorkload>();
2097 }
2098 
2099 std::unique_ptr<IWorkload> IWorkloadFactory::CreateInstanceNormalization(
2100  const InstanceNormalizationQueueDescriptor& /*descriptor*/,
2101  const WorkloadInfo& /*info*/) const
2102 {
2103  return std::unique_ptr<IWorkload>();
2104 }
2105 
2106 std::unique_ptr<IWorkload> IWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& /*desc*/,
2107  const WorkloadInfo& /*info*/) const
2108 {
2109  return std::unique_ptr<IWorkload>();
2110 }
2111 
2112 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogicalBinary(const LogicalBinaryQueueDescriptor& /*desc*/,
2113  const WorkloadInfo& /*info*/) const
2114 {
2115  return std::unique_ptr<IWorkload>();
2116 }
2117 
2118 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogicalUnary(const ElementwiseUnaryQueueDescriptor& /*desc*/,
2119  const WorkloadInfo& /*info*/) const
2120 {
2121  return std::unique_ptr<IWorkload>();
2122 }
2123 
2124 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& /*descriptor*/,
2125  const WorkloadInfo& /*info*/) const
2126 {
2127  return std::unique_ptr<IWorkload>();
2128 }
2129 
2130 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLstm(const LstmQueueDescriptor& /*descriptor*/,
2131  const WorkloadInfo& /*info*/) const
2132 {
2133  return std::unique_ptr<IWorkload>();
2134 }
2135 
2136 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& /*descriptor*/,
2137  const WorkloadInfo& /*info*/) const
2138 {
2139  return std::unique_ptr<IWorkload>();
2140 }
2141 
2142 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMean(const MeanQueueDescriptor& /*descriptor*/,
2143  const WorkloadInfo& /*Info*/) const
2144 {
2145  return std::unique_ptr<IWorkload>();
2146 }
2147 
2148 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& /*descriptor*/,
2149  const WorkloadInfo& /*info*/) const
2150 {
2151  return std::unique_ptr<IWorkload>();
2152 }
2153 
2154 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& /*descriptor*/,
2155  const WorkloadInfo& /*info*/) const
2156 {
2157  return std::unique_ptr<IWorkload>();
2158 }
2159 
2160 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerge(const MergeQueueDescriptor& /*descriptor*/,
2161  const WorkloadInfo& /*info*/) const
2162 {
2163  return std::unique_ptr<IWorkload>();
2164 }
2165 
2166 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& /*descriptor*/,
2167  const WorkloadInfo& /*info*/) const
2168 {
2169  return std::unique_ptr<IWorkload>();
2170 }
2171 
2172 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& /*descriptor*/,
2173  const WorkloadInfo& /*info*/) const
2174 {
2175  return std::unique_ptr<IWorkload>();
2176 }
2177 
2178 std::unique_ptr<IWorkload> IWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& /*descriptor*/,
2179  const WorkloadInfo& /*info*/) const
2180 {
2181  return std::unique_ptr<IWorkload>();
2182 }
2183 
2184 std::unique_ptr<IWorkload> IWorkloadFactory::CreateOutput(const OutputQueueDescriptor& /*descriptor*/,
2185  const WorkloadInfo& /*info*/) const
2186 {
2187  return std::unique_ptr<IWorkload>();
2188 }
2189 
2190 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePad(const PadQueueDescriptor& /*descriptor*/,
2191  const WorkloadInfo& /*Info*/) const
2192 {
2193  return std::unique_ptr<IWorkload>();
2194 }
2195 
2196 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& /*descriptor*/,
2197  const WorkloadInfo& /*info*/) const
2198 {
2199  return std::unique_ptr<IWorkload>();
2200 }
2201 
2202 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& /*descriptor*/,
2203  const WorkloadInfo& /*info*/) const
2204 {
2205  return std::unique_ptr<IWorkload>();
2206 }
2207 
2208 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePooling3d(const Pooling3dQueueDescriptor& /*descriptor*/,
2209  const WorkloadInfo& /*info*/) const
2210 {
2211  return std::unique_ptr<IWorkload>();
2212 }
2213 
2214 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& /*descriptor*/,
2215  const WorkloadInfo& /*info*/) const
2216 {
2217  return std::unique_ptr<IWorkload>();
2218 }
2219 
2220 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePrelu(const PreluQueueDescriptor &/*descriptor*/,
2221  const WorkloadInfo &/*info*/) const
2222 {
2223  return std::unique_ptr<IWorkload>();
2224 }
2225 
2226 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& /*descriptor*/,
2227  const WorkloadInfo& /*Info*/) const
2228 {
2229  return std::unique_ptr<IWorkload>();
2230 }
2231 
2232 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQLstm(const QLstmQueueDescriptor& /*descriptor*/,
2233  const WorkloadInfo& /*info*/) const
2234 {
2235  return std::unique_ptr<IWorkload>();
2236 }
2237 
2238 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& /*descriptor*/,
2239  const WorkloadInfo& /*info*/) const
2240 {
2241  return std::unique_ptr<IWorkload>();
2242 }
2243 std::unique_ptr<IWorkload> IWorkloadFactory::CreateRank(const RankQueueDescriptor& /*descriptor*/,
2244  const WorkloadInfo& /*info*/) const
2245 {
2246  return std::unique_ptr<IWorkload>();
2247 }
2248 
2249 std::unique_ptr<IWorkload> IWorkloadFactory::CreateReduce(const ReduceQueueDescriptor& /*descriptor*/,
2250  const WorkloadInfo& /*info*/) const
2251 {
2252  return std::unique_ptr<IWorkload>();
2253 }
2254 
2255 std::unique_ptr<IWorkload> IWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& /*descriptor*/,
2256  const WorkloadInfo& /*info*/) const
2257 {
2258  return std::unique_ptr<IWorkload>();
2259 }
2260 
2261 std::unique_ptr<IWorkload> IWorkloadFactory::CreateResize(const ResizeQueueDescriptor& /*descriptor*/,
2262  const WorkloadInfo& /*info*/) const
2263 {
2264  return std::unique_ptr<IWorkload>();
2265 }
2266 
2267 std::unique_ptr<IWorkload> IWorkloadFactory::CreateShape(const ShapeQueueDescriptor& /*descriptor*/,
2268  const WorkloadInfo& /*info*/) const
2269 {
2270  return std::unique_ptr<IWorkload>();
2271 }
2272 
2273 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSlice(const SliceQueueDescriptor& /*descriptor*/,
2274  const WorkloadInfo& /*info*/) const
2275 {
2276  return std::unique_ptr<IWorkload>();
2277 }
2278 
2279 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& /*descriptor*/,
2280  const WorkloadInfo& /*info*/) const
2281 {
2282  return std::unique_ptr<IWorkload>();
2283 }
2284 
2285 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& /*descriptor*/,
2286  const WorkloadInfo& /*info*/) const
2287 {
2288  return std::unique_ptr<IWorkload>();
2289 }
2290 
2291 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& /*descriptor*/,
2292  const WorkloadInfo& /*info*/) const
2293 {
2294  return std::unique_ptr<IWorkload>();
2295 }
2296 
2297 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& /*descriptor*/,
2298  const WorkloadInfo& /*info*/) const
2299 {
2300  return std::unique_ptr<IWorkload>();
2301 }
2302 
2303 std::unique_ptr<IWorkload> IWorkloadFactory::CreateStack(const StackQueueDescriptor& /*descriptor*/,
2304  const WorkloadInfo& /*info*/) const
2305 {
2306  return std::unique_ptr<IWorkload>();
2307 }
2308 
2309 std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& /*descriptor*/,
2310  const WorkloadInfo& /*info*/) const
2311 {
2312  return std::unique_ptr<IWorkload>();
2313 }
2314 
2315 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& /*descriptor*/,
2316  const WorkloadInfo& /*info*/) const
2317 {
2318  return std::unique_ptr<IWorkload>();
2319 }
2320 
2321 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSwitch(const SwitchQueueDescriptor& /*descriptor*/,
2322  const WorkloadInfo& /*info*/) const
2323 {
2324  return std::unique_ptr<IWorkload>();
2325 }
2326 
2327 std::unique_ptr<IWorkload> IWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& /*descriptor*/,
2328  const WorkloadInfo& /*info*/) const
2329 {
2330  return std::unique_ptr<IWorkload>();
2331 }
2332 
2333 std::unique_ptr<IWorkload> IWorkloadFactory::CreateTransposeConvolution2d(
2334  const TransposeConvolution2dQueueDescriptor& /*descriptor*/,
2335  const WorkloadInfo& /*info*/) const
2336 {
2337  return std::unique_ptr<IWorkload>();
2338 }
2339 
2340 std::unique_ptr<IWorkload> IWorkloadFactory::CreateUnidirectionalSequenceLstm(
2341  const UnidirectionalSequenceLstmQueueDescriptor& /*descriptor*/,
2342  const WorkloadInfo& /*info*/) const
2343 {
2344  return std::unique_ptr<IWorkload>();
2345 }
2346 
2347 std::unique_ptr<IWorkload> IWorkloadFactory::CreateInput(
2348  const InputQueueDescriptor& /*descriptor*/,
2349  const WorkloadInfo& /*info*/) const
2350 {
2351  return std::unique_ptr<IWorkload>();
2352 }
2353 
2354 } // namespace armnn
armnn::LayerType::Floor
@ Floor
armnn::LayerType::MemCopy
@ MemCopy
armnn::BackendId
Definition: BackendId.hpp:75
armnn::LayerType::Softmax
@ Softmax
armnn::LayerType::Pooling3d
@ Pooling3d
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::LayerType::Transpose
@ Transpose
armnn::BackendRegistryInstance
BackendRegistry & BackendRegistryInstance()
Definition: BackendRegistry.cpp:15
TransformIterator.hpp
armnn::LayerType::ChannelShuffle
@ ChannelShuffle
armnn::DataType::QAsymmU8
@ QAsymmU8
ILayerSupport.hpp
IBackendInternal.hpp
armnn::LayerType::ConvertFp32ToFp16
@ ConvertFp32ToFp16
armnn::LayerType::L2Normalization
@ L2Normalization
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
armnn::DataType::Float16
@ Float16
armnn::LayerType::Input
@ Input
armnn::LayerType::Slice
@ Slice
armnn::IConnectableLayer
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:68
armnn::LayerType::ElementwiseBinary
@ ElementwiseBinary
armnn::LayerType::Maximum
@ Maximum
armnn::LayerType::Quantize
@ Quantize
BackendHelper.hpp
armnn::LayerType::ArgMinMax
@ ArgMinMax
armnn::LayerType::Subtraction
@ Subtraction
armnn::LayerType::SpaceToBatchNd
@ SpaceToBatchNd
armnn::LayerType::Convolution2d
@ Convolution2d
PolymorphicDowncast.hpp
armnn::LayerType::Shape
@ Shape
armnn::ModelOptions
std::vector< BackendOptions > ModelOptions
Definition: BackendOptions.hpp:18
armnn::DataType::Signed32
@ Signed32
armnn::LayerType::Merge
@ Merge
armnn::LayerType::Permute
@ Permute
armnn::LayerType::ConvertFp16ToFp32
@ ConvertFp16ToFp32
WorkloadFactory.hpp
armnn::LayerType::QLstm
@ QLstm
armnn::LayerType::Pad
@ Pad
armnn::LayerType::Addition
@ Addition
armnn::LayerType::QuantizedLstm
@ QuantizedLstm
armnn::LayerType::BatchNormalization
@ BatchNormalization
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::LayerType::Reduce
@ Reduce
armnn::LayerType::Division
@ Division
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::LayerType::Debug
@ Debug
armnn::LayerType::InstanceNormalization
@ InstanceNormalization
armnn::OptionalReferenceSwitch< std::is_reference< T >::value, T >::value
const T & value() const
Definition: Optional.hpp:146
armnn::LayerType::Activation
@ Activation
armnn::LayerType::Normalization
@ Normalization
armnn::UnidirectionalSequenceLstmDescriptor
LstmDescriptor UnidirectionalSequenceLstmDescriptor
Definition: Descriptors.hpp:1136
armnn::LayerType::Comparison
@ Comparison
armnn::LayerType::Stack
@ Stack
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:479
armnn::LayerType::Reshape
@ Reshape
armnn::LayerType::Gather
@ Gather
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::LayerType::Fill
@ Fill
armnn::LayerType::Resize
@ Resize
armnn::LayerType::Rank
@ Rank
armnn::LayerType::LogicalBinary
@ LogicalBinary
armnn::LayerType::UnidirectionalSequenceLstm
@ UnidirectionalSequenceLstm
armnn::InputQueueDescriptor
MemCopyQueueDescriptor InputQueueDescriptor
Definition: WorkloadData.hpp:91
armnn::LayerType::Pooling2d
@ Pooling2d
armnn::DataType::Float32
@ Float32
armnn::IWorkloadFactory::IsLayerSupported
static bool IsLayerSupported(const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
Definition: WorkloadFactory.cpp:1548
armnn::GetBiasTypeFromWeightsType
armnn::Optional< armnn::DataType > GetBiasTypeFromWeightsType(armnn::Optional< armnn::DataType > weightsType)
Definition: LayerSupportRules.hpp:14
armnn::LayerType::GatherNd
@ GatherNd
armnn::QueueDescriptor
Definition: WorkloadData.hpp:24
armnn::LayerType::Minimum
@ Minimum
armnn::LayerType::Constant
@ Constant
Layer.hpp
armnn::LayerType::Lstm
@ Lstm
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
armnn::EmptyOptional
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
armnn::LayerType::SpaceToDepth
@ SpaceToDepth
armnn::LayerType::FakeQuantization
@ FakeQuantization
armnn::LayerType::StandIn
@ StandIn
ARMNN_ASSERT_MSG
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
BackendRegistry.hpp
armnn::WorkloadInfo
Contains information about TensorInfos of a layer.
Definition: WorkloadInfo.hpp:16
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::LayerType::DetectionPostProcess
@ DetectionPostProcess
ARMNN_NO_DEPRECATE_WARN_BEGIN
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
armnn::LayerType::Mean
@ Mean
armnn::LayerType::BatchToSpaceNd
@ BatchToSpaceNd
armnn::LayerType::DepthToSpace
@ DepthToSpace
armnn::DataType::BFloat16
@ BFloat16
ARMNN_ASSERT
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
armnn::LayerType::Switch
@ Switch
armnn::Optional
Definition: Optional.hpp:270
armnn::IWorkloadFactory::CreateWorkload
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const
Definition: WorkloadFactory.cpp:1590
armnn::DataType::QSymmS8
@ QSymmS8
armnn::ActivationQueueDescriptor
Definition: WorkloadData.hpp:158
armnn::LayerType::Concat
@ Concat
armnn::MakeTransformIterator
constexpr TransformIterator< Function, Iterator > MakeTransformIterator(Iterator i, Function f)
Definition: TransformIterator.hpp:86
armnn::DataType::QSymmS16
@ QSymmS16
armnn::LayerType::Cast
@ Cast
LayersFwd.hpp
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::LayerType::Splitter
@ Splitter
armnn::LayerType::LogSoftmax
@ LogSoftmax
Types.hpp
armnn::LayerType::Output
@ Output
armnn::DataType::Boolean
@ Boolean
armnn::LayerType::Multiplication
@ Multiplication
armnn::LayerType::MemImport
@ MemImport
armnn::LayerType::Prelu
@ Prelu
armnn::LayerType::Dequantize
@ Dequantize
armnn::OutputQueueDescriptor
MemCopyQueueDescriptor OutputQueueDescriptor
Definition: WorkloadData.hpp:92
armnn::BoostLogSeverityMapping::info
@ info
armnn::LayerType::PreCompiled
@ PreCompiled
ARMNN_NO_DEPRECATE_WARN_END
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34