ArmNN 22.05 — WorkloadFactory.cpp (source listing extracted from the generated documentation of this file).
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include <Layer.hpp>
7 #include <LayersFwd.hpp>
8 
9 #include <armnn/Types.hpp>
12 #include <armnn/BackendHelper.hpp>
16 
19 
20 #include <sstream>
21 
22 namespace armnn
23 {
24 
25 namespace
26 {
27 using LayerList = std::list<Layer*>;
28 using Iterator = LayerList::const_iterator; // Const so pointers in the list can't be modified externally.
29 
30 const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
31 {
32  if (!type)
33  {
34  return info;
35  }
36 
37  return TensorInfo(info.GetShape(),
38  type.value(),
39  info.GetQuantizationScale(),
40  info.GetQuantizationOffset(),
41  info.IsConstant());
42 }
43 
44 } // anonymous namespace
45 
47 {
48  if (!weightsType)
49  {
50  return weightsType;
51  }
52 
53  switch(weightsType.value())
54  {
58  return weightsType;
64  default:
65  ARMNN_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
66  }
67  return armnn::EmptyOptional();
68 }
69 
70 
71 bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
72  const IConnectableLayer& connectableLayer,
73  Optional<DataType> dataType,
74  std::string& outReasonIfUnsupported,
75  const ModelOptions& modelOptions)
76 {
77  Optional<std::string&> reason = outReasonIfUnsupported;
78  bool result;
79  const Layer& layer = *(PolymorphicDowncast<const Layer*>(&connectableLayer));
80 
81  auto const& backendRegistry = BackendRegistryInstance();
82  if (!backendRegistry.IsBackendRegistered(backendId))
83  {
84  std::stringstream ss;
85  ss << connectableLayer.GetName() << " is not supported on " << backendId
86  << " because this backend is not registered.";
87 
88  outReasonIfUnsupported = ss.str();
89  return false;
90  }
91 
92  auto backendFactory = backendRegistry.GetFactory(backendId);
93  auto backendObject = backendFactory();
94  auto layerSupportObject = LayerSupportHandle(backendObject->GetLayerSupport(modelOptions), backendId);
95 
96  switch(layer.GetType())
97  {
99  {
100  auto cLayer = PolymorphicDowncast<const ActivationLayer*>(&layer);
101  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
102  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
103  result = layerSupportObject.IsActivationSupported(
104  OverrideDataType(input, dataType),
105  OverrideDataType(output, dataType),
106  cLayer->GetParameters(),
107  reason);
108  break;
109  }
110  case LayerType::Addition:
111  {
112  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
113  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
114  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
115  result = layerSupportObject.IsAdditionSupported(
116  OverrideDataType(input0, dataType),
117  OverrideDataType(input1, dataType),
118  OverrideDataType(output, dataType),
119  reason);
120  break;
121  }
123  {
124  auto cLayer = PolymorphicDowncast<const ArgMinMaxLayer*>(&layer);
125  const ArgMinMaxDescriptor& descriptor = cLayer->GetParameters();
126 
127  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
128  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
129  result = layerSupportObject.IsArgMinMaxSupported(
130  OverrideDataType(input, dataType),
131  OverrideDataType(output, DataType::Signed32),
132  descriptor,
133  reason);
134  break;
135  }
137  {
138  auto cLayer = PolymorphicDowncast<const BatchNormalizationLayer*>(&layer);
139  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
140  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
141  const TensorInfo& mean = cLayer->m_Mean->GetTensorInfo();
142  const TensorInfo& var = cLayer->m_Variance->GetTensorInfo();
143  const TensorInfo& beta = cLayer->m_Beta->GetTensorInfo();
144  const TensorInfo& gamma = cLayer->m_Gamma->GetTensorInfo();
145  result = layerSupportObject.IsBatchNormalizationSupported(
146  OverrideDataType(input, dataType),
147  OverrideDataType(output, dataType),
148  OverrideDataType(mean, dataType),
149  OverrideDataType(var, dataType),
150  OverrideDataType(beta, dataType),
151  OverrideDataType(gamma, dataType),
152  cLayer->GetParameters(),
153  reason);
154  break;
155  }
157  {
158  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
159  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
160  auto cLayer = PolymorphicDowncast<const BatchToSpaceNdLayer*>(&layer);
161 
162  result = layerSupportObject.IsBatchToSpaceNdSupported(OverrideDataType(input, dataType),
163  OverrideDataType(output, dataType),
164  cLayer->GetParameters(),
165  reason);
166  break;
167  }
168  case LayerType::Cast:
169  {
170  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
171  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
172 
173  result = layerSupportObject.IsCastSupported(OverrideDataType(input, dataType),
174  OverrideDataType(output, dataType),
175  reason);
176  break;
177  }
179  {
180  auto cLayer = PolymorphicDowncast<const ChannelShuffleLayer*>(&layer);
181 
182  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
183  const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
184 
185  const ChannelShuffleDescriptor descriptor = cLayer->GetParameters();
186 
187  result = layerSupportObject.IsChannelShuffleSupported(OverrideDataType(input, dataType),
188  OverrideDataType(output, dataType),
189  descriptor,
190  reason);
191  break;
192  }
194  {
195  auto cLayer = PolymorphicDowncast<const ComparisonLayer*>(&layer);
196 
197  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
198  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
199  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
200 
201  result = layerSupportObject.IsComparisonSupported(OverrideDataType(input0, dataType),
202  OverrideDataType(input1, dataType),
203  OverrideDataType(output, DataType::Boolean),
204  cLayer->GetParameters(),
205  reason);
206  break;
207  }
208  case LayerType::Constant:
209  {
210  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
211  result = layerSupportObject.IsConstantSupported(OverrideDataType(output, dataType), reason);
212  break;
213  }
215  {
216  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
217  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
218  result = layerSupportObject.IsConvertBf16ToFp32Supported(input, output, reason);
219  break;
220  }
222  {
223  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
224  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
225  result = layerSupportObject.IsConvertFp16ToFp32Supported(input, output, reason);
226  break;
227  }
229  {
230  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
231  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
232  result = layerSupportObject.IsConvertFp32ToBf16Supported(input, output, reason);
233  break;
234  }
236  {
237  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
238  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
239  result = layerSupportObject.IsConvertFp32ToFp16Supported(input, output, reason);
240  break;
241  }
243  {
244  auto cLayer = PolymorphicDowncast<const Convolution2dLayer*>(&layer);
245 
246  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
247  dataType);
248  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
249  ARMNN_ASSERT_MSG(layer.GetInputSlot(1).GetConnection(),
250  "Convolution2dLayer: Weights should be connected as a Constant Layer.");
251  const TensorInfo weights = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
252  dataType);
253 
254  const Convolution2dDescriptor& descriptor = cLayer->GetParameters();
255 
256  // Construct optional biases object based on the value of m_BiasEnabled
257  Optional<TensorInfo> biases;
258  if (descriptor.m_BiasEnabled)
259  {
260  ARMNN_ASSERT_MSG(layer.GetInputSlot(2).GetConnection(),
261  "Convolution2dLayer: Bias should be connected as a Constant Layer.");
262  biases = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
263  GetBiasTypeFromWeightsType(dataType));
264  }
265 
266  result = layerSupportObject.IsConvolution2dSupported(
267  input,
268  output,
269  descriptor,
270  weights,
271  biases,
272  reason);
273  break;
274  }
276  {
277  auto cLayer = PolymorphicDowncast<const Convolution3dLayer*>(&layer);
278 
279  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
280  dataType);
281  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
282 
283  ARMNN_ASSERT_MSG(layer.GetInputSlot(1).GetConnection(),
284  "Convolution3dLayer: Weights should be connected as a Constant Layer.");
285  const TensorInfo weights = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
286  dataType);
287 
288  const Convolution3dDescriptor& descriptor = cLayer->GetParameters();
289 
290  // Construct optional biases object based on the value of m_BiasEnabled
291  Optional<TensorInfo> biases;
292  if (descriptor.m_BiasEnabled)
293  {
294  biases = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
295  GetBiasTypeFromWeightsType(dataType));
296  }
297 
298  result = layerSupportObject.IsConvolution3dSupported(
299  input,
300  output,
301  descriptor,
302  weights,
303  biases,
304  reason);
305  break;
306  }
307  case LayerType::Debug:
308  {
309  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
310  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
311 
312  result = layerSupportObject.IsDebugSupported(OverrideDataType(input, dataType),
313  OverrideDataType(output, dataType),
314  reason);
315  break;
316  }
318  {
319  auto cLayer = PolymorphicDowncast<const DepthToSpaceLayer*>(&layer);
320 
321  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
322  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
323 
324  result = layerSupportObject.IsDepthToSpaceSupported(OverrideDataType(input, dataType),
325  OverrideDataType(output, dataType),
326  cLayer->GetParameters(),
327  reason);
328  break;
329  }
331  {
332  auto cLayer = PolymorphicDowncast<const DepthwiseConvolution2dLayer*>(&layer);
333  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
334  dataType);
335  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
336  const TensorInfo& weights = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
337  dataType);
338 
339  ARMNN_ASSERT(cLayer->GetInputSlot(1).GetConnection() != nullptr);
340 
341  const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();
342 
343  // Construct optional biases object based on the value of m_BiasEnabled
344  Optional<TensorInfo> biases;
345  if (descriptor.m_BiasEnabled)
346  {
347  biases = OverrideDataType(cLayer->GetInputSlot(2).GetConnection()->GetTensorInfo(),
348  GetBiasTypeFromWeightsType(dataType));
349  }
350 
351  result = layerSupportObject.IsDepthwiseConvolutionSupported(input,
352  output,
353  descriptor,
354  weights,
355  biases,
356  reason);
357  break;
358  }
360  {
361  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
362  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
363 
364  result = layerSupportObject.IsDequantizeSupported(input,
365  OverrideDataType(output, dataType),
366  reason);
367  break;
368  }
370  {
371  auto cLayer = PolymorphicDowncast<const DetectionPostProcessLayer*>(&layer);
372  const TensorInfo& boxEncodings = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
373  const TensorInfo& scores = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
374  const TensorInfo& anchors = cLayer->m_Anchors->GetTensorInfo();
375 
376  const TensorInfo& detectionBoxes = layer.GetOutputSlot(0).GetTensorInfo();
377  const TensorInfo& detectionClasses = layer.GetOutputSlot(1).GetTensorInfo();
378  const TensorInfo& detectionScores = layer.GetOutputSlot(2).GetTensorInfo();
379  const TensorInfo& numDetections = layer.GetOutputSlot(3).GetTensorInfo();
380 
381  const DetectionPostProcessDescriptor& descriptor = cLayer->GetParameters();
382  result = layerSupportObject.IsDetectionPostProcessSupported(boxEncodings,
383  scores,
384  anchors,
385  detectionBoxes,
386  detectionClasses,
387  detectionScores,
388  numDetections,
389  descriptor,
390  reason);
391  break;
392  }
394  {
395  auto cLayer = PolymorphicDowncast<const ElementwiseUnaryLayer*>(&layer);
396 
397  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
398  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
399 
400  result = layerSupportObject.IsElementwiseUnarySupported(OverrideDataType(input, dataType),
401  OverrideDataType(output, dataType),
402  cLayer->GetParameters(),
403  reason);
404  break;
405  }
406  case LayerType::Fill:
407  {
408  auto cLayer = PolymorphicDowncast<const FillLayer*>(&layer);
409  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
410  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
411  const FillDescriptor& descriptor = cLayer->GetParameters();
412 
413  result = layerSupportObject.IsFillSupported(
414  OverrideDataType(input, dataType),
415  OverrideDataType(output, dataType),
416  descriptor,
417  reason);
418  break;
419  }
421  {
422  auto cLayer = PolymorphicDowncast<const FakeQuantizationLayer*>(&layer);
423  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
424  result = layerSupportObject.IsFakeQuantizationSupported(OverrideDataType(input, dataType),
425  cLayer->GetParameters(),
426  reason);
427  break;
428  }
429  case LayerType::Floor:
430  {
431  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
432  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
433  result = layerSupportObject.IsFloorSupported(OverrideDataType(input, dataType),
434  OverrideDataType(output, dataType),
435  reason);
436  break;
437  }
439  {
440  auto cLayer = PolymorphicDowncast<const FullyConnectedLayer*>(&layer);
441  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
442  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
443 
444  const FullyConnectedDescriptor& descriptor = cLayer->GetParameters();
445  TensorInfo weightsInfo;
446  const TensorInfo* weightsInfoPtr = nullptr;
447 
448  weightsInfo = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(), dataType);
449  weightsInfoPtr = &weightsInfo;
450 
451  TensorInfo biasInfo;
452  const TensorInfo* biasInfoPtr = nullptr;
453  static const TensorInfo dummyBFloat16Bias(TensorShape({1,1,1,1}), DataType::BFloat16);
454  static const TensorInfo dummyFloat16Bias(TensorShape({1,1,1,1}), DataType::Float16);
455  static const TensorInfo dummyFloat32Bias(TensorShape({1,1,1,1}), DataType::Float32);
456  static const TensorInfo dummyQA8Bias(TensorShape({1,1,1,1}), DataType::Signed32);
457 
458  if (descriptor.m_BiasEnabled)
459  {
460  biasInfo = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(), dataType);
461  biasInfoPtr = &biasInfo;
462  }
463  else
464  {
465  // If biases are not enabled pass a dummy tensorinfo for the validation
466  switch(input.GetDataType())
467  {
468  case DataType::BFloat16:
469  {
470  biasInfoPtr = &dummyBFloat16Bias;
471  break;
472  }
473  case DataType::Float16:
474  {
475  biasInfoPtr = &dummyFloat16Bias;
476  break;
477  }
478  case DataType::Float32:
479  {
480  biasInfoPtr = &dummyFloat32Bias;
481  break;
482  }
483  case DataType::QAsymmU8:
484  case DataType::QAsymmS8:
485  case DataType::QSymmS8:
486  case DataType::QSymmS16:
487  {
488  biasInfoPtr = &dummyQA8Bias;
489  break;
490  }
491  default:
492  {
493  ARMNN_ASSERT_MSG(false, "Unexpected bias type");
494  }
495  }
496  }
497  result = layerSupportObject.IsFullyConnectedSupported(
498  OverrideDataType(input, dataType),
499  OverrideDataType(output, dataType),
500  *weightsInfoPtr,
501  *biasInfoPtr,
502  descriptor,
503  reason);
504  break;
505  }
506  case LayerType::Gather:
507  {
508  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
509  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
510  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
511  auto cLayer = PolymorphicDowncast<const GatherLayer*>(&layer);
512  const GatherDescriptor& descriptor = cLayer->GetParameters();
513  result = layerSupportObject.IsGatherSupported(OverrideDataType(input0, dataType),
514  input1,
515  OverrideDataType(output, dataType),
516  descriptor,
517  reason);
518  break;
519  }
520  case LayerType::GatherNd:
521  {
522  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
523  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
524  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
525  result = layerSupportObject.IsGatherNdSupported(OverrideDataType(input0, dataType),
526  input1,
527  OverrideDataType(output, dataType),
528  reason);
529  break;
530  }
531  case LayerType::Input:
532  {
533  const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo();
534  result = layerSupportObject.IsInputSupported(OverrideDataType(input, dataType), reason);
535  break;
536  }
538  {
539  auto cLayer = PolymorphicDowncast<const InstanceNormalizationLayer*>(&layer);
540  const InstanceNormalizationDescriptor& descriptor = cLayer->GetParameters();
541 
542  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
543  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
544 
545  result = layerSupportObject.IsInstanceNormalizationSupported(
546  OverrideDataType(input, dataType),
547  OverrideDataType(output, dataType),
548  descriptor,
549  reason);
550  break;
551  }
553  {
554  auto cLayer = PolymorphicDowncast<const L2NormalizationLayer*>(&layer);
555  const L2NormalizationDescriptor& descriptor = cLayer->GetParameters();
556 
557  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
558  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
559 
560  result = layerSupportObject.IsL2NormalizationSupported(
561  OverrideDataType(input, dataType),
562  OverrideDataType(output, dataType),
563  descriptor,
564  reason);
565  break;
566  }
568  {
569  auto cLayer = PolymorphicDowncast<const LogicalBinaryLayer*>(&layer);
570 
571  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
572  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
573  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
574 
575  result = layerSupportObject.IsLogicalBinarySupported(input0,
576  input1,
577  output,
578  cLayer->GetParameters(),
579  reason);
580  break;
581  }
583  {
584  auto cLayer = PolymorphicDowncast<const LogSoftmaxLayer*>(&layer);
585 
586  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
587  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
588 
589  result = layerSupportObject.IsLogSoftmaxSupported(OverrideDataType(input, dataType),
590  OverrideDataType(output, dataType),
591  cLayer->GetParameters(),
592  reason);
593  break;
594  }
595  case LayerType::Lstm:
596  {
597  auto cLayer = PolymorphicDowncast<const LstmLayer*>(&layer);
598  const LstmDescriptor& descriptor = cLayer->GetParameters();
599 
600  // All inputs.
601  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
602  dataType);
603  const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
604  dataType);
605  const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
606  dataType);
607  // All outputs
608  const TensorInfo& scratchBuffer = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
609  const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
610  const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
611  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(3).GetTensorInfo(), dataType);
612 
613  // Basic parameters
614  const TensorInfo& inputToForgetWeights
615  = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
616  const TensorInfo& inputToCellWeights
617  = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
618  const TensorInfo& inputToOutputWeights
619  = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
620  const TensorInfo& recurrentToForgetWeights
621  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
622  const TensorInfo& recurrentToCellWeights
623  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
624  const TensorInfo& recurrentToOutputWeights
625  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
626  const TensorInfo& forgetGateBias
627  = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
628  const TensorInfo& cellBias
629  = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
630  const TensorInfo& outputGateBias
631  = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);
632 
633  LstmInputParamsInfo paramsInfo;
634 
635  paramsInfo.m_InputToForgetWeights = &inputToForgetWeights;
636  paramsInfo.m_InputToCellWeights = &inputToCellWeights;
637  paramsInfo.m_InputToOutputWeights = &inputToOutputWeights;
638  paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
639  paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights;
640  paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
641  paramsInfo.m_ForgetGateBias = &forgetGateBias;
642  paramsInfo.m_CellBias = &cellBias;
643  paramsInfo.m_OutputGateBias = &outputGateBias;
644 
645 
646  // Optional parameters
647  TensorInfo optInputToInputWeights;
648  TensorInfo optRecurrentToInputWeights;
649  TensorInfo optCellToInputWeights;
650  TensorInfo optInputGateBias;
651  TensorInfo optProjectionWeights;
652  TensorInfo optProjectionBias;
653  TensorInfo optCellToForgetWeights;
654  TensorInfo optCellToOutputWeights;
655  TensorInfo optInputLayerNormWeights;
656  TensorInfo optForgetLayerNormWeights;
657  TensorInfo optCellLayerNormWeights;
658  TensorInfo optOutputLayerNormWeights;
659 
660  if(!descriptor.m_CifgEnabled)
661  {
662  optInputToInputWeights =
663  OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
664  paramsInfo.m_InputToInputWeights = &optInputToInputWeights;
665 
666  optRecurrentToInputWeights =
667  OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
668  paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
669  optInputGateBias =
670  OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
671  paramsInfo.m_InputGateBias = &optInputGateBias;
672  }
673 
674  if(descriptor.m_ProjectionEnabled)
675  {
676  optProjectionWeights =
677  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
678  paramsInfo.m_ProjectionWeights = &optProjectionWeights;
679  if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
680  {
681  optProjectionBias =
682  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
683  paramsInfo.m_ProjectionBias = &optProjectionBias;
684  }
685  }
686 
687  if(descriptor.m_PeepholeEnabled)
688  {
689  if(!descriptor.m_CifgEnabled)
690  {
691  optCellToInputWeights =
692  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
693  dataType);
694  paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
695  }
696  optCellToForgetWeights =
697  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
698  paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
699  optCellToOutputWeights =
700  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
701  paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
702  }
703 
704  if(descriptor.m_LayerNormEnabled)
705  {
706  if (!descriptor.m_CifgEnabled)
707  {
708  optInputLayerNormWeights = OverrideDataType(
709  cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
710  paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
711  }
712 
713  optForgetLayerNormWeights = OverrideDataType(
714  cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
715  paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;
716 
717  optCellLayerNormWeights = OverrideDataType(
718  cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
719  paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;
720 
721  optOutputLayerNormWeights = OverrideDataType(
722  cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
723  paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
724  }
725 
726  result = layerSupportObject.IsLstmSupported(
727  input,
728  outputStateIn,
729  cellStateIn,
730  scratchBuffer,
731  outputStateOut,
732  cellStateOut,
733  output,
734  descriptor,
735  paramsInfo,
736  reason);
737  break;
738  }
739  case LayerType::Maximum:
740  {
741  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
742  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
743  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
744 
745  result = layerSupportObject.IsMaximumSupported(OverrideDataType(input0, dataType),
746  OverrideDataType(input1, dataType),
747  OverrideDataType(output, dataType),
748  reason);
749  break;
750  }
751  case LayerType::MemCopy:
752  {
753  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
754  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
755 
756  result = layerSupportObject.IsMemCopySupported(OverrideDataType(input, dataType),
757  OverrideDataType(output, dataType),
758  reason);
759  break;
760  }
762  {
763  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
764  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
765 
766  result = layerSupportObject.IsMemImportSupported(OverrideDataType(input, dataType),
767  OverrideDataType(output, dataType),
768  reason);
769  break;
770  }
771  case LayerType::Merge:
772  {
773  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
774  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
775  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
776 
777  result = layerSupportObject.IsMergeSupported(OverrideDataType(input0, dataType),
778  OverrideDataType(input1, dataType),
779  OverrideDataType(output, dataType),
780  reason);
781  break;
782  }
783  case LayerType::Concat:
784  {
785  auto cLayer = PolymorphicDowncast<const ConcatLayer*>(&layer);
786 
787  // Get vector of all inputs.
788  auto getTensorInfo = [&dataType](const InputSlot& slot)
789  {
790  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
791  };
792 
793  auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo);
794  auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo);
795  std::vector<TensorInfo> inputs(beginI, endI);
796 
797  auto getTensorInfoPtr = [](const TensorInfo& info)
798  {
799  return &info;
800  };
801 
802  auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
803  auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
804  std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
805 
806  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
807 
808  result = layerSupportObject.IsConcatSupported(inputPtrs, output, cLayer->GetParameters(), reason);
809 
810 
811  break;
812  }
814  {
815  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
816  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
817  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
818  result = layerSupportObject.IsMultiplicationSupported(
819  OverrideDataType(input0, dataType),
820  OverrideDataType(input1, dataType),
821  OverrideDataType(output, dataType),
822  reason);
823  break;
824  }
826  {
827  auto cLayer = PolymorphicDowncast<const NormalizationLayer*>(&layer);
828  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
829  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
830  result = layerSupportObject.IsNormalizationSupported(OverrideDataType(input, dataType),
831  OverrideDataType(output, dataType),
832  cLayer->GetParameters(),
833  reason);
834  break;
835  }
836  case LayerType::Output:
837  {
838  const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
839  result = layerSupportObject.IsOutputSupported(OverrideDataType(output, dataType), reason);
840  break;
841  }
842  case LayerType::Permute:
843  {
844  auto cLayer = PolymorphicDowncast<const PermuteLayer*>(&layer);
845  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
846  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
847  result = layerSupportObject.IsPermuteSupported(OverrideDataType(input, dataType),
848  OverrideDataType(output, dataType),
849  cLayer->GetParameters(),
850  reason);
851  break;
852  }
853  case LayerType::Pad:
854  {
855  auto cLayer = PolymorphicDowncast<const PadLayer*>(&layer);
856  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
857  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
858  result = layerSupportObject.IsPadSupported(
859  OverrideDataType(input, dataType),
860  OverrideDataType(output, dataType),
861  cLayer->GetParameters(),
862  reason);
863  break;
864  }
866  {
867  auto cLayer = PolymorphicDowncast<const Pooling2dLayer*>(&layer);
868  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
869  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
870  result = layerSupportObject.IsPooling2dSupported(OverrideDataType(input, dataType),
871  OverrideDataType(output, dataType),
872  cLayer->GetParameters(),
873  reason);
874  break;
875  }
877  {
878  auto cLayer = PolymorphicDowncast<const Pooling3dLayer*>(&layer);
879  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
880  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
881  result = layerSupportObject.IsPooling3dSupported(OverrideDataType(input, dataType),
882  OverrideDataType(output, dataType),
883  cLayer->GetParameters(),
884  reason);
885  break;
886  }
888  {
889  auto cLayer = PolymorphicDowncast<const PreCompiledLayer*>(&layer);
890  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
891  result = layerSupportObject.IsPreCompiledSupported(OverrideDataType(input, dataType),
892  cLayer->GetParameters(),
893  reason);
894  break;
895  }
896  case LayerType::Quantize:
897  {
898  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
899  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
900  result = layerSupportObject.IsQuantizeSupported(input, output, reason);
901  break;
902  }
903  case LayerType::QLstm:
904  {
905  auto cLayer = PolymorphicDowncast<const QLstmLayer*>(&layer);
906  const QLstmDescriptor& descriptor = cLayer->GetParameters();
907 
908  // Inputs
909  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
910  const TensorInfo& previousOutputIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
911  const TensorInfo& previousCellStateIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();
912 
913  // Outputs
914  const TensorInfo& outputStateOut = layer.GetOutputSlot(0).GetTensorInfo();
915  const TensorInfo& cellStateOut = layer.GetOutputSlot(1).GetTensorInfo();
916  const TensorInfo& output = layer.GetOutputSlot(2).GetTensorInfo();
917 
918  // Lstm parameters
919  LstmInputParamsInfo paramsInfo;
920 
921  // Basic parameters
922  ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToForgetWeights.get() != nullptr);
923  ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToCellWeights.get() != nullptr);
924  ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToOutputWeights.get() != nullptr);
925  paramsInfo.m_InputToForgetWeights = &cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo();
926  paramsInfo.m_InputToCellWeights = &cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo();
927  paramsInfo.m_InputToOutputWeights = &cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo();
928 
929  paramsInfo.m_RecurrentToForgetWeights =
930  &cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo();
931  paramsInfo.m_RecurrentToCellWeights =
932  &cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo();
933  paramsInfo.m_RecurrentToOutputWeights =
934  &cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo();
935 
936  paramsInfo.m_ForgetGateBias = &cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo();
937  paramsInfo.m_CellBias = &cLayer->m_BasicParameters.m_CellBias->GetTensorInfo();
938  paramsInfo.m_OutputGateBias = &cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo();
939 
940  if(!descriptor.m_CifgEnabled)
941  {
942  paramsInfo.m_InputToInputWeights = &cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo();
943  paramsInfo.m_RecurrentToInputWeights =
944  &cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo();
945  paramsInfo.m_InputGateBias = &cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo();
946  }
947 
948  if(descriptor.m_ProjectionEnabled)
949  {
950  paramsInfo.m_ProjectionWeights = &cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo();
951 
952  // Projection bias is optional even if projection is enabled
953  if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
954  {
955  paramsInfo.m_ProjectionBias = &cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo();
956  }
957  }
958 
959  if(descriptor.m_PeepholeEnabled)
960  {
961  if (!descriptor.m_CifgEnabled)
962  {
963  paramsInfo.m_CellToInputWeights =
964  &cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo();
965  }
966 
967  paramsInfo.m_CellToForgetWeights =
968  &cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo();
969  paramsInfo.m_CellToOutputWeights = &cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo();
970  }
971 
972  if(descriptor.m_LayerNormEnabled)
973  {
974  if (!descriptor.m_CifgEnabled)
975  {
976  paramsInfo.m_InputLayerNormWeights =
977  &cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo();
978  }
979 
980  paramsInfo.m_ForgetLayerNormWeights =
981  &cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo();
982  paramsInfo.m_CellLayerNormWeights =
983  &cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo();
984  paramsInfo.m_OutputLayerNormWeights =
985  &cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo();
986  }
987 
988  result = layerSupportObject.IsQLstmSupported(input,
989  previousOutputIn,
990  previousCellStateIn,
991  outputStateOut,
992  cellStateOut,
993  output,
994  descriptor,
995  paramsInfo,
996  reason);
997  break;
998  }
1000  {
1001  auto cLayer = PolymorphicDowncast<const QuantizedLstmLayer*>(&layer);
1002 
1003  // Inputs
1004  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1005  const TensorInfo& previousCellStateIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1006  const TensorInfo& previousOutputIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();
1007 
1008  // Outputs
1009  const TensorInfo& cellStateOut = layer.GetOutputSlot(0).GetTensorInfo();
1010  const TensorInfo& output = layer.GetOutputSlot(1).GetTensorInfo();
1011 
1012  // QuantizedLstm parameters
1013  QuantizedLstmInputParamsInfo paramsInfo;
1014 
1015  paramsInfo.m_InputToInputWeights =
1016  &cLayer->m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo();
1017  paramsInfo.m_InputToForgetWeights =
1018  &cLayer->m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo();
1019  paramsInfo.m_InputToCellWeights =
1020  &cLayer->m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo();
1021  paramsInfo.m_InputToOutputWeights =
1022  &cLayer->m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo();
1023 
1024  paramsInfo.m_RecurrentToInputWeights =
1025  &cLayer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo();
1026  paramsInfo.m_RecurrentToForgetWeights =
1027  &cLayer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo();
1028  paramsInfo.m_RecurrentToCellWeights =
1029  &cLayer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo();
1030  paramsInfo.m_RecurrentToOutputWeights =
1031  &cLayer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo();
1032 
1033  paramsInfo.m_InputGateBias =
1034  &cLayer->m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo();
1035  paramsInfo.m_ForgetGateBias =
1036  &cLayer->m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo();
1037  paramsInfo.m_CellBias =
1038  &cLayer->m_QuantizedLstmParameters.m_CellBias->GetTensorInfo();
1039  paramsInfo.m_OutputGateBias =
1040  &cLayer->m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo();;
1041 
1042  result = layerSupportObject.IsQuantizedLstmSupported(input,
1043  previousCellStateIn,
1044  previousOutputIn,
1045  cellStateOut,
1046  output,
1047  paramsInfo,
1048  reason);
1049  break;
1050  }
1051  case LayerType::Division:
1052  {
1053  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1054  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1055  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1056  result = layerSupportObject.IsDivisionSupported(
1057  OverrideDataType(input0, dataType),
1058  OverrideDataType(input1, dataType),
1059  OverrideDataType(output, dataType),
1060  reason);
1061  break;
1062  }
1063  case LayerType::Rank:
1064  {
1065  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1066  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1067  result = layerSupportObject.IsRankSupported(OverrideDataType(input, dataType),
1068  OverrideDataType(output, dataType),
1069  reason);
1070  break;
1071  }
1072  case LayerType::Reshape:
1073  {
1074  auto cLayer = PolymorphicDowncast<const ReshapeLayer*>(&layer);
1075  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1076  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1077  result = layerSupportObject.IsReshapeSupported(OverrideDataType(input, dataType),
1078  OverrideDataType(output, dataType),
1079  cLayer->GetParameters(),
1080  reason);
1081  break;
1082  }
1083  case LayerType::Resize:
1084  {
1085  auto cLayer = PolymorphicDowncast<const ResizeLayer*>(&layer);
1086  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1087  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1088  result = layerSupportObject.IsResizeSupported(OverrideDataType(input, dataType),
1089  OverrideDataType(output, dataType),
1090  cLayer->GetParameters(),
1091  reason);
1092  break;
1093  }
1094  case LayerType::Shape:
1095  {
1096  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1097  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1098 
1099  result = layerSupportObject.IsShapeSupported(OverrideDataType(input, dataType),
1100  OverrideDataType(output, dataType),
1101  reason);
1102  break;
1103  }
1104  case LayerType::Slice:
1105  {
1106  auto cLayer = PolymorphicDowncast<const SliceLayer*>(&layer);
1107 
1108  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1109  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1110 
1111  result = layerSupportObject.IsSliceSupported(OverrideDataType(input, dataType),
1112  OverrideDataType(output, dataType),
1113  cLayer->GetParameters(),
1114  reason);
1115  break;
1116  }
1117  case LayerType::Softmax:
1118  {
1119  auto cLayer = PolymorphicDowncast<const SoftmaxLayer*>(&layer);
1120  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1121  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1122  result = layerSupportObject.IsSoftmaxSupported(OverrideDataType(input, dataType),
1123  OverrideDataType(output, dataType),
1124  cLayer->GetParameters(),
1125  reason);
1126  break;
1127  }
1129  {
1130  auto cLayer = PolymorphicDowncast<const SpaceToBatchNdLayer*>(&layer);
1131  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1132  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1133  result = layerSupportObject.IsSpaceToBatchNdSupported(OverrideDataType(input, dataType),
1134  OverrideDataType(output, dataType),
1135  cLayer->GetParameters(),
1136  reason);
1137  break;
1138  }
1140  {
1141  auto cLayer = PolymorphicDowncast<const SpaceToDepthLayer*>(&layer);
1142 
1143  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1144  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1145 
1146  result = layerSupportObject.IsSpaceToDepthSupported(OverrideDataType(input, dataType),
1147  OverrideDataType(output, dataType),
1148  cLayer->GetParameters(),
1149  reason);
1150  break;
1151  }
1152  case LayerType::Splitter:
1153  {
1154  auto cLayer = PolymorphicDowncast<const SplitterLayer*>(&layer);
1155  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1156 
1157  // Get vector of all outputs.
1158  auto getTensorInfo = [&dataType](const OutputSlot& slot)
1159  {
1160  return OverrideDataType(slot.GetTensorInfo(), dataType);
1161  };
1162  auto beginI = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfo);
1163  auto endI = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfo);
1164  std::vector<TensorInfo> outputs(beginI, endI);
1165 
1166  const std::vector<std::reference_wrapper<TensorInfo>> outputPtrs(outputs.begin(), outputs.end());
1167 
1168  result = layerSupportObject.IsSplitterSupported(OverrideDataType(input, dataType),
1169  outputPtrs,
1170  cLayer->GetParameters(),
1171  reason);
1172  break;
1173  }
1174  case LayerType::Stack:
1175  {
1176  auto cLayer = PolymorphicDowncast<const StackLayer*>(&layer);
1177 
1178  // Get vector of all inputs.
1179  auto getTensorInfo = [&dataType](const InputSlot& slot)
1180  {
1181  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
1182  };
1183  auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo);
1184  auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo);
1185  std::vector<TensorInfo> inputs(beginI, endI);
1186 
1187  auto getTensorInfoPtr = [](const TensorInfo& info)
1188  {
1189  return &info;
1190  };
1191  auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
1192  auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
1193  std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
1194 
1195  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1196 
1197  result = layerSupportObject.IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason);
1198 
1199  break;
1200  }
1201  case LayerType::StandIn:
1202  {
1203  auto cLayer = PolymorphicDowncast<const StandInLayer*>(&layer);
1204 
1205  // Get vector of all inputs.
1206  auto getTensorInfoIn = [&dataType](const InputSlot& slot)
1207  {
1208  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
1209  };
1210  auto getTensorInfoOut = [&dataType](const OutputSlot& slot)
1211  {
1212  return OverrideDataType(slot.GetTensorInfo(), dataType);
1213  };
1214  auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfoIn);
1215  auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfoIn);
1216  std::vector<TensorInfo> inputs(beginI, endI);
1217 
1218  auto beginO = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfoOut);
1219  auto endO = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfoOut);
1220  std::vector<TensorInfo> outputs(beginO, endO);
1221 
1222 
1223  auto getTensorInfoPtr = [](const TensorInfo& info)
1224  {
1225  return &info;
1226  };
1227  auto beginPtrI = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
1228  auto endPtrI = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
1229  std::vector<const TensorInfo*> inputPtrs(beginPtrI, endPtrI);
1230 
1231  auto beginPtrO = MakeTransformIterator(outputs.begin(), getTensorInfoPtr);
1232  auto endPtrO = MakeTransformIterator(outputs.end(), getTensorInfoPtr);
1233  std::vector<const TensorInfo*> outputPtrs(beginPtrO, endPtrO);
1234 
1235 
1236  result = layerSupportObject.IsStandInSupported(inputPtrs,
1237  outputPtrs,
1238  cLayer->GetParameters(),
1239  reason);
1240  break;
1241  }
1243  {
1244  auto cLayer = PolymorphicDowncast<const StridedSliceLayer*>(&layer);
1245  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1246  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1247  result = layerSupportObject.IsStridedSliceSupported(OverrideDataType(input, dataType),
1248  OverrideDataType(output, dataType),
1249  cLayer->GetParameters(),
1250  reason);
1251  break;
1252  }
1254  {
1255  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1256  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1257  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1258  result = layerSupportObject.IsSubtractionSupported(
1259  OverrideDataType(input0, dataType),
1260  OverrideDataType(input1, dataType),
1261  OverrideDataType(output, dataType),
1262  reason);
1263  break;
1264  }
1265  case LayerType::Switch:
1266  {
1267  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1268  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1269  const TensorInfo& output0 = layer.GetOutputSlot(0).GetTensorInfo();
1270  const TensorInfo& output1 = layer.GetOutputSlot(1).GetTensorInfo();
1271  result = layerSupportObject.IsSwitchSupported(OverrideDataType(input0, dataType),
1272  OverrideDataType(input1, dataType),
1273  OverrideDataType(output0, dataType),
1274  OverrideDataType(output1, dataType),
1275  reason);
1276  break;
1277  }
1278  case LayerType::Mean:
1279  {
1280  auto cLayer = PolymorphicDowncast<const MeanLayer*>(&layer);
1281  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1282  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1283  result = layerSupportObject.IsMeanSupported(
1284  OverrideDataType(input, dataType),
1285  OverrideDataType(output, dataType),
1286  cLayer->GetParameters(),
1287  reason);
1288  break;
1289  }
1290  case LayerType::Minimum:
1291  {
1292  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1293  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1294  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1295  result = layerSupportObject.IsMinimumSupported(OverrideDataType(input0, dataType),
1296  OverrideDataType(input1, dataType),
1297  OverrideDataType(output, dataType),
1298  reason);
1299  break;
1300  }
1301  case LayerType::Prelu:
1302  {
1303  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1304  const TensorInfo& alpha = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1305  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1306  result = layerSupportObject.IsPreluSupported(OverrideDataType(input, dataType),
1307  OverrideDataType(alpha, dataType),
1308  OverrideDataType(output, dataType),
1309  reason);
1310  break;
1311  }
1312  case LayerType::Transpose:
1313  {
1314  auto cLayer = PolymorphicDowncast<const TransposeLayer*>(&layer);
1315  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1316  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1317  result = layerSupportObject.IsTransposeSupported(OverrideDataType(input, dataType),
1318  OverrideDataType(output, dataType),
1319  cLayer->GetParameters(),
1320  reason);
1321  break;
1322  }
1324  {
1325  auto cLayer = PolymorphicDowncast<const TransposeConvolution2dLayer*>(&layer);
1326 
1327  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
1328  dataType);
1329  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
1330 
1331  const TransposeConvolution2dDescriptor& descriptor = cLayer->GetParameters();
1332 
1333  Optional<TensorInfo> biases;
1334  if (descriptor.m_BiasEnabled)
1335  {
1336  ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
1337  biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
1338  GetBiasTypeFromWeightsType(dataType));
1339  }
1340 
1341  ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
1342  const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);
1343 
1344  result = layerSupportObject.IsTransposeConvolution2dSupported(input,
1345  output,
1346  descriptor,
1347  weights,
1348  biases,
1349  reason);
1350 
1351  break;
1352  }
1353  case LayerType::Reduce:
1354  {
1355  auto cLayer = PolymorphicDowncast<const ReduceLayer*>(&layer);
1356  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1357  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1358 
1359  result = layerSupportObject.IsReduceSupported(OverrideDataType(input, dataType),
1360  OverrideDataType(output, dataType),
1361  cLayer->GetParameters(),
1362  reason);
1363  break;
1364  }
1366  {
1367  auto cLayer = PolymorphicDowncast<const UnidirectionalSequenceLstmLayer*>(&layer);
1368  const UnidirectionalSequenceLstmDescriptor& descriptor = cLayer->GetParameters();
1369 
1370  // All inputs.
1371  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
1372  dataType);
1373  const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
1374  dataType);
1375  const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
1376  dataType);
1377  // Outputs
1378  const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
1379  const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
1380  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
1381 
1382  // Basic parameters
1383  const TensorInfo& inputToForgetWeights
1384  = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
1385  const TensorInfo& inputToCellWeights
1386  = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
1387  const TensorInfo& inputToOutputWeights
1388  = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
1389  const TensorInfo& recurrentToForgetWeights
1390  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
1391  const TensorInfo& recurrentToCellWeights
1392  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
1393  const TensorInfo& recurrentToOutputWeights
1394  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
1395  const TensorInfo& forgetGateBias
1396  = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
1397  const TensorInfo& cellBias
1398  = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
1399  const TensorInfo& outputGateBias
1400  = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);
1401 
1402  LstmInputParamsInfo paramsInfo;
1403 
1404  paramsInfo.m_InputToForgetWeights = &inputToForgetWeights;
1405  paramsInfo.m_InputToCellWeights = &inputToCellWeights;
1406  paramsInfo.m_InputToOutputWeights = &inputToOutputWeights;
1407  paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1408  paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights;
1409  paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1410  paramsInfo.m_ForgetGateBias = &forgetGateBias;
1411  paramsInfo.m_CellBias = &cellBias;
1412  paramsInfo.m_OutputGateBias = &outputGateBias;
1413 
1414  // Optional parameters
1415  TensorInfo optInputToInputWeights;
1416  TensorInfo optRecurrentToInputWeights;
1417  TensorInfo optCellToInputWeights;
1418  TensorInfo optInputGateBias;
1419  TensorInfo optProjectionWeights;
1420  TensorInfo optProjectionBias;
1421  TensorInfo optCellToForgetWeights;
1422  TensorInfo optCellToOutputWeights;
1423  TensorInfo optInputLayerNormWeights;
1424  TensorInfo optForgetLayerNormWeights;
1425  TensorInfo optCellLayerNormWeights;
1426  TensorInfo optOutputLayerNormWeights;
1427 
1428  if(!descriptor.m_CifgEnabled)
1429  {
1430  optInputToInputWeights =
1431  OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
1432  paramsInfo.m_InputToInputWeights = &optInputToInputWeights;
1433 
1434  optRecurrentToInputWeights =
1435  OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
1436  paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
1437  optInputGateBias =
1438  OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
1439  paramsInfo.m_InputGateBias = &optInputGateBias;
1440  }
1441 
1442  if(descriptor.m_ProjectionEnabled)
1443  {
1444  optProjectionWeights =
1445  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
1446  paramsInfo.m_ProjectionWeights = &optProjectionWeights;
1447  if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
1448  {
1449  optProjectionBias =
1450  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
1451  paramsInfo.m_ProjectionBias = &optProjectionBias;
1452  }
1453  }
1454 
1455  if(descriptor.m_PeepholeEnabled)
1456  {
1457  if(!descriptor.m_CifgEnabled)
1458  {
1459  optCellToInputWeights =
1460  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
1461  dataType);
1462  paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
1463  }
1464  optCellToForgetWeights =
1465  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
1466  paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
1467  optCellToOutputWeights =
1468  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
1469  paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
1470  }
1471 
1472  if(descriptor.m_LayerNormEnabled)
1473  {
1474  if (!descriptor.m_CifgEnabled)
1475  {
1476  optInputLayerNormWeights = OverrideDataType(
1477  cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
1478  paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
1479  }
1480 
1481  optForgetLayerNormWeights = OverrideDataType(
1482  cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
1483  paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;
1484 
1485  optCellLayerNormWeights = OverrideDataType(
1486  cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
1487  paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;
1488 
1489  optOutputLayerNormWeights = OverrideDataType(
1490  cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
1491  paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
1492  }
1493 
1494  result = layerSupportObject.IsUnidirectionalSequenceLstmSupported(input,
1495  outputStateIn,
1496  cellStateIn,
1497  outputStateOut,
1498  cellStateOut,
1499  output,
1500  descriptor,
1501  paramsInfo,
1502  reason);
1503  break;
1504  }
1505  default:
1506  {
1507  ARMNN_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
1508  reason.value() = "Unrecognised layer type";
1509  result = false;
1510  break;
1511  }
1512  }
1513  return result;
1514 }
1515 
1517  const IConnectableLayer& connectableLayer,
1518  Optional<DataType> dataType,
1519  std::string& outReasonIfUnsupported)
1520 {
1521  return IsLayerConfigurationSupported(backendId, connectableLayer, dataType, outReasonIfUnsupported);
1522 }
1523 
1525  Optional<DataType> dataType,
1526  std::string& outReasonIfUnsupported)
1527 {
1528  auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
1529  return IsLayerConfigurationSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
1530 }
1531 
1532 // TODO merge with defaulted modelOptions above
1534  Optional<DataType> dataType,
1535  std::string& outReasonIfUnsupported,
1536  const ModelOptions& modelOptions)
1537 {
1538  auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
1539  return IsLayerConfigurationSupported(layer->GetBackendId(),
1540  connectableLayer,
1541  dataType,
1542  outReasonIfUnsupported,
1543  modelOptions);
1544 }
1545 
1547  const IConnectableLayer& connectableLayer,
1548  Optional<DataType> dataType,
1549  std::string& outReasonIfUnsupported,
1550  const ModelOptions& modelOptions)
1551 {
1552  return IsLayerConfigurationSupported(backendId,
1553  connectableLayer,
1554  dataType,
1555  outReasonIfUnsupported,
1556  modelOptions);
1557 }
1559 std::unique_ptr<IWorkload> IWorkloadFactory::CreateWorkload(LayerType type,
1560  const QueueDescriptor& descriptor,
1561  const WorkloadInfo& info) const
1562 {
1563  switch(type)
1564  {
1565  case LayerType::Activation :
1566  {
1567  auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
1568  return CreateActivation(*activationQueueDescriptor, info);
1569  }
1570  case LayerType::Addition :
1571  {
1572  auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
1573  return CreateAddition(*additionQueueDescriptor, info);
1574  }
1575  case LayerType::ArgMinMax :
1576  {
1577  auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
1578  return CreateArgMinMax(*argMinMaxQueueDescriptor, info);
1579  }
1581  {
1582  auto batchNormQueueDescriptor = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
1583  return CreateBatchNormalization(*batchNormQueueDescriptor, info);
1584  }
1586  {
1587  auto batchToSpaceNdQueueDescriptor
1588  = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
1589  return CreateBatchToSpaceNd(*batchToSpaceNdQueueDescriptor, info);
1590  }
1591  case LayerType::Cast :
1592  {
1593  auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
1594  return CreateCast(*castQueueDescriptor, info);
1595  }
1597  {
1598  auto channelShuffleQueueDescriptor
1599  = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
1600  return CreateChannelShuffle(*channelShuffleQueueDescriptor, info);
1601  }
1602  case LayerType::Comparison :
1603  {
1604  auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
1605  return CreateComparison(*comparisonQueueDescriptor, info);
1606  }
1607  case LayerType::Concat :
1608  {
1609  auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
1610  return CreateConcat(*concatQueueDescriptor, info);
1611  }
1612  case LayerType::Constant :
1613  {
1614  auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
1615  return CreateConstant(*constantQueueDescriptor, info);
1616  }
1618  {
1619  auto convertBf16ToFp32QueueDescriptor
1620  = PolymorphicDowncast<const ConvertBf16ToFp32QueueDescriptor*>(&descriptor);
1621  return CreateConvertBf16ToFp32(*convertBf16ToFp32QueueDescriptor, info);
1622  }
1624  {
1625  auto convertFp16ToFp32QueueDescriptor
1626  = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
1627  return CreateConvertFp16ToFp32(*convertFp16ToFp32QueueDescriptor, info);
1628  }
1630  {
1631  auto convertFp32ToBf16QueueDescriptor
1632  = PolymorphicDowncast<const ConvertFp32ToBf16QueueDescriptor*>(&descriptor);
1633  return CreateConvertFp32ToBf16(*convertFp32ToBf16QueueDescriptor, info);
1634  }
1636  {
1637  auto convertFp32ToFp16QueueDescriptor
1638  = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
1639  return CreateConvertFp32ToFp16(*convertFp32ToFp16QueueDescriptor, info);
1640  }
1642  {
1643  auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
1644  return CreateConvolution2d(*convolution2dQueueDescriptor, info);
1645  }
1647  {
1648  auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
1649  return CreateConvolution3d(*convolution3dQueueDescriptor, info);
1650  }
1651  case LayerType::Debug:
1652  {
1653  auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
1654  return CreateDebug(*debugQueueDescriptor, info);
1655  }
1657  {
1658  auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
1659  return CreateDepthToSpace(*depthToSpaceQueueDescriptor, info);
1660  }
1662  {
1663  auto depthwiseConvolution2DQueueDescriptor
1664  = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
1665  return CreateDepthwiseConvolution2d(*depthwiseConvolution2DQueueDescriptor, info);
1666  }
1667  case LayerType::Dequantize:
1668  {
1669  auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
1670  return CreateDequantize(*dequantizeQueueDescriptor, info);
1671  }
1673  {
1674  auto detectionPostProcessQueueDescriptor
1675  = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
1676  return CreateDetectionPostProcess(*detectionPostProcessQueueDescriptor, info);
1677  }
1678  case LayerType::Division:
1679  {
1680  auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
1681  return CreateDivision(*divisionQueueDescriptor, info);
1682  }
1684  {
1685  auto elementwiseUnaryQueueDescriptor
1686  = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
1687  return CreateElementwiseUnary(*elementwiseUnaryQueueDescriptor, info);
1688 
1689  }
1691  {
1692  auto fakeQuantizationQueueDescriptor
1693  = PolymorphicDowncast<const FakeQuantizationQueueDescriptor*>(&descriptor);
1694  return CreateFakeQuantization(*fakeQuantizationQueueDescriptor, info);
1695  }
1696  case LayerType::Fill:
1697  {
1698  auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
1699  return CreateFill(*fillQueueDescriptor, info);
1700  }
1701  case LayerType::Floor:
1702  {
1703  auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
1704  return CreateFloor(*floorQueueDescriptor, info);
1705  }
1707  {
1708  auto fullyConnectedQueueDescriptor
1709  = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
1710  return CreateFullyConnected(*fullyConnectedQueueDescriptor, info);
1711  }
1712  case LayerType::Gather:
1713  {
1714  auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
1715  return CreateGather(*gatherQueueDescriptor, info);
1716  }
1717  case LayerType::Input:
1718  {
1719  auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
1720  return CreateInput(*inputQueueDescriptor, info);
1721  }
1723  {
1724  auto instanceNormalizationQueueDescriptor
1725  = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
1726  return CreateInstanceNormalization(*instanceNormalizationQueueDescriptor, info);
1727  }
1729  {
1730  auto l2NormalizationQueueDescriptor
1731  = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
1732  return CreateL2Normalization(*l2NormalizationQueueDescriptor, info);
1733  }
1735  {
1736  auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
1737  return CreateLogicalBinary(*logicalBinaryQueueDescriptor, info);
1738  }
1739  case LayerType::LogSoftmax:
1740  {
1741  auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
1742  return CreateLogSoftmax(*logSoftmaxQueueDescriptor, info);
1743  }
1744  case LayerType::Lstm:
1745  {
1746  auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
1747  return CreateLstm(*lstmQueueDescriptor, info);
1748  }
1749  case LayerType::Maximum:
1750  {
1751  auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
1752  return CreateMaximum(*maximumQueueDescriptor, info);
1753  }
1754  case LayerType::Mean:
1755  {
1756  auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
1757  return CreateMean(*meanQueueDescriptor, info);
1758  }
1759  case LayerType::MemCopy:
1760  {
1761  auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
1762  return CreateMemCopy(*memCopyQueueDescriptor, info);
1763  }
1764  case LayerType::MemImport:
1765  {
1766  auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
1767  return CreateMemImport(*memImportQueueDescriptor, info);
1768  }
1769  case LayerType::Minimum:
1770  {
1771  auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
1772  return CreateMinimum(*minimumQueueDescriptor, info);
1773  }
1775  {
1776  auto multiplicationQueueDescriptor
1777  = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
1778  return CreateMultiplication(*multiplicationQueueDescriptor, info);
1779  }
1781  {
1782  auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
1783  return CreateNormalization(*normalizationQueueDescriptor, info);
1784  }
1785  case LayerType::Output:
1786  {
1787  auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
1788  return CreateOutput(*outputQueueDescriptor, info);
1789  }
1790  case LayerType::Pad:
1791  {
1792  auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
1793  return CreatePad(*padQueueDescriptor, info);
1794  }
1795  case LayerType::Permute:
1796  {
1797  auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
1798  return CreatePermute(*permuteQueueDescriptor, info);
1799  }
1800  case LayerType::Pooling2d:
1801  {
1802  auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
1803  return CreatePooling2d(*pooling2dQueueDescriptor, info);
1804  }
1805  case LayerType::Pooling3d:
1806  {
1807  auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
1808  return CreatePooling3d(*pooling3dQueueDescriptor, info);
1809  }
1811  {
1812  auto preCompiledQueueDescriptor = PolymorphicDowncast<const PreCompiledQueueDescriptor*>(&descriptor);
1813  return CreatePreCompiled(*preCompiledQueueDescriptor, info);
1814  }
1815  case LayerType::Prelu:
1816  {
1817  auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
1818  return CreatePrelu(*preluQueueDescriptor, info);
1819  }
1820  case LayerType::QLstm:
1821  {
1822  auto qlstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
1823  return CreateQLstm(*qlstmQueueDescriptor, info);
1824  }
1825  case LayerType::Quantize:
1826  {
1827  auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
1828  return CreateQuantize(*quantizeQueueDescriptor, info);
1829  }
1830  case LayerType::Rank:
1831  {
1832  auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
1833  return CreateRank(*rankQueueDescriptor, info);
1834  }
1835  case LayerType::Reduce:
1836  {
1837  auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
1838  return CreateReduce(*reduceQueueDescriptor, info);
1839  }
1840  case LayerType::Reshape:
1841  {
1842  auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
1843  return CreateReshape(*reshapeQueueDescriptor, info);
1844  }
1845  case LayerType::Resize:
1846  {
1847  auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
1848  return CreateResize(*resizeQueueDescriptor, info);
1849  }
1850  case LayerType::Shape:
1851  {
1852  auto shapeQueueDescriptor = PolymorphicDowncast<const ShapeQueueDescriptor*>(&descriptor);
1853  return CreateShape(*shapeQueueDescriptor, info);
1854  }
1855  case LayerType::Slice:
1856  {
1857  auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
1858  return CreateSlice(*sliceQueueDescriptor, info);
1859  }
1860  case LayerType::Softmax:
1861  {
1862  auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
1863  return CreateSoftmax(*softmaxQueueDescriptor, info);
1864  }
1866  {
1867  auto spaceToBatchNdQueueDescriptor
1868  = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
1869  return CreateSpaceToBatchNd(*spaceToBatchNdQueueDescriptor, info);
1870  }
1872  {
1873  auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
1874  return CreateSpaceToDepth(*spaceToDepthQueueDescriptor, info);
1875  }
1876  case LayerType::Splitter:
1877  {
1878  auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
1879  return CreateSplitter(*splitterQueueDescriptor, info);
1880  }
1881  case LayerType::Stack:
1882  {
1883  auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
1884  return CreateStack(*stackQueueDescriptor, info);
1885  }
1887  {
1888  auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
1889  return CreateStridedSlice(*stridedSliceQueueDescriptor, info);
1890  }
1892  {
1893  auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
1894  return CreateSubtraction(*subtractionQueueDescriptor, info);
1895  }
1896  case LayerType::Transpose:
1897  {
1898  auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
1899  return CreateTranspose(*transposeQueueDescriptor, info);
1900  }
1902  {
1903  auto transposeConvolution2dQueueDescriptor
1904  = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
1905  return CreateTransposeConvolution2d(*transposeConvolution2dQueueDescriptor, info);
1906  }
1908  {
1909  auto unidirectionalSequenceLstmQueueDescriptor
1910  = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
1911  return CreateUnidirectionalSequenceLstm(*unidirectionalSequenceLstmQueueDescriptor, info);
1912  }
1913  default:
1914  return nullptr;
1915  }
1916 }
1918 
1919 std::unique_ptr<IWorkload> IWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& /*descriptor*/,
1920  const WorkloadInfo& /*info*/) const
1921 {
1922  return std::unique_ptr<IWorkload>();
1923 }
1924 
1925 std::unique_ptr<IWorkload> IWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& /*descriptor*/,
1926  const WorkloadInfo& /*info*/) const
1927 {
1928  return std::unique_ptr<IWorkload>();
1929 }
1930 
1931 std::unique_ptr<IWorkload> IWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& /*descriptor*/,
1932  const WorkloadInfo& /*info*/) const
1933 {
1934  return std::unique_ptr<IWorkload>();
1935 }
1936 
1937 std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchNormalization(
1938  const BatchNormalizationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
1939 {
1940  return std::unique_ptr<IWorkload>();
1941 }
1942 
1943 std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& /*desc*/,
1944  const WorkloadInfo& /*Info*/) const
1945 {
1946  return std::unique_ptr<IWorkload>();
1947 }
1948 
1949 std::unique_ptr<IWorkload> IWorkloadFactory::CreateCast(const CastQueueDescriptor& /*descriptor*/,
1950  const WorkloadInfo& /*info*/) const
1951 {
1952  return std::unique_ptr<IWorkload>();
1953 }
1954 
1955 std::unique_ptr<IWorkload> IWorkloadFactory::CreateChannelShuffle(const ChannelShuffleQueueDescriptor& /*descriptor*/,
1956  const WorkloadInfo& /*info*/) const
1957 {
1958  return std::unique_ptr<IWorkload>();
1959 }
1960 
1961 std::unique_ptr<IWorkload> IWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& /*descriptor*/,
1962  const WorkloadInfo& /*info*/) const
1963 {
1964  return std::unique_ptr<IWorkload>();
1965 }
1966 
1967 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& /*descriptor*/,
1968  const WorkloadInfo& /*info*/) const
1969 {
1970  return std::unique_ptr<IWorkload>();
1971 }
1972 
1973 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& /*descriptor*/,
1974  const WorkloadInfo& /*info*/) const
1975 {
1976  return std::unique_ptr<IWorkload>();
1977 }
1978 
1979 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor& /*desc*/,
1980  const WorkloadInfo& /*info*/) const
1981 {
1982  return std::unique_ptr<IWorkload>();
1983 }
1984 
1985 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& /*desc*/,
1986  const WorkloadInfo& /*info*/) const
1987 {
1988  return std::unique_ptr<IWorkload>();
1989 }
1990 
1991 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToBf16(const ConvertFp32ToBf16QueueDescriptor& /*desc*/,
1992  const WorkloadInfo& /*info*/) const
1993 {
1994  return std::unique_ptr<IWorkload>();
1995 }
1996 
1997 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& /*desc*/,
1998  const WorkloadInfo& /*info*/) const
1999 {
2000  return std::unique_ptr<IWorkload>();
2001 }
2002 
2003 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& /*descriptor*/,
2004  const WorkloadInfo& /*info*/) const
2005 {
2006  return std::unique_ptr<IWorkload>();
2007 }
2008 
2009 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution3d(const Convolution3dQueueDescriptor& /*descriptor*/,
2010  const WorkloadInfo& /*info*/) const
2011 {
2012  return std::unique_ptr<IWorkload>();
2013 }
2014 
2015 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDebug(const DebugQueueDescriptor& /*descriptor*/,
2016  const WorkloadInfo& /*info*/) const
2017 {
2018  return std::unique_ptr<IWorkload>();
2019 }
2020 
2021 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& /*descriptor*/,
2022  const WorkloadInfo& /*info*/) const
2023 {
2024  return std::unique_ptr<IWorkload>();
2025 }
2026 
2027 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthwiseConvolution2d(
2028  const DepthwiseConvolution2dQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
2029 {
2030  return std::unique_ptr<IWorkload>();
2031 }
2032 
2033 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDequantize(
2034  const DequantizeQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
2035 {
2036  return std::unique_ptr<IWorkload>();
2037 }
2038 
2039 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDetectionPostProcess(
2040  const DetectionPostProcessQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
2041 {
2042  return std::unique_ptr<IWorkload>();
2043 }
2044 
2045 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& /*descriptor*/,
2046  const WorkloadInfo& /*info*/) const
2047 {
2048  return std::unique_ptr<IWorkload>();
2049 }
2050 
2051 std::unique_ptr<IWorkload> IWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& /*desc*/,
2052  const WorkloadInfo& /*info*/) const
2053 {
2054  return std::unique_ptr<IWorkload>();
2055 }
2056 
2057 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& /*desc*/,
2058  const WorkloadInfo& /*info*/) const
2059 {
2060  return std::unique_ptr<IWorkload>();
2061 }
2062 
2063 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFill(const FillQueueDescriptor& /*descriptor*/,
2064  const WorkloadInfo& /*info*/) const
2065 {
2066  return std::unique_ptr<IWorkload>();
2067 }
2068 
2069 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFloor(const FloorQueueDescriptor& /*descriptor*/,
2070  const WorkloadInfo& /*info*/) const
2071 {
2072  return std::unique_ptr<IWorkload>();
2073 }
2074 
2075 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& /*descriptor*/,
2076  const WorkloadInfo& /*info*/) const
2077 {
2078  return std::unique_ptr<IWorkload>();
2079 }
2080 
2081 std::unique_ptr<IWorkload> IWorkloadFactory::CreateGather(const GatherQueueDescriptor& /*descriptor*/,
2082  const WorkloadInfo& /*info*/) const
2083 {
2084  return std::unique_ptr<IWorkload>();
2085 }
2086 
2087 std::unique_ptr<IWorkload> IWorkloadFactory::CreateInstanceNormalization(
2088  const InstanceNormalizationQueueDescriptor& /*descriptor*/,
2089  const WorkloadInfo& /*info*/) const
2090 {
2091  return std::unique_ptr<IWorkload>();
2092 }
2093 
2094 std::unique_ptr<IWorkload> IWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& /*desc*/,
2095  const WorkloadInfo& /*info*/) const
2096 {
2097  return std::unique_ptr<IWorkload>();
2098 }
2099 
2100 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogicalBinary(const LogicalBinaryQueueDescriptor& /*desc*/,
2101  const WorkloadInfo& /*info*/) const
2102 {
2103  return std::unique_ptr<IWorkload>();
2104 }
2105 
2106 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogicalUnary(const ElementwiseUnaryQueueDescriptor& /*desc*/,
2107  const WorkloadInfo& /*info*/) const
2108 {
2109  return std::unique_ptr<IWorkload>();
2110 }
2111 
2112 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& /*descriptor*/,
2113  const WorkloadInfo& /*info*/) const
2114 {
2115  return std::unique_ptr<IWorkload>();
2116 }
2117 
2118 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLstm(const LstmQueueDescriptor& /*descriptor*/,
2119  const WorkloadInfo& /*info*/) const
2120 {
2121  return std::unique_ptr<IWorkload>();
2122 }
2123 
2124 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& /*descriptor*/,
2125  const WorkloadInfo& /*info*/) const
2126 {
2127  return std::unique_ptr<IWorkload>();
2128 }
2129 
2130 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMean(const MeanQueueDescriptor& /*descriptor*/,
2131  const WorkloadInfo& /*Info*/) const
2132 {
2133  return std::unique_ptr<IWorkload>();
2134 }
2135 
2136 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& /*descriptor*/,
2137  const WorkloadInfo& /*info*/) const
2138 {
2139  return std::unique_ptr<IWorkload>();
2140 }
2141 
2142 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& /*descriptor*/,
2143  const WorkloadInfo& /*info*/) const
2144 {
2145  return std::unique_ptr<IWorkload>();
2146 }
2147 
2148 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerge(const MergeQueueDescriptor& /*descriptor*/,
2149  const WorkloadInfo& /*info*/) const
2150 {
2151  return std::unique_ptr<IWorkload>();
2152 }
2153 
2154 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& /*descriptor*/,
2155  const WorkloadInfo& /*info*/) const
2156 {
2157  return std::unique_ptr<IWorkload>();
2158 }
2159 
2160 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& /*descriptor*/,
2161  const WorkloadInfo& /*info*/) const
2162 {
2163  return std::unique_ptr<IWorkload>();
2164 }
2165 
2166 std::unique_ptr<IWorkload> IWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& /*descriptor*/,
2167  const WorkloadInfo& /*info*/) const
2168 {
2169  return std::unique_ptr<IWorkload>();
2170 }
2171 
2172 std::unique_ptr<IWorkload> IWorkloadFactory::CreateOutput(const OutputQueueDescriptor& /*descriptor*/,
2173  const WorkloadInfo& /*info*/) const
2174 {
2175  return std::unique_ptr<IWorkload>();
2176 }
2177 
2178 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePad(const PadQueueDescriptor& /*descriptor*/,
2179  const WorkloadInfo& /*Info*/) const
2180 {
2181  return std::unique_ptr<IWorkload>();
2182 }
2183 
2184 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& /*descriptor*/,
2185  const WorkloadInfo& /*info*/) const
2186 {
2187  return std::unique_ptr<IWorkload>();
2188 }
2189 
2190 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& /*descriptor*/,
2191  const WorkloadInfo& /*info*/) const
2192 {
2193  return std::unique_ptr<IWorkload>();
2194 }
2195 
2196 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePooling3d(const Pooling3dQueueDescriptor& /*descriptor*/,
2197  const WorkloadInfo& /*info*/) const
2198 {
2199  return std::unique_ptr<IWorkload>();
2200 }
2201 
2202 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& /*descriptor*/,
2203  const WorkloadInfo& /*info*/) const
2204 {
2205  return std::unique_ptr<IWorkload>();
2206 }
2207 
2208 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePrelu(const PreluQueueDescriptor &/*descriptor*/,
2209  const WorkloadInfo &/*info*/) const
2210 {
2211  return std::unique_ptr<IWorkload>();
2212 }
2213 
2214 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& /*descriptor*/,
2215  const WorkloadInfo& /*Info*/) const
2216 {
2217  return std::unique_ptr<IWorkload>();
2218 }
2219 
2220 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQLstm(const QLstmQueueDescriptor& /*descriptor*/,
2221  const WorkloadInfo& /*info*/) const
2222 {
2223  return std::unique_ptr<IWorkload>();
2224 }
2225 
2226 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& /*descriptor*/,
2227  const WorkloadInfo& /*info*/) const
2228 {
2229  return std::unique_ptr<IWorkload>();
2230 }
2231 std::unique_ptr<IWorkload> IWorkloadFactory::CreateRank(const RankQueueDescriptor& /*descriptor*/,
2232  const WorkloadInfo& /*info*/) const
2233 {
2234  return std::unique_ptr<IWorkload>();
2235 }
2236 
2237 std::unique_ptr<IWorkload> IWorkloadFactory::CreateReduce(const ReduceQueueDescriptor& /*descriptor*/,
2238  const WorkloadInfo& /*info*/) const
2239 {
2240  return std::unique_ptr<IWorkload>();
2241 }
2242 
2243 std::unique_ptr<IWorkload> IWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& /*descriptor*/,
2244  const WorkloadInfo& /*info*/) const
2245 {
2246  return std::unique_ptr<IWorkload>();
2247 }
2248 
2249 std::unique_ptr<IWorkload> IWorkloadFactory::CreateResize(const ResizeQueueDescriptor& /*descriptor*/,
2250  const WorkloadInfo& /*info*/) const
2251 {
2252  return std::unique_ptr<IWorkload>();
2253 }
2254 
2255 std::unique_ptr<IWorkload> IWorkloadFactory::CreateShape(const ShapeQueueDescriptor& /*descriptor*/,
2256  const WorkloadInfo& /*info*/) const
2257 {
2258  return std::unique_ptr<IWorkload>();
2259 }
2260 
2261 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSlice(const SliceQueueDescriptor& /*descriptor*/,
2262  const WorkloadInfo& /*info*/) const
2263 {
2264  return std::unique_ptr<IWorkload>();
2265 }
2266 
2267 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& /*descriptor*/,
2268  const WorkloadInfo& /*info*/) const
2269 {
2270  return std::unique_ptr<IWorkload>();
2271 }
2272 
2273 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& /*descriptor*/,
2274  const WorkloadInfo& /*info*/) const
2275 {
2276  return std::unique_ptr<IWorkload>();
2277 }
2278 
2279 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& /*descriptor*/,
2280  const WorkloadInfo& /*info*/) const
2281 {
2282  return std::unique_ptr<IWorkload>();
2283 }
2284 
2285 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& /*descriptor*/,
2286  const WorkloadInfo& /*info*/) const
2287 {
2288  return std::unique_ptr<IWorkload>();
2289 }
2290 
2291 std::unique_ptr<IWorkload> IWorkloadFactory::CreateStack(const StackQueueDescriptor& /*descriptor*/,
2292  const WorkloadInfo& /*info*/) const
2293 {
2294  return std::unique_ptr<IWorkload>();
2295 }
2296 
2297 std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& /*descriptor*/,
2298  const WorkloadInfo& /*info*/) const
2299 {
2300  return std::unique_ptr<IWorkload>();
2301 }
2302 
2303 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& /*descriptor*/,
2304  const WorkloadInfo& /*info*/) const
2305 {
2306  return std::unique_ptr<IWorkload>();
2307 }
2308 
2309 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSwitch(const SwitchQueueDescriptor& /*descriptor*/,
2310  const WorkloadInfo& /*info*/) const
2311 {
2312  return std::unique_ptr<IWorkload>();
2313 }
2314 
2315 std::unique_ptr<IWorkload> IWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& /*descriptor*/,
2316  const WorkloadInfo& /*info*/) const
2317 {
2318  return std::unique_ptr<IWorkload>();
2319 }
2320 
2321 std::unique_ptr<IWorkload> IWorkloadFactory::CreateTransposeConvolution2d(
2322  const TransposeConvolution2dQueueDescriptor& /*descriptor*/,
2323  const WorkloadInfo& /*info*/) const
2324 {
2325  return std::unique_ptr<IWorkload>();
2326 }
2327 
2328 std::unique_ptr<IWorkload> IWorkloadFactory::CreateUnidirectionalSequenceLstm(
2329  const UnidirectionalSequenceLstmQueueDescriptor& /*descriptor*/,
2330  const WorkloadInfo& /*info*/) const
2331 {
2332  return std::unique_ptr<IWorkload>();
2333 }
2334 
} // namespace armnn
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:66
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
std::vector< BackendOptions > ModelOptions
constexpr TransformIterator< Function, Iterator > MakeTransformIterator(Iterator i, Function f)
BackendRegistry & BackendRegistryInstance()
Copyright (c) 2021 ARM Limited and Contributors.
armnn::Optional< armnn::DataType > GetBiasTypeFromWeightsType(armnn::Optional< armnn::DataType > weightsType)
virtual std::unique_ptr< IWorkload > CreateInput(const InputQueueDescriptor &descriptor, const WorkloadInfo &info) const =0
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
LstmDescriptor UnidirectionalSequenceLstmDescriptor
static bool IsLayerSupported(const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
Contains information about TensorInfos of a layer.
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const
Depthwise Convolution 2D layer workload data.
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below...
Definition: Types.hpp:467