ArmNN
 21.08
WorkloadFactory.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include <Layer.hpp>
7 #include <LayersFwd.hpp>
8 
9 #include <armnn/Types.hpp>
10 #include <armnn/LayerSupport.hpp>
12 #include <armnn/BackendHelper.hpp>
16 
19 
21 
22 #include <sstream>
23 
24 namespace armnn
25 {
26 
27 namespace
28 {
29 using LayerList = std::list<Layer*>;
30 using Iterator = LayerList::const_iterator; // Const so pointers in the list can't be modified externally.
31 
32 const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
33 {
34  if (!type)
35  {
36  return info;
37  }
38 
39  return TensorInfo(info.GetShape(),
40  type.value(),
41  info.GetQuantizationScale(),
42  info.GetQuantizationOffset(),
43  info.IsConstant());
44 }
45 
46 } // anonymous namespace
47 
48 bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
49  const IConnectableLayer& connectableLayer,
50  Optional<DataType> dataType,
51  std::string& outReasonIfUnsupported,
52  const ModelOptions& modelOptions)
53 {
54  Optional<std::string&> reason = outReasonIfUnsupported;
55  bool result;
56  const Layer& layer = *(PolymorphicDowncast<const Layer*>(&connectableLayer));
57 
58  auto const& backendRegistry = BackendRegistryInstance();
59  if (!backendRegistry.IsBackendRegistered(backendId))
60  {
61  std::stringstream ss;
62  ss << connectableLayer.GetName() << " is not supported on " << backendId
63  << " because this backend is not registered.";
64 
65  outReasonIfUnsupported = ss.str();
66  return false;
67  }
68 
69  auto backendFactory = backendRegistry.GetFactory(backendId);
70  auto backendObject = backendFactory();
71  auto layerSupportObject = LayerSupportHandle(backendObject->GetLayerSupport(modelOptions), backendId);
72 
73  switch(layer.GetType())
74  {
76  {
77  auto cLayer = PolymorphicDowncast<const ActivationLayer*>(&layer);
78  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
79  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
80  result = layerSupportObject.IsActivationSupported(
81  OverrideDataType(input, dataType),
82  OverrideDataType(output, dataType),
83  cLayer->GetParameters(),
84  reason);
85  break;
86  }
88  {
89  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
90  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
91  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
92  result = layerSupportObject.IsAdditionSupported(
93  OverrideDataType(input0, dataType),
94  OverrideDataType(input1, dataType),
95  OverrideDataType(output, dataType),
96  reason);
97  break;
98  }
100  {
101  auto cLayer = PolymorphicDowncast<const ArgMinMaxLayer*>(&layer);
102  const ArgMinMaxDescriptor& descriptor = cLayer->GetParameters();
103 
104  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
105  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
106  result = layerSupportObject.IsArgMinMaxSupported(
107  OverrideDataType(input, dataType),
108  OverrideDataType(output, DataType::Signed32),
109  descriptor,
110  reason);
111  break;
112  }
114  {
115  auto cLayer = PolymorphicDowncast<const BatchNormalizationLayer*>(&layer);
116  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
117  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
118  const TensorInfo& mean = cLayer->m_Mean->GetTensorInfo();
119  const TensorInfo& var = cLayer->m_Variance->GetTensorInfo();
120  const TensorInfo& beta = cLayer->m_Beta->GetTensorInfo();
121  const TensorInfo& gamma = cLayer->m_Gamma->GetTensorInfo();
122  result = layerSupportObject.IsBatchNormalizationSupported(
123  OverrideDataType(input, dataType),
124  OverrideDataType(output, dataType),
125  OverrideDataType(mean, dataType),
126  OverrideDataType(var, dataType),
127  OverrideDataType(beta, dataType),
128  OverrideDataType(gamma, dataType),
129  cLayer->GetParameters(),
130  reason);
131  break;
132  }
134  {
135  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
136  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
137  auto cLayer = PolymorphicDowncast<const BatchToSpaceNdLayer*>(&layer);
138 
139  result = layerSupportObject.IsBatchToSpaceNdSupported(OverrideDataType(input, dataType),
140  OverrideDataType(output, dataType),
141  cLayer->GetParameters(),
142  reason);
143  break;
144  }
145  case LayerType::Cast:
146  {
147  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
148  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
149 
150  result = layerSupportObject.IsCastSupported(OverrideDataType(input, dataType),
151  OverrideDataType(output, dataType),
152  reason);
153  break;
154  }
156  {
157  auto cLayer = PolymorphicDowncast<const ComparisonLayer*>(&layer);
158 
159  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
160  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
161  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
162 
163  result = layerSupportObject.IsComparisonSupported(OverrideDataType(input0, dataType),
164  OverrideDataType(input1, dataType),
165  OverrideDataType(output, DataType::Boolean),
166  cLayer->GetParameters(),
167  reason);
168  break;
169  }
170  case LayerType::Constant:
171  {
172  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
173  result = layerSupportObject.IsConstantSupported(OverrideDataType(output, dataType), reason);
174  break;
175  }
177  {
178  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
179  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
180  result = layerSupportObject.IsConvertBf16ToFp32Supported(input, output, reason);
181  break;
182  }
184  {
185  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
186  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
187  result = layerSupportObject.IsConvertFp16ToFp32Supported(input, output, reason);
188  break;
189  }
191  {
192  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
193  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
194  result = layerSupportObject.IsConvertFp32ToBf16Supported(input, output, reason);
195  break;
196  }
198  {
199  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
200  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
201  result = layerSupportObject.IsConvertFp32ToFp16Supported(input, output, reason);
202  break;
203  }
205  {
206  auto cLayer = PolymorphicDowncast<const Convolution2dLayer*>(&layer);
207 
208  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
209  dataType);
210  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
211  ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
212 
213  const Convolution2dDescriptor& descriptor = cLayer->GetParameters();
214 
215  // Construct optional biases object based on the value of m_BiasEnabled
216  Optional<TensorInfo> biases;
217  if (descriptor.m_BiasEnabled)
218  {
219  biases =
220  OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
221  }
222 
223  result = layerSupportObject.IsConvolution2dSupported(
224  input,
225  output,
226  descriptor,
227  OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
228  biases,
229  reason);
230  break;
231  }
232  case LayerType::Debug:
233  {
234  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
235  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
236 
237  result = layerSupportObject.IsDebugSupported(OverrideDataType(input, dataType),
238  OverrideDataType(output, dataType),
239  reason);
240  break;
241  }
243  {
244  auto cLayer = PolymorphicDowncast<const DepthToSpaceLayer*>(&layer);
245 
246  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
247  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
248 
249  result = layerSupportObject.IsDepthToSpaceSupported(OverrideDataType(input, dataType),
250  OverrideDataType(output, dataType),
251  cLayer->GetParameters(),
252  reason);
253  break;
254  }
256  {
257  auto cLayer = PolymorphicDowncast<const DepthwiseConvolution2dLayer*>(&layer);
258  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
259  dataType);
260  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
261  ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
262 
263  const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();
264 
265  // Construct optional biases object based on the value of m_BiasEnabled
266  Optional<TensorInfo> biases;
267  if (descriptor.m_BiasEnabled)
268  {
269  biases =
270  OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
271  }
272 
273  result = layerSupportObject.IsDepthwiseConvolutionSupported(
274  input,
275  output,
276  descriptor,
277  OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
278  biases,
279  reason);
280  break;
281  }
283  {
284  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
285  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
286 
287  result = layerSupportObject.IsDequantizeSupported(input,
288  OverrideDataType(output, dataType),
289  reason);
290  break;
291  }
293  {
294  auto cLayer = PolymorphicDowncast<const DetectionPostProcessLayer*>(&layer);
295  const TensorInfo& boxEncodings = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
296  const TensorInfo& scores = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
297  const TensorInfo& anchors = cLayer->m_Anchors->GetTensorInfo();
298 
299  const TensorInfo& detectionBoxes = layer.GetOutputSlot(0).GetTensorInfo();
300  const TensorInfo& detectionClasses = layer.GetOutputSlot(1).GetTensorInfo();
301  const TensorInfo& detectionScores = layer.GetOutputSlot(2).GetTensorInfo();
302  const TensorInfo& numDetections = layer.GetOutputSlot(3).GetTensorInfo();
303 
304  const DetectionPostProcessDescriptor& descriptor = cLayer->GetParameters();
305  result = layerSupportObject.IsDetectionPostProcessSupported(boxEncodings,
306  scores,
307  anchors,
308  detectionBoxes,
309  detectionClasses,
310  detectionScores,
311  numDetections,
312  descriptor,
313  reason);
314  break;
315  }
317  {
318  auto cLayer = PolymorphicDowncast<const ElementwiseUnaryLayer*>(&layer);
319 
320  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
321  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
322 
323  result = layerSupportObject.IsElementwiseUnarySupported(OverrideDataType(input, dataType),
324  OverrideDataType(output, dataType),
325  cLayer->GetParameters(),
326  reason);
327  break;
328  }
329  case LayerType::Fill:
330  {
331  auto cLayer = PolymorphicDowncast<const FillLayer*>(&layer);
332  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
333  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
334  const FillDescriptor& descriptor = cLayer->GetParameters();
335 
336  result = layerSupportObject.IsFillSupported(
337  OverrideDataType(input, dataType),
338  OverrideDataType(output, dataType),
339  descriptor,
340  reason);
341  break;
342  }
344  {
345  auto cLayer = PolymorphicDowncast<const FakeQuantizationLayer*>(&layer);
346  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
347  result = layerSupportObject.IsFakeQuantizationSupported(OverrideDataType(input, dataType),
348  cLayer->GetParameters(),
349  reason);
350  break;
351  }
352  case LayerType::Floor:
353  {
354  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
355  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
356  result = layerSupportObject.IsFloorSupported(OverrideDataType(input, dataType),
357  OverrideDataType(output, dataType),
358  reason);
359  break;
360  }
362  {
363  auto cLayer = PolymorphicDowncast<const FullyConnectedLayer*>(&layer);
364  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
365  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
366 
367  const FullyConnectedDescriptor& descriptor = cLayer->GetParameters();
368  TensorInfo weightsInfo;
369  const TensorInfo* weightsInfoPtr = nullptr;
370 
371  weightsInfo = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(), dataType);
372  weightsInfoPtr = &weightsInfo;
373 
374  TensorInfo biasInfo;
375  const TensorInfo* biasInfoPtr = nullptr;
376  static const TensorInfo dummyBFloat16Bias(TensorShape({1,1,1,1}), DataType::BFloat16);
377  static const TensorInfo dummyFloat16Bias(TensorShape({1,1,1,1}), DataType::Float16);
378  static const TensorInfo dummyFloat32Bias(TensorShape({1,1,1,1}), DataType::Float32);
379  static const TensorInfo dummyQA8Bias(TensorShape({1,1,1,1}), DataType::Signed32);
380 
381  if (descriptor.m_BiasEnabled)
382  {
383  biasInfo = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(), dataType);
384  biasInfoPtr = &biasInfo;
385  }
386  else
387  {
388  // If biases are not enabled pass a dummy tensorinfo for the validation
389  switch(input.GetDataType())
390  {
391  case DataType::BFloat16:
392  {
393  biasInfoPtr = &dummyBFloat16Bias;
394  break;
395  }
396  case DataType::Float16:
397  {
398  biasInfoPtr = &dummyFloat16Bias;
399  break;
400  }
401  case DataType::Float32:
402  {
403  biasInfoPtr = &dummyFloat32Bias;
404  break;
405  }
406  case DataType::QAsymmU8:
407  case DataType::QAsymmS8:
408  case DataType::QSymmS8:
409  case DataType::QSymmS16:
410  {
411  biasInfoPtr = &dummyQA8Bias;
412  break;
413  }
414  default:
415  {
416  ARMNN_ASSERT_MSG(false, "Unexpected bias type");
417  }
418  }
419  }
420  result = layerSupportObject.IsFullyConnectedSupported(
421  OverrideDataType(input, dataType),
422  OverrideDataType(output, dataType),
423  *weightsInfoPtr,
424  *biasInfoPtr,
425  descriptor,
426  reason);
427  break;
428  }
429  case LayerType::Gather:
430  {
431  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
432  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
433  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
434  auto cLayer = PolymorphicDowncast<const GatherLayer*>(&layer);
435  const GatherDescriptor& descriptor = cLayer->GetParameters();
436  result = layerSupportObject.IsGatherSupported(OverrideDataType(input0, dataType),
437  input1,
438  OverrideDataType(output, dataType),
439  descriptor,
440  reason);
441  break;
442  }
443  case LayerType::Input:
444  {
445  const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo();
446  result = layerSupportObject.IsInputSupported(OverrideDataType(input, dataType), reason);
447  break;
448  }
450  {
451  auto cLayer = PolymorphicDowncast<const InstanceNormalizationLayer*>(&layer);
452  const InstanceNormalizationDescriptor& descriptor = cLayer->GetParameters();
453 
454  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
455  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
456 
457  result = layerSupportObject.IsInstanceNormalizationSupported(
458  OverrideDataType(input, dataType),
459  OverrideDataType(output, dataType),
460  descriptor,
461  reason);
462  break;
463  }
465  {
466  auto cLayer = PolymorphicDowncast<const L2NormalizationLayer*>(&layer);
467  const L2NormalizationDescriptor& descriptor = cLayer->GetParameters();
468 
469  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
470  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
471 
472  result = layerSupportObject.IsL2NormalizationSupported(
473  OverrideDataType(input, dataType),
474  OverrideDataType(output, dataType),
475  descriptor,
476  reason);
477  break;
478  }
480  {
481  auto cLayer = PolymorphicDowncast<const LogicalBinaryLayer*>(&layer);
482 
483  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
484  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
485  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
486 
487  result = layerSupportObject.IsLogicalBinarySupported(input0,
488  input1,
489  output,
490  cLayer->GetParameters(),
491  reason);
492  break;
493  }
495  {
496  auto cLayer = PolymorphicDowncast<const LogSoftmaxLayer*>(&layer);
497 
498  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
499  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
500 
501  result = layerSupportObject.IsLogSoftmaxSupported(OverrideDataType(input, dataType),
502  OverrideDataType(output, dataType),
503  cLayer->GetParameters(),
504  reason);
505  break;
506  }
507  case LayerType::Lstm:
508  {
509  auto cLayer = PolymorphicDowncast<const LstmLayer*>(&layer);
510  const LstmDescriptor& descriptor = cLayer->GetParameters();
511 
512  // All inputs.
513  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
514  dataType);
515  const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
516  dataType);
517  const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
518  dataType);
519  // All outputs
520  const TensorInfo& scratchBuffer = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
521  const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
522  const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
523  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(3).GetTensorInfo(), dataType);
524 
525  // Basic parameters
526  const TensorInfo& inputToForgetWeights
527  = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
528  const TensorInfo& inputToCellWeights
529  = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
530  const TensorInfo& inputToOutputWeights
531  = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
532  const TensorInfo& recurrentToForgetWeights
533  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
534  const TensorInfo& recurrentToCellWeights
535  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
536  const TensorInfo& recurrentToOutputWeights
537  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
538  const TensorInfo& forgetGateBias
539  = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
540  const TensorInfo& cellBias
541  = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
542  const TensorInfo& outputGateBias
543  = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);
544 
545  LstmInputParamsInfo paramsInfo;
546 
547  paramsInfo.m_InputToForgetWeights = &inputToForgetWeights;
548  paramsInfo.m_InputToCellWeights = &inputToCellWeights;
549  paramsInfo.m_InputToOutputWeights = &inputToOutputWeights;
550  paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
551  paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights;
552  paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
553  paramsInfo.m_ForgetGateBias = &forgetGateBias;
554  paramsInfo.m_CellBias = &cellBias;
555  paramsInfo.m_OutputGateBias = &outputGateBias;
556 
557 
558  // Optional parameters
559  TensorInfo optInputToInputWeights;
560  TensorInfo optRecurrentToInputWeights;
561  TensorInfo optCellToInputWeights;
562  TensorInfo optInputGateBias;
563  TensorInfo optProjectionWeights;
564  TensorInfo optProjectionBias;
565  TensorInfo optCellToForgetWeights;
566  TensorInfo optCellToOutputWeights;
567  TensorInfo optInputLayerNormWeights;
568  TensorInfo optForgetLayerNormWeights;
569  TensorInfo optCellLayerNormWeights;
570  TensorInfo optOutputLayerNormWeights;
571 
572  if(!descriptor.m_CifgEnabled)
573  {
574  optInputToInputWeights =
575  OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
576  paramsInfo.m_InputToInputWeights = &optInputToInputWeights;
577 
578  optRecurrentToInputWeights =
579  OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
580  paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
581  optInputGateBias =
582  OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
583  paramsInfo.m_InputGateBias = &optInputGateBias;
584  }
585 
586  if(descriptor.m_ProjectionEnabled)
587  {
588  optProjectionWeights =
589  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
590  paramsInfo.m_ProjectionWeights = &optProjectionWeights;
591  if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
592  {
593  optProjectionBias =
594  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
595  paramsInfo.m_ProjectionBias = &optProjectionBias;
596  }
597  }
598 
599  if(descriptor.m_PeepholeEnabled)
600  {
601  if(!descriptor.m_CifgEnabled)
602  {
603  optCellToInputWeights =
604  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
605  dataType);
606  paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
607  }
608  optCellToForgetWeights =
609  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
610  paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
611  optCellToOutputWeights =
612  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
613  paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
614  }
615 
616  if(descriptor.m_LayerNormEnabled)
617  {
618  if (!descriptor.m_CifgEnabled)
619  {
620  optInputLayerNormWeights = OverrideDataType(
621  cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
622  paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
623  }
624 
625  optForgetLayerNormWeights = OverrideDataType(
626  cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
627  paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;
628 
629  optCellLayerNormWeights = OverrideDataType(
630  cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
631  paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;
632 
633  optOutputLayerNormWeights = OverrideDataType(
634  cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
635  paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
636  }
637 
638  result = layerSupportObject.IsLstmSupported(
639  input,
640  outputStateIn,
641  cellStateIn,
642  scratchBuffer,
643  outputStateOut,
644  cellStateOut,
645  output,
646  descriptor,
647  paramsInfo,
648  reason);
649  break;
650  }
651  case LayerType::Maximum:
652  {
653  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
654  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
655  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
656 
657  result = layerSupportObject.IsMaximumSupported(OverrideDataType(input0, dataType),
658  OverrideDataType(input1, dataType),
659  OverrideDataType(output, dataType),
660  reason);
661  break;
662  }
663  case LayerType::MemCopy:
664  {
665  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
666  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
667 
668  result = layerSupportObject.IsMemCopySupported(OverrideDataType(input, dataType),
669  OverrideDataType(output, dataType),
670  reason);
671  break;
672  }
674  {
675  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
676  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
677 
678  result = layerSupportObject.IsMemImportSupported(OverrideDataType(input, dataType),
679  OverrideDataType(output, dataType),
680  reason);
681  break;
682  }
683  case LayerType::Merge:
684  {
685  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
686  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
687  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
688 
689  result = layerSupportObject.IsMergeSupported(OverrideDataType(input0, dataType),
690  OverrideDataType(input1, dataType),
691  OverrideDataType(output, dataType),
692  reason);
693  break;
694  }
695  case LayerType::Concat:
696  {
697  auto cLayer = PolymorphicDowncast<const ConcatLayer*>(&layer);
698 
699  // Get vector of all inputs.
700  auto getTensorInfo = [&dataType](const InputSlot& slot)
701  {
702  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
703  };
704 
705  auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo);
706  auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo);
707  std::vector<TensorInfo> inputs(beginI, endI);
708 
709  auto getTensorInfoPtr = [](const TensorInfo& info)
710  {
711  return &info;
712  };
713 
714  auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
715  auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
716  std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
717 
718  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
719 
720  result = layerSupportObject.IsConcatSupported(inputPtrs, output, cLayer->GetParameters(), reason);
721 
722 
723  break;
724  }
726  {
727  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
728  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
729  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
730  result = layerSupportObject.IsMultiplicationSupported(
731  OverrideDataType(input0, dataType),
732  OverrideDataType(input1, dataType),
733  OverrideDataType(output, dataType),
734  reason);
735  break;
736  }
738  {
739  auto cLayer = PolymorphicDowncast<const NormalizationLayer*>(&layer);
740  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
741  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
742  result = layerSupportObject.IsNormalizationSupported(OverrideDataType(input, dataType),
743  OverrideDataType(output, dataType),
744  cLayer->GetParameters(),
745  reason);
746  break;
747  }
748  case LayerType::Output:
749  {
750  const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
751  result = layerSupportObject.IsOutputSupported(OverrideDataType(output, dataType), reason);
752  break;
753  }
754  case LayerType::Permute:
755  {
756  auto cLayer = PolymorphicDowncast<const PermuteLayer*>(&layer);
757  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
758  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
759  result = layerSupportObject.IsPermuteSupported(OverrideDataType(input, dataType),
760  OverrideDataType(output, dataType),
761  cLayer->GetParameters(),
762  reason);
763  break;
764  }
765  case LayerType::Pad:
766  {
767  auto cLayer = PolymorphicDowncast<const PadLayer*>(&layer);
768  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
769  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
770  result = layerSupportObject.IsPadSupported(
771  OverrideDataType(input, dataType),
772  OverrideDataType(output, dataType),
773  cLayer->GetParameters(),
774  reason);
775  break;
776  }
778  {
779  auto cLayer = PolymorphicDowncast<const Pooling2dLayer*>(&layer);
780  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
781  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
782  result = layerSupportObject.IsPooling2dSupported(OverrideDataType(input, dataType),
783  OverrideDataType(output, dataType),
784  cLayer->GetParameters(),
785  reason);
786  break;
787  }
789  {
790  auto cLayer = PolymorphicDowncast<const PreCompiledLayer*>(&layer);
791  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
792  result = layerSupportObject.IsPreCompiledSupported(OverrideDataType(input, dataType),
793  cLayer->GetParameters(),
794  reason);
795  break;
796  }
797  case LayerType::Quantize:
798  {
799  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
800  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
801  result = layerSupportObject.IsQuantizeSupported(input, output, reason);
802  break;
803  }
804  case LayerType::QLstm:
805  {
806  auto cLayer = PolymorphicDowncast<const QLstmLayer*>(&layer);
807  const QLstmDescriptor& descriptor = cLayer->GetParameters();
808 
809  // Inputs
810  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
811  const TensorInfo& previousOutputIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
812  const TensorInfo& previousCellStateIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();
813 
814  // Outputs
815  const TensorInfo& outputStateOut = layer.GetOutputSlot(0).GetTensorInfo();
816  const TensorInfo& cellStateOut = layer.GetOutputSlot(1).GetTensorInfo();
817  const TensorInfo& output = layer.GetOutputSlot(2).GetTensorInfo();
818 
819  // Lstm parameters
820  LstmInputParamsInfo paramsInfo;
821 
822  // Basic parameters
823  ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToForgetWeights.get() != nullptr);
824  ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToCellWeights.get() != nullptr);
825  ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToOutputWeights.get() != nullptr);
826  paramsInfo.m_InputToForgetWeights = &cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo();
827  paramsInfo.m_InputToCellWeights = &cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo();
828  paramsInfo.m_InputToOutputWeights = &cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo();
829 
830  paramsInfo.m_RecurrentToForgetWeights =
831  &cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo();
832  paramsInfo.m_RecurrentToCellWeights =
833  &cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo();
834  paramsInfo.m_RecurrentToOutputWeights =
835  &cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo();
836 
837  paramsInfo.m_ForgetGateBias = &cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo();
838  paramsInfo.m_CellBias = &cLayer->m_BasicParameters.m_CellBias->GetTensorInfo();
839  paramsInfo.m_OutputGateBias = &cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo();
840 
841  if(!descriptor.m_CifgEnabled)
842  {
843  paramsInfo.m_InputToInputWeights = &cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo();
844  paramsInfo.m_RecurrentToInputWeights =
845  &cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo();
846  paramsInfo.m_InputGateBias = &cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo();
847  }
848 
849  if(descriptor.m_ProjectionEnabled)
850  {
851  paramsInfo.m_ProjectionWeights = &cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo();
852 
853  // Projection bias is optional even if projection is enabled
854  if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
855  {
856  paramsInfo.m_ProjectionBias = &cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo();
857  }
858  }
859 
860  if(descriptor.m_PeepholeEnabled)
861  {
862  if (!descriptor.m_CifgEnabled)
863  {
864  paramsInfo.m_CellToInputWeights =
865  &cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo();
866  }
867 
868  paramsInfo.m_CellToForgetWeights =
869  &cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo();
870  paramsInfo.m_CellToOutputWeights = &cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo();
871  }
872 
873  if(descriptor.m_LayerNormEnabled)
874  {
875  if (!descriptor.m_CifgEnabled)
876  {
877  paramsInfo.m_InputLayerNormWeights =
878  &cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo();
879  }
880 
881  paramsInfo.m_ForgetLayerNormWeights =
882  &cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo();
883  paramsInfo.m_CellLayerNormWeights =
884  &cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo();
885  paramsInfo.m_OutputLayerNormWeights =
886  &cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo();
887  }
888 
889  result = layerSupportObject.IsQLstmSupported(input,
890  previousOutputIn,
891  previousCellStateIn,
892  outputStateOut,
893  cellStateOut,
894  output,
895  descriptor,
896  paramsInfo,
897  reason);
898  break;
899  }
901  {
902  auto cLayer = PolymorphicDowncast<const QuantizedLstmLayer*>(&layer);
903 
904  // Inputs
905  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
906  const TensorInfo& previousCellStateIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
907  const TensorInfo& previousOutputIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();
908 
909  // Outputs
910  const TensorInfo& cellStateOut = layer.GetOutputSlot(0).GetTensorInfo();
911  const TensorInfo& output = layer.GetOutputSlot(1).GetTensorInfo();
912 
913  // QuantizedLstm parameters
914  QuantizedLstmInputParamsInfo paramsInfo;
915 
916  paramsInfo.m_InputToInputWeights =
917  &cLayer->m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo();
918  paramsInfo.m_InputToForgetWeights =
919  &cLayer->m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo();
920  paramsInfo.m_InputToCellWeights =
921  &cLayer->m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo();
922  paramsInfo.m_InputToOutputWeights =
923  &cLayer->m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo();
924 
925  paramsInfo.m_RecurrentToInputWeights =
926  &cLayer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo();
927  paramsInfo.m_RecurrentToForgetWeights =
928  &cLayer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo();
929  paramsInfo.m_RecurrentToCellWeights =
930  &cLayer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo();
931  paramsInfo.m_RecurrentToOutputWeights =
932  &cLayer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo();
933 
934  paramsInfo.m_InputGateBias =
935  &cLayer->m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo();
936  paramsInfo.m_ForgetGateBias =
937  &cLayer->m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo();
938  paramsInfo.m_CellBias =
939  &cLayer->m_QuantizedLstmParameters.m_CellBias->GetTensorInfo();
940  paramsInfo.m_OutputGateBias =
941  &cLayer->m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo();;
942 
943  result = layerSupportObject.IsQuantizedLstmSupported(input,
944  previousCellStateIn,
945  previousOutputIn,
946  cellStateOut,
947  output,
948  paramsInfo,
949  reason);
950  break;
951  }
952  case LayerType::Division:
953  {
954  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
955  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
956  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
957  result = layerSupportObject.IsDivisionSupported(
958  OverrideDataType(input0, dataType),
959  OverrideDataType(input1, dataType),
960  OverrideDataType(output, dataType),
961  reason);
962  break;
963  }
964  case LayerType::Rank:
965  {
966  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
967  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
968  result = layerSupportObject.IsRankSupported(OverrideDataType(input, dataType),
969  OverrideDataType(output, dataType),
970  reason);
971  break;
972  }
973  case LayerType::Reshape:
974  {
975  auto cLayer = PolymorphicDowncast<const ReshapeLayer*>(&layer);
976  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
977  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
978  result = layerSupportObject.IsReshapeSupported(OverrideDataType(input, dataType),
979  OverrideDataType(output, dataType),
980  cLayer->GetParameters(),
981  reason);
982  break;
983  }
984  case LayerType::Resize:
985  {
986  auto cLayer = PolymorphicDowncast<const ResizeLayer*>(&layer);
987  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
988  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
989  result = layerSupportObject.IsResizeSupported(OverrideDataType(input, dataType),
990  OverrideDataType(output, dataType),
991  cLayer->GetParameters(),
992  reason);
993  break;
994  }
995  case LayerType::Shape:
996  {
997  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
998  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
999 
1000  result = layerSupportObject.IsShapeSupported(OverrideDataType(input, dataType),
1001  OverrideDataType(output, dataType),
1002  reason);
1003  break;
1004  }
1005  case LayerType::Slice:
1006  {
1007  auto cLayer = PolymorphicDowncast<const SliceLayer*>(&layer);
1008 
1009  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1010  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1011 
1012  result = layerSupportObject.IsSliceSupported(OverrideDataType(input, dataType),
1013  OverrideDataType(output, dataType),
1014  cLayer->GetParameters(),
1015  reason);
1016  break;
1017  }
1018  case LayerType::Softmax:
1019  {
1020  auto cLayer = PolymorphicDowncast<const SoftmaxLayer*>(&layer);
1021  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1022  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1023  result = layerSupportObject.IsSoftmaxSupported(OverrideDataType(input, dataType),
1024  OverrideDataType(output, dataType),
1025  cLayer->GetParameters(),
1026  reason);
1027  break;
1028  }
1030  {
1031  auto cLayer = PolymorphicDowncast<const SpaceToBatchNdLayer*>(&layer);
1032  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1033  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1034  result = layerSupportObject.IsSpaceToBatchNdSupported(OverrideDataType(input, dataType),
1035  OverrideDataType(output, dataType),
1036  cLayer->GetParameters(),
1037  reason);
1038  break;
1039  }
1041  {
1042  auto cLayer = PolymorphicDowncast<const SpaceToDepthLayer*>(&layer);
1043 
1044  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1045  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1046 
1047  result = layerSupportObject.IsSpaceToDepthSupported(OverrideDataType(input, dataType),
1048  OverrideDataType(output, dataType),
1049  cLayer->GetParameters(),
1050  reason);
1051  break;
1052  }
1053  case LayerType::Splitter:
1054  {
1055  auto cLayer = PolymorphicDowncast<const SplitterLayer*>(&layer);
1056  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1057 
1058  // Get vector of all outputs.
1059  auto getTensorInfo = [&dataType](const OutputSlot& slot)
1060  {
1061  return OverrideDataType(slot.GetTensorInfo(), dataType);
1062  };
1063  auto beginI = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfo);
1064  auto endI = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfo);
1065  std::vector<TensorInfo> outputs(beginI, endI);
1066 
1067  const std::vector<std::reference_wrapper<TensorInfo>> outputPtrs(outputs.begin(), outputs.end());
1068 
1069  result = layerSupportObject.IsSplitterSupported(OverrideDataType(input, dataType),
1070  outputPtrs,
1071  cLayer->GetParameters(),
1072  reason);
1073  break;
1074  }
1075  case LayerType::Stack:
1076  {
1077  auto cLayer = PolymorphicDowncast<const StackLayer*>(&layer);
1078 
1079  // Get vector of all inputs.
1080  auto getTensorInfo = [&dataType](const InputSlot& slot)
1081  {
1082  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
1083  };
1084  auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo);
1085  auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo);
1086  std::vector<TensorInfo> inputs(beginI, endI);
1087 
1088  auto getTensorInfoPtr = [](const TensorInfo& info)
1089  {
1090  return &info;
1091  };
1092  auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
1093  auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
1094  std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
1095 
1096  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1097 
1098  result = layerSupportObject.IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason);
1099 
1100  break;
1101  }
1102  case LayerType::StandIn:
1103  {
1104  auto cLayer = PolymorphicDowncast<const StandInLayer*>(&layer);
1105 
1106  // Get vector of all inputs.
1107  auto getTensorInfoIn = [&dataType](const InputSlot& slot)
1108  {
1109  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
1110  };
1111  auto getTensorInfoOut = [&dataType](const OutputSlot& slot)
1112  {
1113  return OverrideDataType(slot.GetTensorInfo(), dataType);
1114  };
1115  auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfoIn);
1116  auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfoIn);
1117  std::vector<TensorInfo> inputs(beginI, endI);
1118 
1119  auto beginO = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfoOut);
1120  auto endO = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfoOut);
1121  std::vector<TensorInfo> outputs(beginO, endO);
1122 
1123 
1124  auto getTensorInfoPtr = [](const TensorInfo& info)
1125  {
1126  return &info;
1127  };
1128  auto beginPtrI = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
1129  auto endPtrI = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
1130  std::vector<const TensorInfo*> inputPtrs(beginPtrI, endPtrI);
1131 
1132  auto beginPtrO = MakeTransformIterator(outputs.begin(), getTensorInfoPtr);
1133  auto endPtrO = MakeTransformIterator(outputs.end(), getTensorInfoPtr);
1134  std::vector<const TensorInfo*> outputPtrs(beginPtrO, endPtrO);
1135 
1136 
1137  result = layerSupportObject.IsStandInSupported(inputPtrs,
1138  outputPtrs,
1139  cLayer->GetParameters(),
1140  reason);
1141  break;
1142  }
1144  {
1145  auto cLayer = PolymorphicDowncast<const StridedSliceLayer*>(&layer);
1146  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1147  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1148  result = layerSupportObject.IsStridedSliceSupported(OverrideDataType(input, dataType),
1149  OverrideDataType(output, dataType),
1150  cLayer->GetParameters(),
1151  reason);
1152  break;
1153  }
1155  {
1156  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1157  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1158  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1159  result = layerSupportObject.IsSubtractionSupported(
1160  OverrideDataType(input0, dataType),
1161  OverrideDataType(input1, dataType),
1162  OverrideDataType(output, dataType),
1163  reason);
1164  break;
1165  }
1166  case LayerType::Switch:
1167  {
1168  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1169  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1170  const TensorInfo& output0 = layer.GetOutputSlot(0).GetTensorInfo();
1171  const TensorInfo& output1 = layer.GetOutputSlot(1).GetTensorInfo();
1172  result = layerSupportObject.IsSwitchSupported(OverrideDataType(input0, dataType),
1173  OverrideDataType(input1, dataType),
1174  OverrideDataType(output0, dataType),
1175  OverrideDataType(output1, dataType),
1176  reason);
1177  break;
1178  }
1179  case LayerType::Mean:
1180  {
1181  auto cLayer = PolymorphicDowncast<const MeanLayer*>(&layer);
1182  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1183  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1184  result = layerSupportObject.IsMeanSupported(
1185  OverrideDataType(input, dataType),
1186  OverrideDataType(output, dataType),
1187  cLayer->GetParameters(),
1188  reason);
1189  break;
1190  }
1191  case LayerType::Minimum:
1192  {
1193  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1194  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1195  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1196  result = layerSupportObject.IsMinimumSupported(OverrideDataType(input0, dataType),
1197  OverrideDataType(input1, dataType),
1198  OverrideDataType(output, dataType),
1199  reason);
1200  break;
1201  }
1202  case LayerType::Prelu:
1203  {
1204  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1205  const TensorInfo& alpha = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1206  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1207  result = layerSupportObject.IsPreluSupported(OverrideDataType(input, dataType),
1208  OverrideDataType(alpha, dataType),
1209  OverrideDataType(output, dataType),
1210  reason);
1211  break;
1212  }
1213  case LayerType::Transpose:
1214  {
1215  auto cLayer = PolymorphicDowncast<const TransposeLayer*>(&layer);
1216  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1217  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1218  result = layerSupportObject.IsTransposeSupported(OverrideDataType(input, dataType),
1219  OverrideDataType(output, dataType),
1220  cLayer->GetParameters(),
1221  reason);
1222  break;
1223  }
1225  {
1226  auto cLayer = PolymorphicDowncast<const TransposeConvolution2dLayer*>(&layer);
1227 
1228  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
1229  dataType);
1230  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
1231 
1232  const TransposeConvolution2dDescriptor& descriptor = cLayer->GetParameters();
1233 
1234  Optional<TensorInfo> biases;
1235  if (descriptor.m_BiasEnabled)
1236  {
1237  ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
1238  biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
1239  GetBiasTypeFromWeightsType(dataType));
1240  }
1241 
1242  ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
1243  const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);
1244 
1245  result = layerSupportObject.IsTransposeConvolution2dSupported(input,
1246  output,
1247  descriptor,
1248  weights,
1249  biases,
1250  reason);
1251 
1252  break;
1253  }
1254  case LayerType::Reduce:
1255  {
1256  auto cLayer = PolymorphicDowncast<const ReduceLayer*>(&layer);
1257  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1258  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1259 
1260  result = layerSupportObject.IsReduceSupported(OverrideDataType(input, dataType),
1261  OverrideDataType(output, dataType),
1262  cLayer->GetParameters(),
1263  reason);
1264  break;
1265  }
1267  {
1268  auto cLayer = PolymorphicDowncast<const UnidirectionalSequenceLstmLayer*>(&layer);
1269  const UnidirectionalSequenceLstmDescriptor& descriptor = cLayer->GetParameters();
1270 
1271  // All inputs.
1272  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
1273  dataType);
1274  const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
1275  dataType);
1276  const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
1277  dataType);
1278  // Outputs
1279  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
1280 
1281  // Basic parameters
1282  const TensorInfo& inputToForgetWeights
1283  = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
1284  const TensorInfo& inputToCellWeights
1285  = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
1286  const TensorInfo& inputToOutputWeights
1287  = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
1288  const TensorInfo& recurrentToForgetWeights
1289  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
1290  const TensorInfo& recurrentToCellWeights
1291  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
1292  const TensorInfo& recurrentToOutputWeights
1293  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
1294  const TensorInfo& forgetGateBias
1295  = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
1296  const TensorInfo& cellBias
1297  = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
1298  const TensorInfo& outputGateBias
1299  = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);
1300 
1301  LstmInputParamsInfo paramsInfo;
1302 
1303  paramsInfo.m_InputToForgetWeights = &inputToForgetWeights;
1304  paramsInfo.m_InputToCellWeights = &inputToCellWeights;
1305  paramsInfo.m_InputToOutputWeights = &inputToOutputWeights;
1306  paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1307  paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights;
1308  paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1309  paramsInfo.m_ForgetGateBias = &forgetGateBias;
1310  paramsInfo.m_CellBias = &cellBias;
1311  paramsInfo.m_OutputGateBias = &outputGateBias;
1312 
1313  // Optional parameters
1314  TensorInfo optInputToInputWeights;
1315  TensorInfo optRecurrentToInputWeights;
1316  TensorInfo optCellToInputWeights;
1317  TensorInfo optInputGateBias;
1318  TensorInfo optProjectionWeights;
1319  TensorInfo optProjectionBias;
1320  TensorInfo optCellToForgetWeights;
1321  TensorInfo optCellToOutputWeights;
1322  TensorInfo optInputLayerNormWeights;
1323  TensorInfo optForgetLayerNormWeights;
1324  TensorInfo optCellLayerNormWeights;
1325  TensorInfo optOutputLayerNormWeights;
1326 
1327  if(!descriptor.m_CifgEnabled)
1328  {
1329  optInputToInputWeights =
1330  OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
1331  paramsInfo.m_InputToInputWeights = &optInputToInputWeights;
1332 
1333  optRecurrentToInputWeights =
1334  OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
1335  paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
1336  optInputGateBias =
1337  OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
1338  paramsInfo.m_InputGateBias = &optInputGateBias;
1339  }
1340 
1341  if(descriptor.m_ProjectionEnabled)
1342  {
1343  optProjectionWeights =
1344  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
1345  paramsInfo.m_ProjectionWeights = &optProjectionWeights;
1346  if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
1347  {
1348  optProjectionBias =
1349  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
1350  paramsInfo.m_ProjectionBias = &optProjectionBias;
1351  }
1352  }
1353 
1354  if(descriptor.m_PeepholeEnabled)
1355  {
1356  if(!descriptor.m_CifgEnabled)
1357  {
1358  optCellToInputWeights =
1359  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
1360  dataType);
1361  paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
1362  }
1363  optCellToForgetWeights =
1364  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
1365  paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
1366  optCellToOutputWeights =
1367  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
1368  paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
1369  }
1370 
1371  if(descriptor.m_LayerNormEnabled)
1372  {
1373  if (!descriptor.m_CifgEnabled)
1374  {
1375  optInputLayerNormWeights = OverrideDataType(
1376  cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
1377  paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
1378  }
1379 
1380  optForgetLayerNormWeights = OverrideDataType(
1381  cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
1382  paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;
1383 
1384  optCellLayerNormWeights = OverrideDataType(
1385  cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
1386  paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;
1387 
1388  optOutputLayerNormWeights = OverrideDataType(
1389  cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
1390  paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
1391  }
1392 
1393  Optional<TensorInfo> hiddenStateOut;
1394  Optional<TensorInfo> cellStateOut;
1395 
1396  result = layerSupportObject.IsUnidirectionalSequenceLstmSupported(input,
1397  outputStateIn,
1398  cellStateIn,
1399  output,
1400  hiddenStateOut,
1401  cellStateOut,
1402  descriptor,
1403  paramsInfo,
1404  reason);
1405  break;
1406  }
1407  default:
1408  {
1409  ARMNN_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
1410  reason.value() = "Unrecognised layer type";
1411  result = false;
1412  break;
1413  }
1414  }
1415  return result;
1416 }
1417 
1419  const IConnectableLayer& connectableLayer,
1420  Optional<DataType> dataType,
1421  std::string& outReasonIfUnsupported)
1422 {
1423  return IsLayerConfigurationSupported(backendId, connectableLayer, dataType, outReasonIfUnsupported);
1424 }
1425 
1427  Optional<DataType> dataType,
1428  std::string& outReasonIfUnsupported)
1429 {
1430  auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
1431  return IsLayerConfigurationSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
1432 }
1433 
1434 // TODO merge with defaulted modelOptions above
1436  Optional<DataType> dataType,
1437  std::string& outReasonIfUnsupported,
1438  const ModelOptions& modelOptions)
1439 {
1440  auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
1441  return IsLayerConfigurationSupported(layer->GetBackendId(),
1442  connectableLayer,
1443  dataType,
1444  outReasonIfUnsupported,
1445  modelOptions);
1446 }
1447 
1449  const IConnectableLayer& connectableLayer,
1450  Optional<DataType> dataType,
1451  std::string& outReasonIfUnsupported,
1452  const ModelOptions& modelOptions)
1453 {
1454  return IsLayerConfigurationSupported(backendId,
1455  connectableLayer,
1456  dataType,
1457  outReasonIfUnsupported,
1458  modelOptions);
1459 }
1460 
1461 // Default Implementations
1462 std::unique_ptr<IWorkload> IWorkloadFactory::CreateAbs(const AbsQueueDescriptor& /*descriptor*/,
1463  const WorkloadInfo& /*info*/) const
1464 {
1465  return std::unique_ptr<IWorkload>();
1466 }
1467 
1468 std::unique_ptr<IWorkload> IWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& /*descriptor*/,
1469  const WorkloadInfo& /*info*/) const
1470 {
1471  return std::unique_ptr<IWorkload>();
1472 }
1473 
1474 std::unique_ptr<IWorkload> IWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& /*descriptor*/,
1475  const WorkloadInfo& /*info*/) const
1476 {
1477  return std::unique_ptr<IWorkload>();
1478 }
1479 
1480 std::unique_ptr<IWorkload> IWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& /*descriptor*/,
1481  const WorkloadInfo& /*info*/) const
1482 {
1483  return std::unique_ptr<IWorkload>();
1484 }
1485 
1487  const BatchNormalizationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
1488 {
1489  return std::unique_ptr<IWorkload>();
1490 }
1491 
1493  const WorkloadInfo& /*Info*/) const
1494 {
1495  return std::unique_ptr<IWorkload>();
1496 }
1497 
1498 std::unique_ptr<IWorkload> IWorkloadFactory::CreateCast(const CastQueueDescriptor& /*descriptor*/,
1499  const WorkloadInfo& /*info*/) const
1500 {
1501  return std::unique_ptr<IWorkload>();
1502 }
1503 
1504 std::unique_ptr<IWorkload> IWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& /*descriptor*/,
1505  const WorkloadInfo& /*info*/) const
1506 {
1507  return std::unique_ptr<IWorkload>();
1508 }
1509 
1510 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& /*descriptor*/,
1511  const WorkloadInfo& /*info*/) const
1512 {
1513  return std::unique_ptr<IWorkload>();
1514 }
1515 
1516 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& /*descriptor*/,
1517  const WorkloadInfo& /*info*/) const
1518 {
1519  return std::unique_ptr<IWorkload>();
1520 }
1521 
1523  const WorkloadInfo& /*info*/) const
1524 {
1525  return std::unique_ptr<IWorkload>();
1526 }
1527 
1529  const WorkloadInfo& /*info*/) const
1530 {
1531  return std::unique_ptr<IWorkload>();
1532 }
1533 
1535  const WorkloadInfo& /*info*/) const
1536 {
1537  return std::unique_ptr<IWorkload>();
1538 }
1539 
1541  const WorkloadInfo& /*info*/) const
1542 {
1543  return std::unique_ptr<IWorkload>();
1544 }
1545 
1546 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& /*descriptor*/,
1547  const WorkloadInfo& /*info*/) const
1548 {
1549  return std::unique_ptr<IWorkload>();
1550 }
1551 
1552 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDebug(const DebugQueueDescriptor& /*descriptor*/,
1553  const WorkloadInfo& /*info*/) const
1554 {
1555  return std::unique_ptr<IWorkload>();
1556 }
1557 
1558 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& /*descriptor*/,
1559  const WorkloadInfo& /*info*/) const
1560 {
1561  return std::unique_ptr<IWorkload>();
1562 }
1563 
1565  const DepthwiseConvolution2dQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
1566 {
1567  return std::unique_ptr<IWorkload>();
1568 }
1569 
1570 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDequantize(
1571  const DequantizeQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
1572 {
1573  return std::unique_ptr<IWorkload>();
1574 }
1575 
1577  const DetectionPostProcessQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
1578 {
1579  return std::unique_ptr<IWorkload>();
1580 }
1581 
1582 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& /*descriptor*/,
1583  const WorkloadInfo& /*info*/) const
1584 {
1585  return std::unique_ptr<IWorkload>();
1586 }
1587 
1589  const WorkloadInfo& /*info*/) const
1590 {
1591  return std::unique_ptr<IWorkload>();
1592 }
1593 
1594 std::unique_ptr<IWorkload> IWorkloadFactory::CreateEqual(const EqualQueueDescriptor& /*descriptor*/,
1595  const WorkloadInfo& /*Info*/) const
1596 {
1597  return std::unique_ptr<IWorkload>();
1598 }
1599 
1601  const WorkloadInfo& /*info*/) const
1602 {
1603  return std::unique_ptr<IWorkload>();
1604 }
1605 
1606 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFill(const FillQueueDescriptor& /*descriptor*/,
1607  const WorkloadInfo& /*info*/) const
1608 {
1609  return std::unique_ptr<IWorkload>();
1610 }
1611 
1612 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFloor(const FloorQueueDescriptor& /*descriptor*/,
1613  const WorkloadInfo& /*info*/) const
1614 {
1615  return std::unique_ptr<IWorkload>();
1616 }
1617 
1618 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& /*descriptor*/,
1619  const WorkloadInfo& /*info*/) const
1620 {
1621  return std::unique_ptr<IWorkload>();
1622 }
1623 
1624 std::unique_ptr<IWorkload> IWorkloadFactory::CreateGather(const GatherQueueDescriptor& /*descriptor*/,
1625  const WorkloadInfo& /*info*/) const
1626 {
1627  return std::unique_ptr<IWorkload>();
1628 }
1629 
1630 std::unique_ptr<IWorkload> IWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& /*descriptor*/,
1631  const WorkloadInfo& /*info*/) const
1632 {
1633  return std::unique_ptr<IWorkload>();
1634 }
1635 
1637  const InstanceNormalizationQueueDescriptor& /*descriptor*/,
1638  const WorkloadInfo& /*info*/) const
1639 {
1640  return std::unique_ptr<IWorkload>();
1641 }
1642 
1644  const WorkloadInfo& /*info*/) const
1645 {
1646  return std::unique_ptr<IWorkload>();
1647 }
1648 
1649 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogicalBinary(const LogicalBinaryQueueDescriptor& /*desc*/,
1650  const WorkloadInfo& /*info*/) const
1651 {
1652  return std::unique_ptr<IWorkload>();
1653 }
1654 
1656  const WorkloadInfo& /*info*/) const
1657 {
1658  return std::unique_ptr<IWorkload>();
1659 }
1660 
1661 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& /*descriptor*/,
1662  const WorkloadInfo& /*info*/) const
1663 {
1664  return std::unique_ptr<IWorkload>();
1665 }
1666 
1667 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLstm(const LstmQueueDescriptor& /*descriptor*/,
1668  const WorkloadInfo& /*info*/) const
1669 {
1670  return std::unique_ptr<IWorkload>();
1671 }
1672 
1673 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& /*descriptor*/,
1674  const WorkloadInfo& /*info*/) const
1675 {
1676  return std::unique_ptr<IWorkload>();
1677 }
1678 
1679 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMean(const MeanQueueDescriptor& /*descriptor*/,
1680  const WorkloadInfo& /*Info*/) const
1681 {
1682  return std::unique_ptr<IWorkload>();
1683 }
1684 
1685 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& /*descriptor*/,
1686  const WorkloadInfo& /*info*/) const
1687 {
1688  return std::unique_ptr<IWorkload>();
1689 }
1690 
1691 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& /*descriptor*/,
1692  const WorkloadInfo& /*info*/) const
1693 {
1694  return std::unique_ptr<IWorkload>();
1695 }
1696 
1697 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerge(const MergeQueueDescriptor& /*descriptor*/,
1698  const WorkloadInfo& /*info*/) const
1699 {
1700  return std::unique_ptr<IWorkload>();
1701 }
1702 
1703 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerger(const MergerQueueDescriptor& /*descriptor*/,
1704  const WorkloadInfo& /*info*/) const
1705 {
1706  return std::unique_ptr<IWorkload>();
1707 }
1708 
1709 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& /*descriptor*/,
1710  const WorkloadInfo& /*info*/) const
1711 {
1712  return std::unique_ptr<IWorkload>();
1713 }
1714 
1715 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& /*descriptor*/,
1716  const WorkloadInfo& /*info*/) const
1717 {
1718  return std::unique_ptr<IWorkload>();
1719 }
1720 
1721 std::unique_ptr<IWorkload> IWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& /*descriptor*/,
1722  const WorkloadInfo& /*info*/) const
1723 {
1724  return std::unique_ptr<IWorkload>();
1725 }
1726 
1727 std::unique_ptr<IWorkload> IWorkloadFactory::CreateOutput(const OutputQueueDescriptor& /*descriptor*/,
1728  const WorkloadInfo& /*info*/) const
1729 {
1730  return std::unique_ptr<IWorkload>();
1731 }
1732 
1733 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePad(const PadQueueDescriptor& /*descriptor*/,
1734  const WorkloadInfo& /*Info*/) const
1735 {
1736  return std::unique_ptr<IWorkload>();
1737 }
1738 
1739 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& /*descriptor*/,
1740  const WorkloadInfo& /*info*/) const
1741 {
1742  return std::unique_ptr<IWorkload>();
1743 }
1744 
1745 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& /*descriptor*/,
1746  const WorkloadInfo& /*info*/) const
1747 {
1748  return std::unique_ptr<IWorkload>();
1749 }
1750 
1751 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& /*descriptor*/,
1752  const WorkloadInfo& /*info*/) const
1753 {
1754  return std::unique_ptr<IWorkload>();
1755 }
1756 
1757 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePrelu(const PreluQueueDescriptor &/*descriptor*/,
1758  const WorkloadInfo &/*info*/) const
1759 {
1760  return std::unique_ptr<IWorkload>();
1761 }
1762 
1763 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& /*descriptor*/,
1764  const WorkloadInfo& /*Info*/) const
1765 {
1766  return std::unique_ptr<IWorkload>();
1767 }
1768 
1769 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQLstm(const QLstmQueueDescriptor& /*descriptor*/,
1770  const WorkloadInfo& /*info*/) const
1771 {
1772  return std::unique_ptr<IWorkload>();
1773 }
1774 
1775 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& /*descriptor*/,
1776  const WorkloadInfo& /*info*/) const
1777 {
1778  return std::unique_ptr<IWorkload>();
1779 }
1780 std::unique_ptr<IWorkload> IWorkloadFactory::CreateRank(const RankQueueDescriptor& /*descriptor*/,
1781  const WorkloadInfo& /*info*/) const
1782 {
1783  return std::unique_ptr<IWorkload>();
1784 }
1785 
1786 std::unique_ptr<IWorkload> IWorkloadFactory::CreateReduce(const ReduceQueueDescriptor& /*descriptor*/,
1787  const WorkloadInfo& /*info*/) const
1788 {
1789  return std::unique_ptr<IWorkload>();
1790 }
1791 
1792 std::unique_ptr<IWorkload> IWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& /*descriptor*/,
1793  const WorkloadInfo& /*info*/) const
1794 {
1795  return std::unique_ptr<IWorkload>();
1796 }
1797 
1798 std::unique_ptr<IWorkload> IWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& /*descriptor*/,
1799  const WorkloadInfo& /*info*/) const
1800 {
1801  return std::unique_ptr<IWorkload>();
1802 }
1803 
1804 std::unique_ptr<IWorkload> IWorkloadFactory::CreateResize(const ResizeQueueDescriptor& /*descriptor*/,
1805  const WorkloadInfo& /*info*/) const
1806 {
1807  return std::unique_ptr<IWorkload>();
1808 }
1809 
1810 std::unique_ptr<IWorkload> IWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& /*descriptor*/,
1811  const WorkloadInfo& /*info*/) const
1812 {
1813  return std::unique_ptr<IWorkload>();
1814 }
1815 
1816 std::unique_ptr<IWorkload> IWorkloadFactory::CreateShape(const ShapeQueueDescriptor& /*descriptor*/,
1817  const WorkloadInfo& /*info*/) const
1818 {
1819  return std::unique_ptr<IWorkload>();
1820 }
1821 
1822 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSlice(const SliceQueueDescriptor& /*descriptor*/,
1823  const WorkloadInfo& /*info*/) const
1824 {
1825  return std::unique_ptr<IWorkload>();
1826 }
1827 
1828 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& /*descriptor*/,
1829  const WorkloadInfo& /*info*/) const
1830 {
1831  return std::unique_ptr<IWorkload>();
1832 }
1833 
1834 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& /*descriptor*/,
1835  const WorkloadInfo& /*info*/) const
1836 {
1837  return std::unique_ptr<IWorkload>();
1838 }
1839 
1840 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& /*descriptor*/,
1841  const WorkloadInfo& /*info*/) const
1842 {
1843  return std::unique_ptr<IWorkload>();
1844 }
1845 
1846 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& /*descriptor*/,
1847  const WorkloadInfo& /*info*/) const
1848 {
1849  return std::unique_ptr<IWorkload>();
1850 }
1851 
1852 std::unique_ptr<IWorkload> IWorkloadFactory::CreateStack(const StackQueueDescriptor& /*descriptor*/,
1853  const WorkloadInfo& /*info*/) const
1854 {
1855  return std::unique_ptr<IWorkload>();
1856 }
1857 
1858 std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& /*descriptor*/,
1859  const WorkloadInfo& /*info*/) const
1860 {
1861  return std::unique_ptr<IWorkload>();
1862 }
1863 
1864 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& /*descriptor*/,
1865  const WorkloadInfo& /*info*/) const
1866 {
1867  return std::unique_ptr<IWorkload>();
1868 }
1869 
1870 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSwitch(const SwitchQueueDescriptor& /*descriptor*/,
1871  const WorkloadInfo& /*info*/) const
1872 {
1873  return std::unique_ptr<IWorkload>();
1874 }
1875 
1876 std::unique_ptr<IWorkload> IWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& /*descriptor*/,
1877  const WorkloadInfo& /*info*/) const
1878 {
1879  return std::unique_ptr<IWorkload>();
1880 }
1881 
1883  const TransposeConvolution2dQueueDescriptor& /*descriptor*/,
1884  const WorkloadInfo& /*info*/) const
1885 {
1886  return std::unique_ptr<IWorkload>();
1887 }
1888 
1890  const UnidirectionalSequenceLstmQueueDescriptor& /*descriptor*/,
1891  const WorkloadInfo& /*info*/) const
1892 {
1893  return std::unique_ptr<IWorkload>();
1894 }
1895 
} // namespace armnn
virtual std::unique_ptr< IWorkload > CreateSplitter(const SplitterQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateBatchNormalization(const BatchNormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateDebug(const DebugQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMemCopy(const MemCopyQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateL2Normalization(const L2NormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:61
virtual std::unique_ptr< IWorkload > CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateMultiplication(const MultiplicationQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateInstanceNormalization(const InstanceNormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateGreater(const GreaterQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateArgMinMax(const ArgMinMaxQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMerger(const MergerQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateLogicalUnary(const ElementwiseUnaryQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateLogSoftmax(const LogSoftmaxQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateResizeBilinear(const ResizeBilinearQueueDescriptor &descriptor, const WorkloadInfo &info) const
std::vector< BackendOptions > ModelOptions
virtual std::unique_ptr< IWorkload > CreateStridedSlice(const StridedSliceQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateStack(const StackQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateLstm(const LstmQueueDescriptor &descriptor, const WorkloadInfo &info) const
constexpr TransformIterator< Function, Iterator > MakeTransformIterator(Iterator i, Function f)
virtual std::unique_ptr< IWorkload > CreateFakeQuantization(const FakeQuantizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateQuantizedLstm(const QuantizedLstmQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateQLstm(const QLstmQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateConstant(const ConstantQueueDescriptor &descriptor, const WorkloadInfo &info) const
BackendRegistry & BackendRegistryInstance()
virtual std::unique_ptr< IWorkload > CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateAbs(const AbsQueueDescriptor &descriptor, const WorkloadInfo &info) const
Copyright (c) 2021 ARM Limited and Contributors.
virtual std::unique_ptr< IWorkload > CreateActivation(const ActivationQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateRsqrt(const RsqrtQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateTranspose(const TransposeQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateDivision(const DivisionQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateConvertFp32ToBf16(const ConvertFp32ToBf16QueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMaximum(const MaximumQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateUnidirectionalSequenceLstm(const UnidirectionalSequenceLstmQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMerge(const MergeQueueDescriptor &descriptor, const WorkloadInfo &info) const
armnn::Optional< armnn::DataType > GetBiasTypeFromWeightsType(armnn::Optional< armnn::DataType > weightsType)
virtual std::unique_ptr< IWorkload > CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateEqual(const EqualQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateRank(const RankQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateDetectionPostProcess(const DetectionPostProcessQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateResize(const ResizeQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateCast(const CastQueueDescriptor &descriptor, const WorkloadInfo &Info) const
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
virtual std::unique_ptr< IWorkload > CreateQuantize(const QuantizeQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateReduce(const ReduceQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateSwitch(const SwitchQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreatePad(const PadQueueDescriptor &descriptor, const WorkloadInfo &Info) const
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
LstmDescriptor UnidirectionalSequenceLstmDescriptor
static bool IsLayerSupported(const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
virtual std::unique_ptr< IWorkload > CreateNormalization(const NormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateLogicalBinary(const LogicalBinaryQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateReshape(const ReshapeQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreatePermute(const PermuteQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateFill(const FillQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateComparison(const ComparisonQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreatePooling2d(const Pooling2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateSpaceToDepth(const SpaceToDepthQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateGather(const GatherQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMinimum(const MinimumQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateDepthToSpace(const DepthToSpaceQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateSlice(const SliceQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateAddition(const AdditionQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMean(const MeanQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateOutput(const OutputQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateSoftmax(const SoftmaxQueueDescriptor &descriptor, const WorkloadInfo &info) const
Contains information about TensorInfos of a layer.
virtual std::unique_ptr< IWorkload > CreateFullyConnected(const FullyConnectedQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateFloor(const FloorQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMemImport(const MemImportQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateSubtraction(const SubtractionQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreatePreCompiled(const PreCompiledQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateShape(const ShapeQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor &descriptor, const WorkloadInfo &info) const
Depthwise Convolution 2D layer workload data.
virtual std::unique_ptr< IWorkload > CreateConvolution2d(const Convolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreatePrelu(const PreluQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateDequantize(const DequantizeQueueDescriptor &descriptor, const WorkloadInfo &info) const