ArmNN 21.11 - WorkloadFactory.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <Layer.hpp>
#include <LayersFwd.hpp>

#include <armnn/Types.hpp>
#include <armnn/LayerSupport.hpp>
#include <armnn/ILayerSupport.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <armnn/utility/TransformIterator.hpp>

#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <sstream>

namespace armnn
{

namespace
{
using LayerList = std::list<Layer*>;
using Iterator = LayerList::const_iterator; // Const so pointers in the list can't be modified externally.

const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
{
    if (!type)
    {
        return info;
    }

    return TensorInfo(info.GetShape(),
                      type.value(),
                      info.GetQuantizationScale(),
                      info.GetQuantizationOffset(),
                      info.IsConstant());
}

} // anonymous namespace
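
// Usage sketch for OverrideDataType (illustrative only; the shape below is arbitrary):
// the helper returns the TensorInfo unchanged when no override type is supplied,
// otherwise it re-types the info while preserving shape, quantization and constness.
//
//     TensorInfo fp32Info(TensorShape({1, 2, 2, 1}), DataType::Float32);
//     TensorInfo unchanged = OverrideDataType(fp32Info, EmptyOptional());   // still Float32
//     TensorInfo asFp16    = OverrideDataType(fp32Info, DataType::Float16); // Float16, same shape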

bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
                                                     const IConnectableLayer& connectableLayer,
                                                     Optional<DataType> dataType,
                                                     std::string& outReasonIfUnsupported,
                                                     const ModelOptions& modelOptions)
{
    Optional<std::string&> reason = outReasonIfUnsupported;
    bool result;
    const Layer& layer = *(PolymorphicDowncast<const Layer*>(&connectableLayer));

    auto const& backendRegistry = BackendRegistryInstance();
    if (!backendRegistry.IsBackendRegistered(backendId))
    {
        std::stringstream ss;
        ss << connectableLayer.GetName() << " is not supported on " << backendId
           << " because this backend is not registered.";

        outReasonIfUnsupported = ss.str();
        return false;
    }

    auto backendFactory = backendRegistry.GetFactory(backendId);
    auto backendObject = backendFactory();
    auto layerSupportObject = LayerSupportHandle(backendObject->GetLayerSupport(modelOptions), backendId);

    switch(layer.GetType())
    {
        case LayerType::Activation:
        {
            auto cLayer = PolymorphicDowncast<const ActivationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsActivationSupported(
                                            OverrideDataType(input, dataType),
                                            OverrideDataType(output, dataType),
                                            cLayer->GetParameters(),
                                            reason);
            break;
        }
        case LayerType::Addition:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsAdditionSupported(
                                            OverrideDataType(input0, dataType),
                                            OverrideDataType(input1, dataType),
                                            OverrideDataType(output, dataType),
                                            reason);
            break;
        }
        case LayerType::ArgMinMax:
        {
            auto cLayer = PolymorphicDowncast<const ArgMinMaxLayer*>(&layer);
            const ArgMinMaxDescriptor& descriptor = cLayer->GetParameters();

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsArgMinMaxSupported(
                                            OverrideDataType(input, dataType),
                                            OverrideDataType(output, DataType::Signed32),
                                            descriptor,
                                            reason);
            break;
        }
        case LayerType::BatchNormalization:
        {
            auto cLayer = PolymorphicDowncast<const BatchNormalizationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            const TensorInfo& mean = cLayer->m_Mean->GetTensorInfo();
            const TensorInfo& var = cLayer->m_Variance->GetTensorInfo();
            const TensorInfo& beta = cLayer->m_Beta->GetTensorInfo();
            const TensorInfo& gamma = cLayer->m_Gamma->GetTensorInfo();
            result = layerSupportObject.IsBatchNormalizationSupported(
                                            OverrideDataType(input, dataType),
                                            OverrideDataType(output, dataType),
                                            OverrideDataType(mean, dataType),
                                            OverrideDataType(var, dataType),
                                            OverrideDataType(beta, dataType),
                                            OverrideDataType(gamma, dataType),
                                            cLayer->GetParameters(),
                                            reason);
            break;
        }
        case LayerType::BatchToSpaceNd:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            auto cLayer = PolymorphicDowncast<const BatchToSpaceNdLayer*>(&layer);

            result = layerSupportObject.IsBatchToSpaceNdSupported(OverrideDataType(input, dataType),
                                                                  OverrideDataType(output, dataType),
                                                                  cLayer->GetParameters(),
                                                                  reason);
            break;
        }
        case LayerType::Cast:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsCastSupported(OverrideDataType(input, dataType),
                                                        OverrideDataType(output, dataType),
                                                        reason);
            break;
        }
        case LayerType::ChannelShuffle:
        {
            auto cLayer = PolymorphicDowncast<const ChannelShuffleLayer*>(&layer);

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            const ChannelShuffleDescriptor descriptor = cLayer->GetParameters();

            result = layerSupportObject.IsChannelShuffleSupported(OverrideDataType(input, dataType),
                                                                  OverrideDataType(output, dataType),
                                                                  descriptor,
                                                                  reason);
            break;
        }
        case LayerType::Comparison:
        {
            auto cLayer = PolymorphicDowncast<const ComparisonLayer*>(&layer);

            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsComparisonSupported(OverrideDataType(input0, dataType),
                                                              OverrideDataType(input1, dataType),
                                                              OverrideDataType(output, DataType::Boolean),
                                                              cLayer->GetParameters(),
                                                              reason);
            break;
        }
        case LayerType::Constant:
        {
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsConstantSupported(OverrideDataType(output, dataType), reason);
            break;
        }
        case LayerType::ConvertBf16ToFp32:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsConvertBf16ToFp32Supported(input, output, reason);
            break;
        }
        case LayerType::ConvertFp16ToFp32:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsConvertFp16ToFp32Supported(input, output, reason);
            break;
        }
        case LayerType::ConvertFp32ToBf16:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsConvertFp32ToBf16Supported(input, output, reason);
            break;
        }
        case LayerType::ConvertFp32ToFp16:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsConvertFp32ToFp16Supported(input, output, reason);
            break;
        }
        case LayerType::Convolution2d:
        {
            auto cLayer = PolymorphicDowncast<const Convolution2dLayer*>(&layer);

            const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                      dataType);
            const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
            ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);

            const Convolution2dDescriptor& descriptor = cLayer->GetParameters();

            // Construct optional biases object based on the value of m_BiasEnabled
            Optional<TensorInfo> biases;
            if (descriptor.m_BiasEnabled)
            {
                biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
            }

            result = layerSupportObject.IsConvolution2dSupported(
                                            input,
                                            output,
                                            descriptor,
                                            OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
                                            biases,
                                            reason);
            break;
        }
        case LayerType::Convolution3d:
        {
            auto cLayer = PolymorphicDowncast<const Convolution3dLayer*>(&layer);

            const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                      dataType);
            const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);

            ARMNN_ASSERT_MSG(layer.GetInputSlot(1).GetConnection(),
                             "Convolution3dLayer: Weights should be connected as a Constant Layer.");
            const TensorInfo weights = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
                                                        dataType);

            const Convolution3dDescriptor& descriptor = cLayer->GetParameters();

            // Construct optional biases object based on the value of m_BiasEnabled
            Optional<TensorInfo> biases;
            if (descriptor.m_BiasEnabled)
            {
                biases = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
                                          GetBiasTypeFromWeightsType(dataType));
            }

            result = layerSupportObject.IsConvolution3dSupported(
                                            input,
                                            output,
                                            descriptor,
                                            weights,
                                            biases,
                                            reason);
            break;
        }
        case LayerType::Debug:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsDebugSupported(OverrideDataType(input, dataType),
                                                         OverrideDataType(output, dataType),
                                                         reason);
            break;
        }
        case LayerType::DepthToSpace:
        {
            auto cLayer = PolymorphicDowncast<const DepthToSpaceLayer*>(&layer);

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsDepthToSpaceSupported(OverrideDataType(input, dataType),
                                                                OverrideDataType(output, dataType),
                                                                cLayer->GetParameters(),
                                                                reason);
            break;
        }
        case LayerType::DepthwiseConvolution2d:
        {
            auto cLayer = PolymorphicDowncast<const DepthwiseConvolution2dLayer*>(&layer);
            const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                       dataType);
            const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
            ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);

            const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();

            // Construct optional biases object based on the value of m_BiasEnabled
            Optional<TensorInfo> biases;
            if (descriptor.m_BiasEnabled)
            {
                biases =
                    OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
            }

            result = layerSupportObject.IsDepthwiseConvolutionSupported(
                                            input,
                                            output,
                                            descriptor,
                                            OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
                                            biases,
                                            reason);
            break;
        }
        case LayerType::Dequantize:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsDequantizeSupported(input,
                                                              OverrideDataType(output, dataType),
                                                              reason);
            break;
        }
        case LayerType::DetectionPostProcess:
        {
            auto cLayer = PolymorphicDowncast<const DetectionPostProcessLayer*>(&layer);
            const TensorInfo& boxEncodings = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& scores = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& anchors = cLayer->m_Anchors->GetTensorInfo();

            const TensorInfo& detectionBoxes = layer.GetOutputSlot(0).GetTensorInfo();
            const TensorInfo& detectionClasses = layer.GetOutputSlot(1).GetTensorInfo();
            const TensorInfo& detectionScores = layer.GetOutputSlot(2).GetTensorInfo();
            const TensorInfo& numDetections = layer.GetOutputSlot(3).GetTensorInfo();

            const DetectionPostProcessDescriptor& descriptor = cLayer->GetParameters();
            result = layerSupportObject.IsDetectionPostProcessSupported(boxEncodings,
                                                                        scores,
                                                                        anchors,
                                                                        detectionBoxes,
                                                                        detectionClasses,
                                                                        detectionScores,
                                                                        numDetections,
                                                                        descriptor,
                                                                        reason);
            break;
        }
        case LayerType::ElementwiseUnary:
        {
            auto cLayer = PolymorphicDowncast<const ElementwiseUnaryLayer*>(&layer);

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsElementwiseUnarySupported(OverrideDataType(input, dataType),
                                                                    OverrideDataType(output, dataType),
                                                                    cLayer->GetParameters(),
                                                                    reason);
            break;
        }
        case LayerType::Fill:
        {
            auto cLayer = PolymorphicDowncast<const FillLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            const FillDescriptor& descriptor = cLayer->GetParameters();

            result = layerSupportObject.IsFillSupported(
                                            OverrideDataType(input, dataType),
                                            OverrideDataType(output, dataType),
                                            descriptor,
                                            reason);
            break;
        }
        case LayerType::FakeQuantization:
        {
            auto cLayer = PolymorphicDowncast<const FakeQuantizationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = layerSupportObject.IsFakeQuantizationSupported(OverrideDataType(input, dataType),
                                                                    cLayer->GetParameters(),
                                                                    reason);
            break;
        }
        case LayerType::Floor:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsFloorSupported(OverrideDataType(input, dataType),
                                                         OverrideDataType(output, dataType),
                                                         reason);
            break;
        }
        case LayerType::FullyConnected:
        {
            auto cLayer = PolymorphicDowncast<const FullyConnectedLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            const FullyConnectedDescriptor& descriptor = cLayer->GetParameters();
            TensorInfo weightsInfo;
            const TensorInfo* weightsInfoPtr = nullptr;

            weightsInfo = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(), dataType);
            weightsInfoPtr = &weightsInfo;

            TensorInfo biasInfo;
            const TensorInfo* biasInfoPtr = nullptr;
            static const TensorInfo dummyBFloat16Bias(TensorShape({1,1,1,1}), DataType::BFloat16);
            static const TensorInfo dummyFloat16Bias(TensorShape({1,1,1,1}), DataType::Float16);
            static const TensorInfo dummyFloat32Bias(TensorShape({1,1,1,1}), DataType::Float32);
            static const TensorInfo dummyQA8Bias(TensorShape({1,1,1,1}), DataType::Signed32);

            if (descriptor.m_BiasEnabled)
            {
                biasInfo = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(), dataType);
                biasInfoPtr = &biasInfo;
            }
            else
            {
                // If biases are not enabled pass a dummy tensorinfo for the validation
                switch(input.GetDataType())
                {
                    case DataType::BFloat16:
                    {
                        biasInfoPtr = &dummyBFloat16Bias;
                        break;
                    }
                    case DataType::Float16:
                    {
                        biasInfoPtr = &dummyFloat16Bias;
                        break;
                    }
                    case DataType::Float32:
                    {
                        biasInfoPtr = &dummyFloat32Bias;
                        break;
                    }
                    case DataType::QAsymmU8:
                    case DataType::QAsymmS8:
                    case DataType::QSymmS8:
                    case DataType::QSymmS16:
                    {
                        biasInfoPtr = &dummyQA8Bias;
                        break;
                    }
                    default:
                    {
                        ARMNN_ASSERT_MSG(false, "Unexpected bias type");
                    }
                }
            }
            result = layerSupportObject.IsFullyConnectedSupported(
                                            OverrideDataType(input, dataType),
                                            OverrideDataType(output, dataType),
                                            *weightsInfoPtr,
                                            *biasInfoPtr,
                                            descriptor,
                                            reason);
            break;
        }
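        // (A note on the dummy bias TensorInfos above: IsFullyConnectedSupported
        // always takes a bias TensorInfo, so when bias is disabled a placeholder
        // whose data type matches the input type is passed purely for validation.)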
        case LayerType::Gather:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            auto cLayer = PolymorphicDowncast<const GatherLayer*>(&layer);
            const GatherDescriptor& descriptor = cLayer->GetParameters();
            result = layerSupportObject.IsGatherSupported(OverrideDataType(input0, dataType),
                                                          input1,
                                                          OverrideDataType(output, dataType),
                                                          descriptor,
                                                          reason);
            break;
        }
        case LayerType::Input:
        {
            const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsInputSupported(OverrideDataType(input, dataType), reason);
            break;
        }
        case LayerType::InstanceNormalization:
        {
            auto cLayer = PolymorphicDowncast<const InstanceNormalizationLayer*>(&layer);
            const InstanceNormalizationDescriptor& descriptor = cLayer->GetParameters();

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsInstanceNormalizationSupported(
                                            OverrideDataType(input, dataType),
                                            OverrideDataType(output, dataType),
                                            descriptor,
                                            reason);
            break;
        }
        case LayerType::L2Normalization:
        {
            auto cLayer = PolymorphicDowncast<const L2NormalizationLayer*>(&layer);
            const L2NormalizationDescriptor& descriptor = cLayer->GetParameters();

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsL2NormalizationSupported(
                                            OverrideDataType(input, dataType),
                                            OverrideDataType(output, dataType),
                                            descriptor,
                                            reason);
            break;
        }
        case LayerType::LogicalBinary:
        {
            auto cLayer = PolymorphicDowncast<const LogicalBinaryLayer*>(&layer);

            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsLogicalBinarySupported(input0,
                                                                 input1,
                                                                 output,
                                                                 cLayer->GetParameters(),
                                                                 reason);
            break;
        }
        case LayerType::LogSoftmax:
        {
            auto cLayer = PolymorphicDowncast<const LogSoftmaxLayer*>(&layer);

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsLogSoftmaxSupported(OverrideDataType(input, dataType),
                                                              OverrideDataType(output, dataType),
                                                              cLayer->GetParameters(),
                                                              reason);
            break;
        }
        case LayerType::Lstm:
        {
            auto cLayer = PolymorphicDowncast<const LstmLayer*>(&layer);
            const LstmDescriptor& descriptor = cLayer->GetParameters();

            // All inputs.
            const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                       dataType);
            const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
                                                               dataType);
            const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
                                                             dataType);
            // All outputs
            const TensorInfo& scratchBuffer = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
            const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
            const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
            const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(3).GetTensorInfo(), dataType);

            // Basic parameters
            const TensorInfo& inputToForgetWeights
                    = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
            const TensorInfo& inputToCellWeights
                    = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
            const TensorInfo& inputToOutputWeights
                    = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
            const TensorInfo& recurrentToForgetWeights
                    = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
            const TensorInfo& recurrentToCellWeights
                    = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
            const TensorInfo& recurrentToOutputWeights
                    = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
            const TensorInfo& forgetGateBias
                    = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
            const TensorInfo& cellBias
                    = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
            const TensorInfo& outputGateBias
                    = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);

            LstmInputParamsInfo paramsInfo;

            paramsInfo.m_InputToForgetWeights = &inputToForgetWeights;
            paramsInfo.m_InputToCellWeights = &inputToCellWeights;
            paramsInfo.m_InputToOutputWeights = &inputToOutputWeights;
            paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
            paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights;
            paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
            paramsInfo.m_ForgetGateBias = &forgetGateBias;
            paramsInfo.m_CellBias = &cellBias;
            paramsInfo.m_OutputGateBias = &outputGateBias;

            // Optional parameters
            TensorInfo optInputToInputWeights;
            TensorInfo optRecurrentToInputWeights;
            TensorInfo optCellToInputWeights;
            TensorInfo optInputGateBias;
            TensorInfo optProjectionWeights;
            TensorInfo optProjectionBias;
            TensorInfo optCellToForgetWeights;
            TensorInfo optCellToOutputWeights;
            TensorInfo optInputLayerNormWeights;
            TensorInfo optForgetLayerNormWeights;
            TensorInfo optCellLayerNormWeights;
            TensorInfo optOutputLayerNormWeights;

            if(!descriptor.m_CifgEnabled)
            {
                optInputToInputWeights =
                    OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
                paramsInfo.m_InputToInputWeights = &optInputToInputWeights;

                optRecurrentToInputWeights =
                    OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
                paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
                optInputGateBias =
                    OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
                paramsInfo.m_InputGateBias = &optInputGateBias;
            }

            if(descriptor.m_ProjectionEnabled)
            {
                optProjectionWeights =
                    OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
                paramsInfo.m_ProjectionWeights = &optProjectionWeights;
                if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
                {
                    optProjectionBias =
                        OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
                    paramsInfo.m_ProjectionBias = &optProjectionBias;
                }
            }

            if(descriptor.m_PeepholeEnabled)
            {
                if(!descriptor.m_CifgEnabled)
                {
                    optCellToInputWeights =
                        OverrideDataType(cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
                                         dataType);
                    paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
                }
                optCellToForgetWeights =
                    OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
                paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
                optCellToOutputWeights =
                    OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
                paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
            }

            if(descriptor.m_LayerNormEnabled)
            {
                if (!descriptor.m_CifgEnabled)
                {
                    optInputLayerNormWeights = OverrideDataType(
                            cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
                    paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
                }

                optForgetLayerNormWeights = OverrideDataType(
                        cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
                paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;

                optCellLayerNormWeights = OverrideDataType(
                        cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
                paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;

                optOutputLayerNormWeights = OverrideDataType(
                        cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
                paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
            }

            result = layerSupportObject.IsLstmSupported(
                                            input,
                                            outputStateIn,
                                            cellStateIn,
                                            scratchBuffer,
                                            outputStateOut,
                                            cellStateOut,
                                            output,
                                            descriptor,
                                            paramsInfo,
                                            reason);
            break;
        }
        case LayerType::Maximum:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsMaximumSupported(OverrideDataType(input0, dataType),
                                                           OverrideDataType(input1, dataType),
                                                           OverrideDataType(output, dataType),
                                                           reason);
            break;
        }
        case LayerType::MemCopy:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsMemCopySupported(OverrideDataType(input, dataType),
                                                           OverrideDataType(output, dataType),
                                                           reason);
            break;
        }
        case LayerType::MemImport:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsMemImportSupported(OverrideDataType(input, dataType),
                                                             OverrideDataType(output, dataType),
                                                             reason);
            break;
        }
        case LayerType::Merge:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsMergeSupported(OverrideDataType(input0, dataType),
                                                         OverrideDataType(input1, dataType),
                                                         OverrideDataType(output, dataType),
                                                         reason);
            break;
        }
        case LayerType::Concat:
        {
            auto cLayer = PolymorphicDowncast<const ConcatLayer*>(&layer);

            // Get vector of all inputs.
            auto getTensorInfo = [&dataType](const InputSlot& slot)
            {
                return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
            };

            auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo);
            auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo);
            std::vector<TensorInfo> inputs(beginI, endI);

            auto getTensorInfoPtr = [](const TensorInfo& info)
            {
                return &info;
            };

            auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
            auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
            std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);

            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsConcatSupported(inputPtrs, output, cLayer->GetParameters(), reason);
            break;
        }
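        // (A note on the two-step transform above: the Is*Supported overloads for
        // multi-input layers take std::vector<const TensorInfo*>, so the TensorInfos
        // are first materialised into a vector and only then have pointers taken
        // into it; pointers into the temporary transform range would dangle.)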
        case LayerType::Multiplication:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsMultiplicationSupported(
                                            OverrideDataType(input0, dataType),
                                            OverrideDataType(input1, dataType),
                                            OverrideDataType(output, dataType),
                                            reason);
            break;
        }
        case LayerType::Normalization:
        {
            auto cLayer = PolymorphicDowncast<const NormalizationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsNormalizationSupported(OverrideDataType(input, dataType),
                                                                 OverrideDataType(output, dataType),
                                                                 cLayer->GetParameters(),
                                                                 reason);
            break;
        }
        case LayerType::Output:
        {
            const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = layerSupportObject.IsOutputSupported(OverrideDataType(output, dataType), reason);
            break;
        }
        case LayerType::Permute:
        {
            auto cLayer = PolymorphicDowncast<const PermuteLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsPermuteSupported(OverrideDataType(input, dataType),
                                                           OverrideDataType(output, dataType),
                                                           cLayer->GetParameters(),
                                                           reason);
            break;
        }
        case LayerType::Pad:
        {
            auto cLayer = PolymorphicDowncast<const PadLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsPadSupported(
                                            OverrideDataType(input, dataType),
                                            OverrideDataType(output, dataType),
                                            cLayer->GetParameters(),
                                            reason);
            break;
        }
        case LayerType::Pooling2d:
        {
            auto cLayer = PolymorphicDowncast<const Pooling2dLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsPooling2dSupported(OverrideDataType(input, dataType),
                                                             OverrideDataType(output, dataType),
                                                             cLayer->GetParameters(),
                                                             reason);
            break;
        }
        case LayerType::PreCompiled:
        {
            auto cLayer = PolymorphicDowncast<const PreCompiledLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = layerSupportObject.IsPreCompiledSupported(OverrideDataType(input, dataType),
                                                               cLayer->GetParameters(),
                                                               reason);
            break;
        }
        case LayerType::Quantize:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsQuantizeSupported(input, output, reason);
            break;
        }
        case LayerType::QLstm:
        {
            auto cLayer = PolymorphicDowncast<const QLstmLayer*>(&layer);
            const QLstmDescriptor& descriptor = cLayer->GetParameters();

            // Inputs
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& previousOutputIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& previousCellStateIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();

            // Outputs
            const TensorInfo& outputStateOut = layer.GetOutputSlot(0).GetTensorInfo();
            const TensorInfo& cellStateOut = layer.GetOutputSlot(1).GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(2).GetTensorInfo();

            // Lstm parameters
            LstmInputParamsInfo paramsInfo;

            // Basic parameters
            ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToForgetWeights.get() != nullptr);
            ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToCellWeights.get() != nullptr);
            ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToOutputWeights.get() != nullptr);
            paramsInfo.m_InputToForgetWeights = &cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo();
            paramsInfo.m_InputToCellWeights = &cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo();
            paramsInfo.m_InputToOutputWeights = &cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo();

            paramsInfo.m_RecurrentToForgetWeights =
                    &cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo();
            paramsInfo.m_RecurrentToCellWeights =
                    &cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo();
            paramsInfo.m_RecurrentToOutputWeights =
                    &cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo();

            paramsInfo.m_ForgetGateBias = &cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo();
            paramsInfo.m_CellBias = &cLayer->m_BasicParameters.m_CellBias->GetTensorInfo();
            paramsInfo.m_OutputGateBias = &cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo();

            if(!descriptor.m_CifgEnabled)
            {
                paramsInfo.m_InputToInputWeights = &cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo();
                paramsInfo.m_RecurrentToInputWeights =
                        &cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo();
                paramsInfo.m_InputGateBias = &cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo();
            }

            if(descriptor.m_ProjectionEnabled)
            {
                paramsInfo.m_ProjectionWeights = &cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo();

                // Projection bias is optional even if projection is enabled
                if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
                {
                    paramsInfo.m_ProjectionBias = &cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo();
                }
            }

            if(descriptor.m_PeepholeEnabled)
            {
                if (!descriptor.m_CifgEnabled)
                {
                    paramsInfo.m_CellToInputWeights =
                            &cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo();
                }

                paramsInfo.m_CellToForgetWeights =
                        &cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo();
                paramsInfo.m_CellToOutputWeights = &cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo();
            }

            if(descriptor.m_LayerNormEnabled)
            {
                if (!descriptor.m_CifgEnabled)
                {
                    paramsInfo.m_InputLayerNormWeights =
                            &cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo();
                }

                paramsInfo.m_ForgetLayerNormWeights =
                        &cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo();
                paramsInfo.m_CellLayerNormWeights =
                        &cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo();
                paramsInfo.m_OutputLayerNormWeights =
                        &cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo();
            }

            result = layerSupportObject.IsQLstmSupported(input,
                                                         previousOutputIn,
                                                         previousCellStateIn,
                                                         outputStateOut,
                                                         cellStateOut,
                                                         output,
                                                         descriptor,
                                                         paramsInfo,
                                                         reason);
            break;
        }
        case LayerType::QuantizedLstm:
        {
            auto cLayer = PolymorphicDowncast<const QuantizedLstmLayer*>(&layer);

            // Inputs
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& previousCellStateIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& previousOutputIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();

            // Outputs
            const TensorInfo& cellStateOut = layer.GetOutputSlot(0).GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(1).GetTensorInfo();

            // QuantizedLstm parameters
            QuantizedLstmInputParamsInfo paramsInfo;

            paramsInfo.m_InputToInputWeights =
                    &cLayer->m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo();
            paramsInfo.m_InputToForgetWeights =
                    &cLayer->m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo();
            paramsInfo.m_InputToCellWeights =
                    &cLayer->m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo();
            paramsInfo.m_InputToOutputWeights =
                    &cLayer->m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo();

            paramsInfo.m_RecurrentToInputWeights =
                    &cLayer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo();
            paramsInfo.m_RecurrentToForgetWeights =
                    &cLayer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo();
            paramsInfo.m_RecurrentToCellWeights =
                    &cLayer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo();
            paramsInfo.m_RecurrentToOutputWeights =
                    &cLayer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo();

            paramsInfo.m_InputGateBias =
                    &cLayer->m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo();
            paramsInfo.m_ForgetGateBias =
                    &cLayer->m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo();
            paramsInfo.m_CellBias =
                    &cLayer->m_QuantizedLstmParameters.m_CellBias->GetTensorInfo();
            paramsInfo.m_OutputGateBias =
                    &cLayer->m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo();

            result = layerSupportObject.IsQuantizedLstmSupported(input,
                                                                 previousCellStateIn,
                                                                 previousOutputIn,
                                                                 cellStateOut,
                                                                 output,
                                                                 paramsInfo,
                                                                 reason);
            break;
        }
        case LayerType::Division:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsDivisionSupported(
                                            OverrideDataType(input0, dataType),
                                            OverrideDataType(input1, dataType),
                                            OverrideDataType(output, dataType),
                                            reason);
            break;
        }
        case LayerType::Rank:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsRankSupported(OverrideDataType(input, dataType),
                                                        OverrideDataType(output, dataType),
                                                        reason);
            break;
        }
        case LayerType::Reshape:
        {
            auto cLayer = PolymorphicDowncast<const ReshapeLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsReshapeSupported(OverrideDataType(input, dataType),
                                                           OverrideDataType(output, dataType),
                                                           cLayer->GetParameters(),
                                                           reason);
            break;
        }
        case LayerType::Resize:
        {
            auto cLayer = PolymorphicDowncast<const ResizeLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsResizeSupported(OverrideDataType(input, dataType),
                                                          OverrideDataType(output, dataType),
                                                          cLayer->GetParameters(),
                                                          reason);
            break;
        }
        case LayerType::Shape:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsShapeSupported(OverrideDataType(input, dataType),
                                                         OverrideDataType(output, dataType),
                                                         reason);
            break;
        }
        case LayerType::Slice:
        {
            auto cLayer = PolymorphicDowncast<const SliceLayer*>(&layer);

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsSliceSupported(OverrideDataType(input, dataType),
                                                         OverrideDataType(output, dataType),
                                                         cLayer->GetParameters(),
                                                         reason);
            break;
        }
        case LayerType::Softmax:
        {
            auto cLayer = PolymorphicDowncast<const SoftmaxLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsSoftmaxSupported(OverrideDataType(input, dataType),
                                                           OverrideDataType(output, dataType),
                                                           cLayer->GetParameters(),
                                                           reason);
            break;
        }
        case LayerType::SpaceToBatchNd:
        {
            auto cLayer = PolymorphicDowncast<const SpaceToBatchNdLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsSpaceToBatchNdSupported(OverrideDataType(input, dataType),
                                                                  OverrideDataType(output, dataType),
                                                                  cLayer->GetParameters(),
                                                                  reason);
            break;
        }
        case LayerType::SpaceToDepth:
        {
            auto cLayer = PolymorphicDowncast<const SpaceToDepthLayer*>(&layer);

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsSpaceToDepthSupported(OverrideDataType(input, dataType),
                                                                OverrideDataType(output, dataType),
                                                                cLayer->GetParameters(),
                                                                reason);
            break;
        }
        case LayerType::Splitter:
        {
            auto cLayer = PolymorphicDowncast<const SplitterLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();

            // Get vector of all outputs.
            auto getTensorInfo = [&dataType](const OutputSlot& slot)
            {
                return OverrideDataType(slot.GetTensorInfo(), dataType);
            };
            auto beginI = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfo);
            auto endI = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfo);
            std::vector<TensorInfo> outputs(beginI, endI);

            const std::vector<std::reference_wrapper<TensorInfo>> outputPtrs(outputs.begin(), outputs.end());

            result = layerSupportObject.IsSplitterSupported(OverrideDataType(input, dataType),
                                                            outputPtrs,
                                                            cLayer->GetParameters(),
                                                            reason);
            break;
        }
        case LayerType::Stack:
        {
            auto cLayer = PolymorphicDowncast<const StackLayer*>(&layer);

            // Get vector of all inputs.
            auto getTensorInfo = [&dataType](const InputSlot& slot)
            {
                return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
            };
            auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo);
            auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo);
            std::vector<TensorInfo> inputs(beginI, endI);

            auto getTensorInfoPtr = [](const TensorInfo& info)
            {
                return &info;
            };
            auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
            auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
            std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);

            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason);
            break;
        }
        case LayerType::StandIn:
        {
            auto cLayer = PolymorphicDowncast<const StandInLayer*>(&layer);

            // Get vectors of all inputs and outputs.
            auto getTensorInfoIn = [&dataType](const InputSlot& slot)
            {
                return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
            };
            auto getTensorInfoOut = [&dataType](const OutputSlot& slot)
            {
                return OverrideDataType(slot.GetTensorInfo(), dataType);
            };
            auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfoIn);
            auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfoIn);
            std::vector<TensorInfo> inputs(beginI, endI);

            auto beginO = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfoOut);
            auto endO = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfoOut);
            std::vector<TensorInfo> outputs(beginO, endO);

            auto getTensorInfoPtr = [](const TensorInfo& info)
            {
                return &info;
            };
            auto beginPtrI = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
            auto endPtrI = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
            std::vector<const TensorInfo*> inputPtrs(beginPtrI, endPtrI);

            auto beginPtrO = MakeTransformIterator(outputs.begin(), getTensorInfoPtr);
            auto endPtrO = MakeTransformIterator(outputs.end(), getTensorInfoPtr);
            std::vector<const TensorInfo*> outputPtrs(beginPtrO, endPtrO);

            result = layerSupportObject.IsStandInSupported(inputPtrs,
                                                           outputPtrs,
                                                           cLayer->GetParameters(),
                                                           reason);
            break;
        }
        case LayerType::StridedSlice:
        {
            auto cLayer = PolymorphicDowncast<const StridedSliceLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsStridedSliceSupported(OverrideDataType(input, dataType),
                                                                OverrideDataType(output, dataType),
                                                                cLayer->GetParameters(),
                                                                reason);
            break;
        }
        case LayerType::Subtraction:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsSubtractionSupported(
                                            OverrideDataType(input0, dataType),
                                            OverrideDataType(input1, dataType),
                                            OverrideDataType(output, dataType),
                                            reason);
            break;
        }
        case LayerType::Switch:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output0 = layer.GetOutputSlot(0).GetTensorInfo();
            const TensorInfo& output1 = layer.GetOutputSlot(1).GetTensorInfo();
            result = layerSupportObject.IsSwitchSupported(OverrideDataType(input0, dataType),
                                                          OverrideDataType(input1, dataType),
                                                          OverrideDataType(output0, dataType),
                                                          OverrideDataType(output1, dataType),
                                                          reason);
            break;
        }
        case LayerType::Mean:
        {
            auto cLayer = PolymorphicDowncast<const MeanLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsMeanSupported(
                                            OverrideDataType(input, dataType),
                                            OverrideDataType(output, dataType),
                                            cLayer->GetParameters(),
                                            reason);
            break;
        }
        case LayerType::Minimum:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsMinimumSupported(OverrideDataType(input0, dataType),
                                                           OverrideDataType(input1, dataType),
                                                           OverrideDataType(output, dataType),
                                                           reason);
            break;
        }
        case LayerType::Prelu:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& alpha = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsPreluSupported(OverrideDataType(input, dataType),
                                                         OverrideDataType(alpha, dataType),
                                                         OverrideDataType(output, dataType),
                                                         reason);
            break;
        }
        case LayerType::Transpose:
        {
            auto cLayer = PolymorphicDowncast<const TransposeLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject.IsTransposeSupported(OverrideDataType(input, dataType),
                                                             OverrideDataType(output, dataType),
                                                             cLayer->GetParameters(),
                                                             reason);
            break;
        }
        case LayerType::TransposeConvolution2d:
        {
            auto cLayer = PolymorphicDowncast<const TransposeConvolution2dLayer*>(&layer);

            const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                      dataType);
            const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);

            const TransposeConvolution2dDescriptor& descriptor = cLayer->GetParameters();

            Optional<TensorInfo> biases;
            if (descriptor.m_BiasEnabled)
            {
                ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
                biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
                                          GetBiasTypeFromWeightsType(dataType));
            }

            ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
            const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);

            result = layerSupportObject.IsTransposeConvolution2dSupported(input,
                                                                          output,
                                                                          descriptor,
                                                                          weights,
                                                                          biases,
                                                                          reason);
            break;
        }
        case LayerType::Reduce:
        {
            auto cLayer = PolymorphicDowncast<const ReduceLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject.IsReduceSupported(OverrideDataType(input, dataType),
                                                          OverrideDataType(output, dataType),
                                                          cLayer->GetParameters(),
                                                          reason);
            break;
        }
        case LayerType::UnidirectionalSequenceLstm:
        {
            auto cLayer = PolymorphicDowncast<const UnidirectionalSequenceLstmLayer*>(&layer);
            const UnidirectionalSequenceLstmDescriptor& descriptor = cLayer->GetParameters();

            // All inputs.
            const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                       dataType);
            const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
                                                               dataType);
            const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
                                                             dataType);
            // Outputs
            const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);

            // Basic parameters
            const TensorInfo& inputToForgetWeights
                    = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
            const TensorInfo& inputToCellWeights
                    = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
            const TensorInfo& inputToOutputWeights
                    = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
            const TensorInfo& recurrentToForgetWeights
                    = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
            const TensorInfo& recurrentToCellWeights
                    = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
            const TensorInfo& recurrentToOutputWeights
                    = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
            const TensorInfo& forgetGateBias
                    = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
            const TensorInfo& cellBias
                    = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
            const TensorInfo& outputGateBias
                    = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);

            LstmInputParamsInfo paramsInfo;

            paramsInfo.m_InputToForgetWeights = &inputToForgetWeights;
            paramsInfo.m_InputToCellWeights = &inputToCellWeights;
            paramsInfo.m_InputToOutputWeights = &inputToOutputWeights;
            paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
            paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights;
            paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
            paramsInfo.m_ForgetGateBias = &forgetGateBias;
            paramsInfo.m_CellBias = &cellBias;
            paramsInfo.m_OutputGateBias = &outputGateBias;

            // Optional parameters
            TensorInfo optInputToInputWeights;
            TensorInfo optRecurrentToInputWeights;
            TensorInfo optCellToInputWeights;
            TensorInfo optInputGateBias;
            TensorInfo optProjectionWeights;
            TensorInfo optProjectionBias;
            TensorInfo optCellToForgetWeights;
            TensorInfo optCellToOutputWeights;
            TensorInfo optInputLayerNormWeights;
            TensorInfo optForgetLayerNormWeights;
            TensorInfo optCellLayerNormWeights;
            TensorInfo optOutputLayerNormWeights;

            if(!descriptor.m_CifgEnabled)
            {
                optInputToInputWeights =
                    OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
                paramsInfo.m_InputToInputWeights = &optInputToInputWeights;

                optRecurrentToInputWeights =
                    OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
                paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
                optInputGateBias =
                    OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
                paramsInfo.m_InputGateBias = &optInputGateBias;
            }

            if(descriptor.m_ProjectionEnabled)
            {
                optProjectionWeights =
                    OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
                paramsInfo.m_ProjectionWeights = &optProjectionWeights;
                if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
                {
                    optProjectionBias =
                        OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
                    paramsInfo.m_ProjectionBias = &optProjectionBias;
                }
            }

            if(descriptor.m_PeepholeEnabled)
            {
                if(!descriptor.m_CifgEnabled)
                {
                    optCellToInputWeights =
                        OverrideDataType(cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
                                         dataType);
                    paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
                }
                optCellToForgetWeights =
                    OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
                paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
                optCellToOutputWeights =
                    OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
                paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
            }

            if(descriptor.m_LayerNormEnabled)
            {
                if (!descriptor.m_CifgEnabled)
                {
                    optInputLayerNormWeights = OverrideDataType(
                            cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
                    paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
                }

                optForgetLayerNormWeights = OverrideDataType(
                        cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
                paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;

                optCellLayerNormWeights = OverrideDataType(
                        cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
                paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;

                optOutputLayerNormWeights = OverrideDataType(
                        cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
                paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
            }

            Optional<TensorInfo> hiddenStateOut;
            Optional<TensorInfo> cellStateOut;

            result = layerSupportObject.IsUnidirectionalSequenceLstmSupported(input,
                                                                              outputStateIn,
                                                                              cellStateIn,
                                                                              output,
                                                                              hiddenStateOut,
                                                                              cellStateOut,
                                                                              descriptor,
                                                                              paramsInfo,
                                                                              reason);
            break;
        }
        default:
        {
            ARMNN_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
            reason.value() = "Unrecognised layer type";
            result = false;
            break;
        }
    }
    return result;
}
1463 
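// The four IsLayerSupported() overloads below all forward to IsLayerConfigurationSupported(),
// differing only in whether the backend id is supplied by the caller or taken from the layer
// itself, and whether ModelOptions are passed along.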
1464 bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
1465  const IConnectableLayer& connectableLayer,
1466  Optional<DataType> dataType,
1467  std::string& outReasonIfUnsupported)
1468 {
1469  return IsLayerConfigurationSupported(backendId, connectableLayer, dataType, outReasonIfUnsupported);
1470 }
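// A minimal caller-side sketch (assumed usage, not part of this file): query support for a
// layer on the reference backend and report the reason on failure.
//   std::string reason;
//   if (!IWorkloadFactory::IsLayerSupported(Compute::CpuRef, layer, DataType::Float32, reason))
//   {
//       ARMNN_LOG(warning) << layer.GetName() << " is unsupported: " << reason;
//   }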
1471 
1472 bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
1473  Optional<DataType> dataType,
1474  std::string& outReasonIfUnsupported)
1475 {
1476  auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
1477  return IsLayerConfigurationSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
1478 }
1479 
1480 // TODO merge with defaulted modelOptions above
1481 bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
1482  Optional<DataType> dataType,
1483  std::string& outReasonIfUnsupported,
1484  const ModelOptions& modelOptions)
1485 {
1486  auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
1487  return IsLayerConfigurationSupported(layer->GetBackendId(),
1488  connectableLayer,
1489  dataType,
1490  outReasonIfUnsupported,
1491  modelOptions);
1492 }
1493 
1494 bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
1495  const IConnectableLayer& connectableLayer,
1496  Optional<DataType> dataType,
1497  std::string& outReasonIfUnsupported,
1498  const ModelOptions& modelOptions)
1499 {
1500  return IsLayerConfigurationSupported(backendId,
1501  connectableLayer,
1502  dataType,
1503  outReasonIfUnsupported,
1504  modelOptions);
1505 }
1506 
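// Default implementations of the workload-creation hooks. Each returns an empty
// std::unique_ptr<IWorkload>, i.e. "no workload"; a concrete backend's factory overrides
// only the functions it actually supports.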
1507 std::unique_ptr<IWorkload> IWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& /*descriptor*/,
1508  const WorkloadInfo& /*info*/) const
1509 {
1510  return std::unique_ptr<IWorkload>();
1511 }
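// A hypothetical backend override might look like this (MyWorkloadFactory and
// MyActivationWorkload are illustrative names, not ArmNN types):
//   std::unique_ptr<IWorkload> MyWorkloadFactory::CreateActivation(
//       const ActivationQueueDescriptor& descriptor, const WorkloadInfo& info) const
//   {
//       return std::make_unique<MyActivationWorkload>(descriptor, info);
//   }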
1512 
1513 std::unique_ptr<IWorkload> IWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& /*descriptor*/,
1514  const WorkloadInfo& /*info*/) const
1515 {
1516  return std::unique_ptr<IWorkload>();
1517 }
1518 
1519 std::unique_ptr<IWorkload> IWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& /*descriptor*/,
1520  const WorkloadInfo& /*info*/) const
1521 {
1522  return std::unique_ptr<IWorkload>();
1523 }
1524 
1525 std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchNormalization(
1526  const BatchNormalizationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
1527 {
1528  return std::unique_ptr<IWorkload>();
1529 }
1530 
1531 std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& /*descriptor*/,
1532  const WorkloadInfo& /*info*/) const
1533 {
1534  return std::unique_ptr<IWorkload>();
1535 }
1536 
1537 std::unique_ptr<IWorkload> IWorkloadFactory::CreateCast(const CastQueueDescriptor& /*descriptor*/,
1538  const WorkloadInfo& /*info*/) const
1539 {
1540  return std::unique_ptr<IWorkload>();
1541 }
1542 
1543 std::unique_ptr<IWorkload> IWorkloadFactory::CreateChannelShuffle(const ChannelShuffleQueueDescriptor& /*descriptor*/,
1544  const WorkloadInfo& /*info*/) const
1545 {
1546  return std::unique_ptr<IWorkload>();
1547 }
1548 
1549 std::unique_ptr<IWorkload> IWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& /*descriptor*/,
1550  const WorkloadInfo& /*info*/) const
1551 {
1552  return std::unique_ptr<IWorkload>();
1553 }
1554 
1555 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& /*descriptor*/,
1556  const WorkloadInfo& /*info*/) const
1557 {
1558  return std::unique_ptr<IWorkload>();
1559 }
1560 
1561 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& /*descriptor*/,
1562  const WorkloadInfo& /*info*/) const
1563 {
1564  return std::unique_ptr<IWorkload>();
1565 }
1566 
1567 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor& /*descriptor*/,
1568  const WorkloadInfo& /*info*/) const
1569 {
1570  return std::unique_ptr<IWorkload>();
1571 }
1572 
1573 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& /*descriptor*/,
1574  const WorkloadInfo& /*info*/) const
1575 {
1576  return std::unique_ptr<IWorkload>();
1577 }
1578 
1579 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToBf16(const ConvertFp32ToBf16QueueDescriptor& /*descriptor*/,
1580  const WorkloadInfo& /*info*/) const
1581 {
1582  return std::unique_ptr<IWorkload>();
1583 }
1584 
1585 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& /*descriptor*/,
1586  const WorkloadInfo& /*info*/) const
1587 {
1588  return std::unique_ptr<IWorkload>();
1589 }
1590 
1591 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& /*descriptor*/,
1592  const WorkloadInfo& /*info*/) const
1593 {
1594  return std::unique_ptr<IWorkload>();
1595 }
1596 
1597 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution3d(const Convolution3dQueueDescriptor& /*descriptor*/,
1598  const WorkloadInfo& /*info*/) const
1599 {
1600  return std::unique_ptr<IWorkload>();
1601 }
1602 
1603 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDebug(const DebugQueueDescriptor& /*descriptor*/,
1604  const WorkloadInfo& /*info*/) const
1605 {
1606  return std::unique_ptr<IWorkload>();
1607 }
1608 
1609 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& /*descriptor*/,
1610  const WorkloadInfo& /*info*/) const
1611 {
1612  return std::unique_ptr<IWorkload>();
1613 }
1614 
1615 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthwiseConvolution2d(
1616  const DepthwiseConvolution2dQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
1617 {
1618  return std::unique_ptr<IWorkload>();
1619 }
1620 
1621 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDequantize(
1622  const DequantizeQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
1623 {
1624  return std::unique_ptr<IWorkload>();
1625 }
1626 
1627 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDetectionPostProcess(
1628  const DetectionPostProcessQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
1629 {
1630  return std::unique_ptr<IWorkload>();
1631 }
1632 
1633 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& /*descriptor*/,
1634  const WorkloadInfo& /*info*/) const
1635 {
1636  return std::unique_ptr<IWorkload>();
1637 }
1638 
1639 std::unique_ptr<IWorkload> IWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& /*descriptor*/,
1640  const WorkloadInfo& /*info*/) const
1641 {
1642  return std::unique_ptr<IWorkload>();
1643 }
1644 
1645 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& /*descriptor*/,
1646  const WorkloadInfo& /*info*/) const
1647 {
1648  return std::unique_ptr<IWorkload>();
1649 }
1650 
1651 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFill(const FillQueueDescriptor& /*descriptor*/,
1652  const WorkloadInfo& /*info*/) const
1653 {
1654  return std::unique_ptr<IWorkload>();
1655 }
1656 
1657 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFloor(const FloorQueueDescriptor& /*descriptor*/,
1658  const WorkloadInfo& /*info*/) const
1659 {
1660  return std::unique_ptr<IWorkload>();
1661 }
1662 
1663 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& /*descriptor*/,
1664  const WorkloadInfo& /*info*/) const
1665 {
1666  return std::unique_ptr<IWorkload>();
1667 }
1668 
1669 std::unique_ptr<IWorkload> IWorkloadFactory::CreateGather(const GatherQueueDescriptor& /*descriptor*/,
1670  const WorkloadInfo& /*info*/) const
1671 {
1672  return std::unique_ptr<IWorkload>();
1673 }
1674 
1675 std::unique_ptr<IWorkload> IWorkloadFactory::CreateInstanceNormalization(
1676  const InstanceNormalizationQueueDescriptor& /*descriptor*/,
1677  const WorkloadInfo& /*info*/) const
1678 {
1679  return std::unique_ptr<IWorkload>();
1680 }
1681 
1682 std::unique_ptr<IWorkload> IWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& /*descriptor*/,
1683  const WorkloadInfo& /*info*/) const
1684 {
1685  return std::unique_ptr<IWorkload>();
1686 }
1687 
1688 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogicalBinary(const LogicalBinaryQueueDescriptor& /*desc*/,
1689  const WorkloadInfo& /*info*/) const
1690 {
1691  return std::unique_ptr<IWorkload>();
1692 }
1693 
1694 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogicalUnary(const ElementwiseUnaryQueueDescriptor& /*desc*/,
1695  const WorkloadInfo& /*info*/) const
1696 {
1697  return std::unique_ptr<IWorkload>();
1698 }
1699 
1700 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& /*descriptor*/,
1701  const WorkloadInfo& /*info*/) const
1702 {
1703  return std::unique_ptr<IWorkload>();
1704 }
1705 
1706 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLstm(const LstmQueueDescriptor& /*descriptor*/,
1707  const WorkloadInfo& /*info*/) const
1708 {
1709  return std::unique_ptr<IWorkload>();
1710 }
1711 
1712 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& /*descriptor*/,
1713  const WorkloadInfo& /*info*/) const
1714 {
1715  return std::unique_ptr<IWorkload>();
1716 }
1717 
1718 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMean(const MeanQueueDescriptor& /*descriptor*/,
1719  const WorkloadInfo& /*info*/) const
1720 {
1721  return std::unique_ptr<IWorkload>();
1722 }
1723 
1724 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& /*descriptor*/,
1725  const WorkloadInfo& /*info*/) const
1726 {
1727  return std::unique_ptr<IWorkload>();
1728 }
1729 
1730 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& /*descriptor*/,
1731  const WorkloadInfo& /*info*/) const
1732 {
1733  return std::unique_ptr<IWorkload>();
1734 }
1735 
1736 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerge(const MergeQueueDescriptor& /*descriptor*/,
1737  const WorkloadInfo& /*info*/) const
1738 {
1739  return std::unique_ptr<IWorkload>();
1740 }
1741 
1742 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& /*descriptor*/,
1743  const WorkloadInfo& /*info*/) const
1744 {
1745  return std::unique_ptr<IWorkload>();
1746 }
1747 
1748 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& /*descriptor*/,
1749  const WorkloadInfo& /*info*/) const
1750 {
1751  return std::unique_ptr<IWorkload>();
1752 }
1753 
1754 std::unique_ptr<IWorkload> IWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& /*descriptor*/,
1755  const WorkloadInfo& /*info*/) const
1756 {
1757  return std::unique_ptr<IWorkload>();
1758 }
1759 
1760 std::unique_ptr<IWorkload> IWorkloadFactory::CreateOutput(const OutputQueueDescriptor& /*descriptor*/,
1761  const WorkloadInfo& /*info*/) const
1762 {
1763  return std::unique_ptr<IWorkload>();
1764 }
1765 
1766 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePad(const PadQueueDescriptor& /*descriptor*/,
1767  const WorkloadInfo& /*info*/) const
1768 {
1769  return std::unique_ptr<IWorkload>();
1770 }
1771 
1772 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& /*descriptor*/,
1773  const WorkloadInfo& /*info*/) const
1774 {
1775  return std::unique_ptr<IWorkload>();
1776 }
1777 
1778 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& /*descriptor*/,
1779  const WorkloadInfo& /*info*/) const
1780 {
1781  return std::unique_ptr<IWorkload>();
1782 }
1783 
1784 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& /*descriptor*/,
1785  const WorkloadInfo& /*info*/) const
1786 {
1787  return std::unique_ptr<IWorkload>();
1788 }
1789 
1790 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePrelu(const PreluQueueDescriptor &/*descriptor*/,
1791  const WorkloadInfo &/*info*/) const
1792 {
1793  return std::unique_ptr<IWorkload>();
1794 }
1795 
1796 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& /*descriptor*/,
1797  const WorkloadInfo& /*info*/) const
1798 {
1799  return std::unique_ptr<IWorkload>();
1800 }
1801 
1802 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQLstm(const QLstmQueueDescriptor& /*descriptor*/,
1803  const WorkloadInfo& /*info*/) const
1804 {
1805  return std::unique_ptr<IWorkload>();
1806 }
1807 
1808 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& /*descriptor*/,
1809  const WorkloadInfo& /*info*/) const
1810 {
1811  return std::unique_ptr<IWorkload>();
1812 }
1813 std::unique_ptr<IWorkload> IWorkloadFactory::CreateRank(const RankQueueDescriptor& /*descriptor*/,
1814  const WorkloadInfo& /*info*/) const
1815 {
1816  return std::unique_ptr<IWorkload>();
1817 }
1818 
1819 std::unique_ptr<IWorkload> IWorkloadFactory::CreateReduce(const ReduceQueueDescriptor& /*descriptor*/,
1820  const WorkloadInfo& /*info*/) const
1821 {
1822  return std::unique_ptr<IWorkload>();
1823 }
1824 
1825 std::unique_ptr<IWorkload> IWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& /*descriptor*/,
1826  const WorkloadInfo& /*info*/) const
1827 {
1828  return std::unique_ptr<IWorkload>();
1829 }
1830 
1831 std::unique_ptr<IWorkload> IWorkloadFactory::CreateResize(const ResizeQueueDescriptor& /*descriptor*/,
1832  const WorkloadInfo& /*info*/) const
1833 {
1834  return std::unique_ptr<IWorkload>();
1835 }
1836 
1837 std::unique_ptr<IWorkload> IWorkloadFactory::CreateShape(const ShapeQueueDescriptor& /*descriptor*/,
1838  const WorkloadInfo& /*info*/) const
1839 {
1840  return std::unique_ptr<IWorkload>();
1841 }
1842 
1843 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSlice(const SliceQueueDescriptor& /*descriptor*/,
1844  const WorkloadInfo& /*info*/) const
1845 {
1846  return std::unique_ptr<IWorkload>();
1847 }
1848 
1849 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& /*descriptor*/,
1850  const WorkloadInfo& /*info*/) const
1851 {
1852  return std::unique_ptr<IWorkload>();
1853 }
1854 
1855 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& /*descriptor*/,
1856  const WorkloadInfo& /*info*/) const
1857 {
1858  return std::unique_ptr<IWorkload>();
1859 }
1860 
1861 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& /*descriptor*/,
1862  const WorkloadInfo& /*info*/) const
1863 {
1864  return std::unique_ptr<IWorkload>();
1865 }
1866 
1867 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& /*descriptor*/,
1868  const WorkloadInfo& /*info*/) const
1869 {
1870  return std::unique_ptr<IWorkload>();
1871 }
1872 
1873 std::unique_ptr<IWorkload> IWorkloadFactory::CreateStack(const StackQueueDescriptor& /*descriptor*/,
1874  const WorkloadInfo& /*info*/) const
1875 {
1876  return std::unique_ptr<IWorkload>();
1877 }
1878 
1879 std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& /*descriptor*/,
1880  const WorkloadInfo& /*info*/) const
1881 {
1882  return std::unique_ptr<IWorkload>();
1883 }
1884 
1885 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& /*descriptor*/,
1886  const WorkloadInfo& /*info*/) const
1887 {
1888  return std::unique_ptr<IWorkload>();
1889 }
1890 
1891 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSwitch(const SwitchQueueDescriptor& /*descriptor*/,
1892  const WorkloadInfo& /*info*/) const
1893 {
1894  return std::unique_ptr<IWorkload>();
1895 }
1896 
1897 std::unique_ptr<IWorkload> IWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& /*descriptor*/,
1898  const WorkloadInfo& /*info*/) const
1899 {
1900  return std::unique_ptr<IWorkload>();
1901 }
1902 
1903 std::unique_ptr<IWorkload> IWorkloadFactory::CreateTransposeConvolution2d(
1904  const TransposeConvolution2dQueueDescriptor& /*descriptor*/,
1905  const WorkloadInfo& /*info*/) const
1906 {
1907  return std::unique_ptr<IWorkload>();
1908 }
1909 
1910 std::unique_ptr<IWorkload> IWorkloadFactory::CreateUnidirectionalSequenceLstm(
1911  const UnidirectionalSequenceLstmQueueDescriptor& /*descriptor*/,
1912  const WorkloadInfo& /*info*/) const
1913 {
1914  return std::unique_ptr<IWorkload>();
1915 }
1916 
1917 } // namespace armnn