ArmNN
 21.05
WorkloadFactory.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include <Layer.hpp>
7 #include <LayersFwd.hpp>
8 
9 #include <armnn/Types.hpp>
10 #include <armnn/LayerSupport.hpp>
12 #include <armnn/BackendHelper.hpp>
16 
19 
21 
22 #include <sstream>
23 
24 namespace armnn
25 {
26 
27 namespace
28 {
29 using LayerList = std::list<Layer*>;
30 using Iterator = LayerList::const_iterator; // Const so pointers in the list can't be modified externally.
31 
32 const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
33 {
34  if (!type)
35  {
36  return info;
37  }
38 
39  return TensorInfo(info.GetShape(), type.value(), info.GetQuantizationScale(), info.GetQuantizationOffset());
40 }
41 
42 } // anonymous namespace
43 
44 bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
45  const IConnectableLayer& connectableLayer,
46  Optional<DataType> dataType,
47  std::string& outReasonIfUnsupported,
48  const ModelOptions& modelOptions)
49 {
50  Optional<std::string&> reason = outReasonIfUnsupported;
51  bool result;
52  const Layer& layer = *(PolymorphicDowncast<const Layer*>(&connectableLayer));
53 
54  auto const& backendRegistry = BackendRegistryInstance();
55  if (!backendRegistry.IsBackendRegistered(backendId))
56  {
57  std::stringstream ss;
58  ss << connectableLayer.GetName() << " is not supported on " << backendId
59  << " because this backend is not registered.";
60 
61  outReasonIfUnsupported = ss.str();
62  return false;
63  }
64 
65  auto backendFactory = backendRegistry.GetFactory(backendId);
66  auto backendObject = backendFactory();
67  auto layerSupportObject = LayerSupportHandle(backendObject->GetLayerSupport(modelOptions), backendId);
68 
69  switch(layer.GetType())
70  {
72  {
73  auto cLayer = PolymorphicDowncast<const ActivationLayer*>(&layer);
74  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
75  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
76  result = layerSupportObject.IsActivationSupported(
77  OverrideDataType(input, dataType),
78  OverrideDataType(output, dataType),
79  cLayer->GetParameters(),
80  reason);
81  break;
82  }
84  {
85  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
86  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
87  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
88  result = layerSupportObject.IsAdditionSupported(
89  OverrideDataType(input0, dataType),
90  OverrideDataType(input1, dataType),
91  OverrideDataType(output, dataType),
92  reason);
93  break;
94  }
96  {
97  auto cLayer = PolymorphicDowncast<const ArgMinMaxLayer*>(&layer);
98  const ArgMinMaxDescriptor& descriptor = cLayer->GetParameters();
99 
100  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
101  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
102  result = layerSupportObject.IsArgMinMaxSupported(
103  OverrideDataType(input, dataType),
104  OverrideDataType(output, DataType::Signed32),
105  descriptor,
106  reason);
107  break;
108  }
110  {
111  auto cLayer = PolymorphicDowncast<const BatchNormalizationLayer*>(&layer);
112  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
113  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
114  const TensorInfo& mean = cLayer->m_Mean->GetTensorInfo();
115  const TensorInfo& var = cLayer->m_Variance->GetTensorInfo();
116  const TensorInfo& beta = cLayer->m_Beta->GetTensorInfo();
117  const TensorInfo& gamma = cLayer->m_Gamma->GetTensorInfo();
118  result = layerSupportObject.IsBatchNormalizationSupported(
119  OverrideDataType(input, dataType),
120  OverrideDataType(output, dataType),
121  OverrideDataType(mean, dataType),
122  OverrideDataType(var, dataType),
123  OverrideDataType(beta, dataType),
124  OverrideDataType(gamma, dataType),
125  cLayer->GetParameters(),
126  reason);
127  break;
128  }
130  {
131  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
132  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
133  auto cLayer = PolymorphicDowncast<const BatchToSpaceNdLayer*>(&layer);
134 
135  result = layerSupportObject.IsBatchToSpaceNdSupported(OverrideDataType(input, dataType),
136  OverrideDataType(output, dataType),
137  cLayer->GetParameters(),
138  reason);
139  break;
140  }
141  case LayerType::Cast:
142  {
143  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
144  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
145 
146  result = layerSupportObject.IsCastSupported(OverrideDataType(input, dataType),
147  OverrideDataType(output, dataType),
148  reason);
149  break;
150  }
152  {
153  auto cLayer = PolymorphicDowncast<const ComparisonLayer*>(&layer);
154 
155  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
156  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
157  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
158 
159  result = layerSupportObject.IsComparisonSupported(OverrideDataType(input0, dataType),
160  OverrideDataType(input1, dataType),
161  OverrideDataType(output, DataType::Boolean),
162  cLayer->GetParameters(),
163  reason);
164  break;
165  }
166  case LayerType::Constant:
167  {
168  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
169  result = layerSupportObject.IsConstantSupported(OverrideDataType(output, dataType), reason);
170  break;
171  }
173  {
174  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
175  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
176  result = layerSupportObject.IsConvertBf16ToFp32Supported(input, output, reason);
177  break;
178  }
180  {
181  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
182  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
183  result = layerSupportObject.IsConvertFp16ToFp32Supported(input, output, reason);
184  break;
185  }
187  {
188  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
189  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
190  result = layerSupportObject.IsConvertFp32ToBf16Supported(input, output, reason);
191  break;
192  }
194  {
195  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
196  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
197  result = layerSupportObject.IsConvertFp32ToFp16Supported(input, output, reason);
198  break;
199  }
201  {
202  auto cLayer = PolymorphicDowncast<const Convolution2dLayer*>(&layer);
203 
204  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
205  dataType);
206  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
207  ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
208 
209  const Convolution2dDescriptor& descriptor = cLayer->GetParameters();
210 
211  // Construct optional biases object based on the value of m_BiasEnabled
212  Optional<TensorInfo> biases;
213  if (descriptor.m_BiasEnabled)
214  {
215  biases =
216  OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
217  }
218 
219  result = layerSupportObject.IsConvolution2dSupported(
220  input,
221  output,
222  descriptor,
223  OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
224  biases,
225  reason);
226  break;
227  }
228  case LayerType::Debug:
229  {
230  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
231  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
232 
233  result = layerSupportObject.IsDebugSupported(OverrideDataType(input, dataType),
234  OverrideDataType(output, dataType),
235  reason);
236  break;
237  }
239  {
240  auto cLayer = PolymorphicDowncast<const DepthToSpaceLayer*>(&layer);
241 
242  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
243  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
244 
245  result = layerSupportObject.IsDepthToSpaceSupported(OverrideDataType(input, dataType),
246  OverrideDataType(output, dataType),
247  cLayer->GetParameters(),
248  reason);
249  break;
250  }
252  {
253  auto cLayer = PolymorphicDowncast<const DepthwiseConvolution2dLayer*>(&layer);
254  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
255  dataType);
256  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
257  ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
258 
259  const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();
260 
261  // Construct optional biases object based on the value of m_BiasEnabled
262  Optional<TensorInfo> biases;
263  if (descriptor.m_BiasEnabled)
264  {
265  biases =
266  OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
267  }
268 
269  result = layerSupportObject.IsDepthwiseConvolutionSupported(
270  input,
271  output,
272  descriptor,
273  OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
274  biases,
275  reason);
276  break;
277  }
279  {
280  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
281  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
282 
283  result = layerSupportObject.IsDequantizeSupported(input,
284  OverrideDataType(output, dataType),
285  reason);
286  break;
287  }
289  {
290  auto cLayer = PolymorphicDowncast<const DetectionPostProcessLayer*>(&layer);
291  const TensorInfo& boxEncodings = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
292  const TensorInfo& scores = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
293  const TensorInfo& anchors = cLayer->m_Anchors->GetTensorInfo();
294 
295  const TensorInfo& detectionBoxes = layer.GetOutputSlot(0).GetTensorInfo();
296  const TensorInfo& detectionClasses = layer.GetOutputSlot(1).GetTensorInfo();
297  const TensorInfo& detectionScores = layer.GetOutputSlot(2).GetTensorInfo();
298  const TensorInfo& numDetections = layer.GetOutputSlot(3).GetTensorInfo();
299 
300  const DetectionPostProcessDescriptor& descriptor = cLayer->GetParameters();
301  result = layerSupportObject.IsDetectionPostProcessSupported(boxEncodings,
302  scores,
303  anchors,
304  detectionBoxes,
305  detectionClasses,
306  detectionScores,
307  numDetections,
308  descriptor,
309  reason);
310  break;
311  }
313  {
314  auto cLayer = PolymorphicDowncast<const ElementwiseUnaryLayer*>(&layer);
315 
316  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
317  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
318 
319  result = layerSupportObject.IsElementwiseUnarySupported(OverrideDataType(input, dataType),
320  OverrideDataType(output, dataType),
321  cLayer->GetParameters(),
322  reason);
323  break;
324  }
325  case LayerType::Fill:
326  {
327  auto cLayer = PolymorphicDowncast<const FillLayer*>(&layer);
328  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
329  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
330  const FillDescriptor& descriptor = cLayer->GetParameters();
331 
332  result = layerSupportObject.IsFillSupported(
333  OverrideDataType(input, dataType),
334  OverrideDataType(output, dataType),
335  descriptor,
336  reason);
337  break;
338  }
340  {
341  auto cLayer = PolymorphicDowncast<const FakeQuantizationLayer*>(&layer);
342  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
343  result = layerSupportObject.IsFakeQuantizationSupported(OverrideDataType(input, dataType),
344  cLayer->GetParameters(),
345  reason);
346  break;
347  }
348  case LayerType::Floor:
349  {
350  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
351  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
352  result = layerSupportObject.IsFloorSupported(OverrideDataType(input, dataType),
353  OverrideDataType(output, dataType),
354  reason);
355  break;
356  }
358  {
359  auto cLayer = PolymorphicDowncast<const FullyConnectedLayer*>(&layer);
360  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
361  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
362 
363  const FullyConnectedDescriptor& descriptor = cLayer->GetParameters();
364  TensorInfo weightsInfo;
365  const TensorInfo* weightsInfoPtr = nullptr;
366 
367  if (descriptor.m_ConstantWeights)
368  {
369  ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
370  weightsInfo = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);
371  }
372  else
373  {
374  weightsInfo = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(), dataType);
375 
376  }
377  weightsInfoPtr = &weightsInfo;
378 
379  TensorInfo biasInfo;
380  const TensorInfo* biasInfoPtr = nullptr;
381  static const TensorInfo dummyBFloat16Bias(TensorShape({1,1,1,1}), DataType::BFloat16);
382  static const TensorInfo dummyFloat16Bias(TensorShape({1,1,1,1}), DataType::Float16);
383  static const TensorInfo dummyFloat32Bias(TensorShape({1,1,1,1}), DataType::Float32);
384  static const TensorInfo dummyQA8Bias(TensorShape({1,1,1,1}), DataType::Signed32);
385 
386  if (descriptor.m_BiasEnabled)
387  {
388  if(descriptor.m_ConstantWeights)
389  {
390  ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
391  biasInfo = OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
392  biasInfoPtr = &biasInfo;
393  }
394  else
395  {
396  biasInfo = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(), dataType);
397  biasInfoPtr = &biasInfo;
398  }
399  }
400  else
401  {
402  // If biases are not enabled pass a dummy tensorinfo for the validation
403  switch(input.GetDataType())
404  {
405  case DataType::BFloat16:
406  {
407  biasInfoPtr = &dummyBFloat16Bias;
408  break;
409  }
410  case DataType::Float16:
411  {
412  biasInfoPtr = &dummyFloat16Bias;
413  break;
414  }
415  case DataType::Float32:
416  {
417  biasInfoPtr = &dummyFloat32Bias;
418  break;
419  }
420  case DataType::QAsymmU8:
421  case DataType::QAsymmS8:
422  case DataType::QSymmS8:
423  case DataType::QSymmS16:
424  {
425  biasInfoPtr = &dummyQA8Bias;
426  break;
427  }
428  default:
429  {
430  ARMNN_ASSERT_MSG(false, "Unexpected bias type");
431  }
432  }
433  }
434  result = layerSupportObject.IsFullyConnectedSupported(
435  OverrideDataType(input, dataType),
436  OverrideDataType(output, dataType),
437  *weightsInfoPtr,
438  *biasInfoPtr,
439  descriptor,
440  reason);
441  break;
442  }
443  case LayerType::Gather:
444  {
445  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
446  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
447  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
448  auto cLayer = PolymorphicDowncast<const GatherLayer*>(&layer);
449  const GatherDescriptor& descriptor = cLayer->GetParameters();
450  result = layerSupportObject.IsGatherSupported(OverrideDataType(input0, dataType),
451  input1,
452  OverrideDataType(output, dataType),
453  descriptor,
454  reason);
455  break;
456  }
457  case LayerType::Input:
458  {
459  const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo();
460  result = layerSupportObject.IsInputSupported(OverrideDataType(input, dataType), reason);
461  break;
462  }
464  {
465  auto cLayer = PolymorphicDowncast<const InstanceNormalizationLayer*>(&layer);
466  const InstanceNormalizationDescriptor& descriptor = cLayer->GetParameters();
467 
468  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
469  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
470 
471  result = layerSupportObject.IsInstanceNormalizationSupported(
472  OverrideDataType(input, dataType),
473  OverrideDataType(output, dataType),
474  descriptor,
475  reason);
476  break;
477  }
479  {
480  auto cLayer = PolymorphicDowncast<const L2NormalizationLayer*>(&layer);
481  const L2NormalizationDescriptor& descriptor = cLayer->GetParameters();
482 
483  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
484  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
485 
486  result = layerSupportObject.IsL2NormalizationSupported(
487  OverrideDataType(input, dataType),
488  OverrideDataType(output, dataType),
489  descriptor,
490  reason);
491  break;
492  }
494  {
495  auto cLayer = PolymorphicDowncast<const LogicalBinaryLayer*>(&layer);
496 
497  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
498  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
499  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
500 
501  result = layerSupportObject.IsLogicalBinarySupported(input0,
502  input1,
503  output,
504  cLayer->GetParameters(),
505  reason);
506  break;
507  }
509  {
510  auto cLayer = PolymorphicDowncast<const LogSoftmaxLayer*>(&layer);
511 
512  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
513  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
514 
515  result = layerSupportObject.IsLogSoftmaxSupported(OverrideDataType(input, dataType),
516  OverrideDataType(output, dataType),
517  cLayer->GetParameters(),
518  reason);
519  break;
520  }
521  case LayerType::Lstm:
522  {
523  auto cLayer = PolymorphicDowncast<const LstmLayer*>(&layer);
524  const LstmDescriptor& descriptor = cLayer->GetParameters();
525 
526  // All inputs.
527  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
528  dataType);
529  const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
530  dataType);
531  const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
532  dataType);
533  // All outputs
534  const TensorInfo& scratchBuffer = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
535  const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
536  const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
537  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(3).GetTensorInfo(), dataType);
538 
539  // Basic parameters
540  const TensorInfo& inputToForgetWeights
541  = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
542  const TensorInfo& inputToCellWeights
543  = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
544  const TensorInfo& inputToOutputWeights
545  = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
546  const TensorInfo& recurrentToForgetWeights
547  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
548  const TensorInfo& recurrentToCellWeights
549  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
550  const TensorInfo& recurrentToOutputWeights
551  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
552  const TensorInfo& forgetGateBias
553  = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
554  const TensorInfo& cellBias
555  = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
556  const TensorInfo& outputGateBias
557  = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);
558 
559  LstmInputParamsInfo paramsInfo;
560 
561  paramsInfo.m_InputToForgetWeights = &inputToForgetWeights;
562  paramsInfo.m_InputToCellWeights = &inputToCellWeights;
563  paramsInfo.m_InputToOutputWeights = &inputToOutputWeights;
564  paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
565  paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights;
566  paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
567  paramsInfo.m_ForgetGateBias = &forgetGateBias;
568  paramsInfo.m_CellBias = &cellBias;
569  paramsInfo.m_OutputGateBias = &outputGateBias;
570 
571 
572  // Optional parameters
573  TensorInfo optInputToInputWeights;
574  TensorInfo optRecurrentToInputWeights;
575  TensorInfo optCellToInputWeights;
576  TensorInfo optInputGateBias;
577  TensorInfo optProjectionWeights;
578  TensorInfo optProjectionBias;
579  TensorInfo optCellToForgetWeights;
580  TensorInfo optCellToOutputWeights;
581  TensorInfo optInputLayerNormWeights;
582  TensorInfo optForgetLayerNormWeights;
583  TensorInfo optCellLayerNormWeights;
584  TensorInfo optOutputLayerNormWeights;
585 
586  if(!descriptor.m_CifgEnabled)
587  {
588  optInputToInputWeights =
589  OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
590  paramsInfo.m_InputToInputWeights = &optInputToInputWeights;
591 
592  optRecurrentToInputWeights =
593  OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
594  paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
595  optInputGateBias =
596  OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
597  paramsInfo.m_InputGateBias = &optInputGateBias;
598  }
599 
600  if(descriptor.m_ProjectionEnabled)
601  {
602  optProjectionWeights =
603  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
604  paramsInfo.m_ProjectionWeights = &optProjectionWeights;
605  if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
606  {
607  optProjectionBias =
608  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
609  paramsInfo.m_ProjectionBias = &optProjectionBias;
610  }
611  }
612 
613  if(descriptor.m_PeepholeEnabled)
614  {
615  if(!descriptor.m_CifgEnabled)
616  {
617  optCellToInputWeights =
618  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
619  dataType);
620  paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
621  }
622  optCellToForgetWeights =
623  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
624  paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
625  optCellToOutputWeights =
626  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
627  paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
628  }
629 
630  if(descriptor.m_LayerNormEnabled)
631  {
632  if (!descriptor.m_CifgEnabled)
633  {
634  optInputLayerNormWeights = OverrideDataType(
635  cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
636  paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
637  }
638 
639  optForgetLayerNormWeights = OverrideDataType(
640  cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
641  paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;
642 
643  optCellLayerNormWeights = OverrideDataType(
644  cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
645  paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;
646 
647  optOutputLayerNormWeights = OverrideDataType(
648  cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
649  paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
650  }
651 
652  result = layerSupportObject.IsLstmSupported(
653  input,
654  outputStateIn,
655  cellStateIn,
656  scratchBuffer,
657  outputStateOut,
658  cellStateOut,
659  output,
660  descriptor,
661  paramsInfo,
662  reason);
663  break;
664  }
665  case LayerType::Maximum:
666  {
667  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
668  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
669  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
670 
671  result = layerSupportObject.IsMaximumSupported(OverrideDataType(input0, dataType),
672  OverrideDataType(input1, dataType),
673  OverrideDataType(output, dataType),
674  reason);
675  break;
676  }
677  case LayerType::MemCopy:
678  {
679  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
680  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
681 
682  result = layerSupportObject.IsMemCopySupported(OverrideDataType(input, dataType),
683  OverrideDataType(output, dataType),
684  reason);
685  break;
686  }
688  {
689  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
690  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
691 
692  result = layerSupportObject.IsMemImportSupported(OverrideDataType(input, dataType),
693  OverrideDataType(output, dataType),
694  reason);
695  break;
696  }
697  case LayerType::Merge:
698  {
699  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
700  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
701  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
702 
703  result = layerSupportObject.IsMergeSupported(OverrideDataType(input0, dataType),
704  OverrideDataType(input1, dataType),
705  OverrideDataType(output, dataType),
706  reason);
707  break;
708  }
709  case LayerType::Concat:
710  {
711  auto cLayer = PolymorphicDowncast<const ConcatLayer*>(&layer);
712 
713  // Get vector of all inputs.
714  auto getTensorInfo = [&dataType](const InputSlot& slot)
715  {
716  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
717  };
718 
719  auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo);
720  auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo);
721  std::vector<TensorInfo> inputs(beginI, endI);
722 
723  auto getTensorInfoPtr = [](const TensorInfo& info)
724  {
725  return &info;
726  };
727 
728  auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
729  auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
730  std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
731 
732  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
733 
734  result = layerSupportObject.IsConcatSupported(inputPtrs, output, cLayer->GetParameters(), reason);
735 
736 
737  break;
738  }
740  {
741  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
742  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
743  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
744  result = layerSupportObject.IsMultiplicationSupported(
745  OverrideDataType(input0, dataType),
746  OverrideDataType(input1, dataType),
747  OverrideDataType(output, dataType),
748  reason);
749  break;
750  }
752  {
753  auto cLayer = PolymorphicDowncast<const NormalizationLayer*>(&layer);
754  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
755  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
756  result = layerSupportObject.IsNormalizationSupported(OverrideDataType(input, dataType),
757  OverrideDataType(output, dataType),
758  cLayer->GetParameters(),
759  reason);
760  break;
761  }
762  case LayerType::Output:
763  {
764  const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
765  result = layerSupportObject.IsOutputSupported(OverrideDataType(output, dataType), reason);
766  break;
767  }
768  case LayerType::Permute:
769  {
770  auto cLayer = PolymorphicDowncast<const PermuteLayer*>(&layer);
771  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
772  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
773  result = layerSupportObject.IsPermuteSupported(OverrideDataType(input, dataType),
774  OverrideDataType(output, dataType),
775  cLayer->GetParameters(),
776  reason);
777  break;
778  }
779  case LayerType::Pad:
780  {
781  auto cLayer = PolymorphicDowncast<const PadLayer*>(&layer);
782  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
783  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
784  result = layerSupportObject.IsPadSupported(
785  OverrideDataType(input, dataType),
786  OverrideDataType(output, dataType),
787  cLayer->GetParameters(),
788  reason);
789  break;
790  }
792  {
793  auto cLayer = PolymorphicDowncast<const Pooling2dLayer*>(&layer);
794  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
795  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
796  result = layerSupportObject.IsPooling2dSupported(OverrideDataType(input, dataType),
797  OverrideDataType(output, dataType),
798  cLayer->GetParameters(),
799  reason);
800  break;
801  }
803  {
804  auto cLayer = PolymorphicDowncast<const PreCompiledLayer*>(&layer);
805  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
806  result = layerSupportObject.IsPreCompiledSupported(OverrideDataType(input, dataType),
807  cLayer->GetParameters(),
808  reason);
809  break;
810  }
811  case LayerType::Quantize:
812  {
813  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
814  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
815  result = layerSupportObject.IsQuantizeSupported(input, output, reason);
816  break;
817  }
818  case LayerType::QLstm:
819  {
820  auto cLayer = PolymorphicDowncast<const QLstmLayer*>(&layer);
821  const QLstmDescriptor& descriptor = cLayer->GetParameters();
822 
823  // Inputs
824  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
825  const TensorInfo& previousOutputIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
826  const TensorInfo& previousCellStateIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();
827 
828  // Outputs
829  const TensorInfo& outputStateOut = layer.GetOutputSlot(0).GetTensorInfo();
830  const TensorInfo& cellStateOut = layer.GetOutputSlot(1).GetTensorInfo();
831  const TensorInfo& output = layer.GetOutputSlot(2).GetTensorInfo();
832 
833  // Lstm parameters
834  LstmInputParamsInfo paramsInfo;
835 
836  // Basic parameters
837  paramsInfo.m_InputToForgetWeights = &cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo();
838  paramsInfo.m_InputToCellWeights = &cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo();
839  paramsInfo.m_InputToOutputWeights = &cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo();
840 
841  paramsInfo.m_RecurrentToForgetWeights =
842  &cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo();
843  paramsInfo.m_RecurrentToCellWeights =
844  &cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo();
845  paramsInfo.m_RecurrentToOutputWeights =
846  &cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo();
847 
848  paramsInfo.m_ForgetGateBias = &cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo();
849  paramsInfo.m_CellBias = &cLayer->m_BasicParameters.m_CellBias->GetTensorInfo();
850  paramsInfo.m_OutputGateBias = &cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo();
851 
852  if(!descriptor.m_CifgEnabled)
853  {
854  paramsInfo.m_InputToInputWeights = &cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo();
855  paramsInfo.m_RecurrentToInputWeights =
856  &cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo();
857  paramsInfo.m_InputGateBias = &cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo();
858  }
859 
860  if(descriptor.m_ProjectionEnabled)
861  {
862  paramsInfo.m_ProjectionWeights = &cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo();
863 
864  // Projection bias is optional even if projection is enabled
865  if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
866  {
867  paramsInfo.m_ProjectionBias = &cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo();
868  }
869  }
870 
871  if(descriptor.m_PeepholeEnabled)
872  {
873  if (!descriptor.m_CifgEnabled)
874  {
875  paramsInfo.m_CellToInputWeights =
876  &cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo();
877  }
878 
879  paramsInfo.m_CellToForgetWeights =
880  &cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo();
881  paramsInfo.m_CellToOutputWeights = &cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo();
882  }
883 
884  if(descriptor.m_LayerNormEnabled)
885  {
886  if (!descriptor.m_CifgEnabled)
887  {
888  paramsInfo.m_InputLayerNormWeights =
889  &cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo();
890  }
891 
892  paramsInfo.m_ForgetLayerNormWeights =
893  &cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo();
894  paramsInfo.m_CellLayerNormWeights =
895  &cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo();
896  paramsInfo.m_OutputLayerNormWeights =
897  &cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo();
898  }
899 
900  result = layerSupportObject.IsQLstmSupported(input,
901  previousOutputIn,
902  previousCellStateIn,
903  outputStateOut,
904  cellStateOut,
905  output,
906  descriptor,
907  paramsInfo,
908  reason);
909  break;
910  }
912  {
913  auto cLayer = PolymorphicDowncast<const QuantizedLstmLayer*>(&layer);
914 
915  // Inputs
916  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
917  const TensorInfo& previousCellStateIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
918  const TensorInfo& previousOutputIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();
919 
920  // Outputs
921  const TensorInfo& cellStateOut = layer.GetOutputSlot(0).GetTensorInfo();
922  const TensorInfo& output = layer.GetOutputSlot(1).GetTensorInfo();
923 
924  // QuantizedLstm parameters
925  QuantizedLstmInputParamsInfo paramsInfo;
926 
927  paramsInfo.m_InputToInputWeights =
928  &cLayer->m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo();
929  paramsInfo.m_InputToForgetWeights =
930  &cLayer->m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo();
931  paramsInfo.m_InputToCellWeights =
932  &cLayer->m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo();
933  paramsInfo.m_InputToOutputWeights =
934  &cLayer->m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo();
935 
936  paramsInfo.m_RecurrentToInputWeights =
937  &cLayer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo();
938  paramsInfo.m_RecurrentToForgetWeights =
939  &cLayer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo();
940  paramsInfo.m_RecurrentToCellWeights =
941  &cLayer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo();
942  paramsInfo.m_RecurrentToOutputWeights =
943  &cLayer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo();
944 
945  paramsInfo.m_InputGateBias =
946  &cLayer->m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo();
947  paramsInfo.m_ForgetGateBias =
948  &cLayer->m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo();
949  paramsInfo.m_CellBias =
950  &cLayer->m_QuantizedLstmParameters.m_CellBias->GetTensorInfo();
951  paramsInfo.m_OutputGateBias =
952  &cLayer->m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo();;
953 
954  result = layerSupportObject.IsQuantizedLstmSupported(input,
955  previousCellStateIn,
956  previousOutputIn,
957  cellStateOut,
958  output,
959  paramsInfo,
960  reason);
961  break;
962  }
963  case LayerType::Division:
964  {
965  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
966  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
967  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
968  result = layerSupportObject.IsDivisionSupported(
969  OverrideDataType(input0, dataType),
970  OverrideDataType(input1, dataType),
971  OverrideDataType(output, dataType),
972  reason);
973  break;
974  }
975  case LayerType::Rank:
976  {
977  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
978  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
979  result = layerSupportObject.IsRankSupported(OverrideDataType(input, dataType),
980  OverrideDataType(output, dataType),
981  reason);
982  break;
983  }
984  case LayerType::Reshape:
985  {
986  auto cLayer = PolymorphicDowncast<const ReshapeLayer*>(&layer);
987  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
988  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
989  result = layerSupportObject.IsReshapeSupported(OverrideDataType(input, dataType),
990  OverrideDataType(output, dataType),
991  cLayer->GetParameters(),
992  reason);
993  break;
994  }
995  case LayerType::Resize:
996  {
997  auto cLayer = PolymorphicDowncast<const ResizeLayer*>(&layer);
998  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
999  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1000  result = layerSupportObject.IsResizeSupported(OverrideDataType(input, dataType),
1001  OverrideDataType(output, dataType),
1002  cLayer->GetParameters(),
1003  reason);
1004  break;
1005  }
1006  case LayerType::Slice:
1007  {
1008  auto cLayer = PolymorphicDowncast<const SliceLayer*>(&layer);
1009 
1010  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1011  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1012 
1013  result = layerSupportObject.IsSliceSupported(OverrideDataType(input, dataType),
1014  OverrideDataType(output, dataType),
1015  cLayer->GetParameters(),
1016  reason);
1017  break;
1018  }
1019  case LayerType::Softmax:
1020  {
1021  auto cLayer = PolymorphicDowncast<const SoftmaxLayer*>(&layer);
1022  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1023  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1024  result = layerSupportObject.IsSoftmaxSupported(OverrideDataType(input, dataType),
1025  OverrideDataType(output, dataType),
1026  cLayer->GetParameters(),
1027  reason);
1028  break;
1029  }
1031  {
1032  auto cLayer = PolymorphicDowncast<const SpaceToBatchNdLayer*>(&layer);
1033  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1034  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1035  result = layerSupportObject.IsSpaceToBatchNdSupported(OverrideDataType(input, dataType),
1036  OverrideDataType(output, dataType),
1037  cLayer->GetParameters(),
1038  reason);
1039  break;
1040  }
1042  {
1043  auto cLayer = PolymorphicDowncast<const SpaceToDepthLayer*>(&layer);
1044 
1045  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1046  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1047 
1048  result = layerSupportObject.IsSpaceToDepthSupported(OverrideDataType(input, dataType),
1049  OverrideDataType(output, dataType),
1050  cLayer->GetParameters(),
1051  reason);
1052  break;
1053  }
1054  case LayerType::Splitter:
1055  {
1056  auto cLayer = PolymorphicDowncast<const SplitterLayer*>(&layer);
1057  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1058 
1059  // Get vector of all outputs.
1060  auto getTensorInfo = [&dataType](const OutputSlot& slot)
1061  {
1062  return OverrideDataType(slot.GetTensorInfo(), dataType);
1063  };
1064  auto beginI = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfo);
1065  auto endI = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfo);
1066  std::vector<TensorInfo> outputs(beginI, endI);
1067 
1068  const std::vector<std::reference_wrapper<TensorInfo>> outputPtrs(outputs.begin(), outputs.end());
1069 
1070  result = layerSupportObject.IsSplitterSupported(OverrideDataType(input, dataType),
1071  outputPtrs,
1072  cLayer->GetParameters(),
1073  reason);
1074  break;
1075  }
1076  case LayerType::Stack:
1077  {
1078  auto cLayer = PolymorphicDowncast<const StackLayer*>(&layer);
1079 
1080  // Get vector of all inputs.
1081  auto getTensorInfo = [&dataType](const InputSlot& slot)
1082  {
1083  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
1084  };
1085  auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo);
1086  auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo);
1087  std::vector<TensorInfo> inputs(beginI, endI);
1088 
1089  auto getTensorInfoPtr = [](const TensorInfo& info)
1090  {
1091  return &info;
1092  };
1093  auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
1094  auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
1095  std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
1096 
1097  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1098 
1099  result = layerSupportObject.IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason);
1100 
1101  break;
1102  }
1103  case LayerType::StandIn:
1104  {
1105  auto cLayer = PolymorphicDowncast<const StandInLayer*>(&layer);
1106 
1107  // Get vector of all inputs.
1108  auto getTensorInfoIn = [&dataType](const InputSlot& slot)
1109  {
1110  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
1111  };
1112  auto getTensorInfoOut = [&dataType](const OutputSlot& slot)
1113  {
1114  return OverrideDataType(slot.GetTensorInfo(), dataType);
1115  };
1116  auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfoIn);
1117  auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfoIn);
1118  std::vector<TensorInfo> inputs(beginI, endI);
1119 
1120  auto beginO = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfoOut);
1121  auto endO = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfoOut);
1122  std::vector<TensorInfo> outputs(beginO, endO);
1123 
1124 
1125  auto getTensorInfoPtr = [](const TensorInfo& info)
1126  {
1127  return &info;
1128  };
1129  auto beginPtrI = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
1130  auto endPtrI = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
1131  std::vector<const TensorInfo*> inputPtrs(beginPtrI, endPtrI);
1132 
1133  auto beginPtrO = MakeTransformIterator(outputs.begin(), getTensorInfoPtr);
1134  auto endPtrO = MakeTransformIterator(outputs.end(), getTensorInfoPtr);
1135  std::vector<const TensorInfo*> outputPtrs(beginPtrO, endPtrO);
1136 
1137 
1138  result = layerSupportObject.IsStandInSupported(inputPtrs,
1139  outputPtrs,
1140  cLayer->GetParameters(),
1141  reason);
1142  break;
1143  }
1145  {
1146  auto cLayer = PolymorphicDowncast<const StridedSliceLayer*>(&layer);
1147  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1148  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1149  result = layerSupportObject.IsStridedSliceSupported(OverrideDataType(input, dataType),
1150  OverrideDataType(output, dataType),
1151  cLayer->GetParameters(),
1152  reason);
1153  break;
1154  }
1156  {
1157  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1158  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1159  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1160  result = layerSupportObject.IsSubtractionSupported(
1161  OverrideDataType(input0, dataType),
1162  OverrideDataType(input1, dataType),
1163  OverrideDataType(output, dataType),
1164  reason);
1165  break;
1166  }
1167  case LayerType::Switch:
1168  {
1169  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1170  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1171  const TensorInfo& output0 = layer.GetOutputSlot(0).GetTensorInfo();
1172  const TensorInfo& output1 = layer.GetOutputSlot(1).GetTensorInfo();
1173  result = layerSupportObject.IsSwitchSupported(OverrideDataType(input0, dataType),
1174  OverrideDataType(input1, dataType),
1175  OverrideDataType(output0, dataType),
1176  OverrideDataType(output1, dataType),
1177  reason);
1178  break;
1179  }
1180  case LayerType::Mean:
1181  {
1182  auto cLayer = PolymorphicDowncast<const MeanLayer*>(&layer);
1183  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1184  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1185  result = layerSupportObject.IsMeanSupported(
1186  OverrideDataType(input, dataType),
1187  OverrideDataType(output, dataType),
1188  cLayer->GetParameters(),
1189  reason);
1190  break;
1191  }
1192  case LayerType::Minimum:
1193  {
1194  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1195  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1196  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1197  result = layerSupportObject.IsMinimumSupported(OverrideDataType(input0, dataType),
1198  OverrideDataType(input1, dataType),
1199  OverrideDataType(output, dataType),
1200  reason);
1201  break;
1202  }
1203  case LayerType::Prelu:
1204  {
1205  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1206  const TensorInfo& alpha = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1207  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1208  result = layerSupportObject.IsPreluSupported(OverrideDataType(input, dataType),
1209  OverrideDataType(alpha, dataType),
1210  OverrideDataType(output, dataType),
1211  reason);
1212  break;
1213  }
1214  case LayerType::Transpose:
1215  {
1216  auto cLayer = PolymorphicDowncast<const TransposeLayer*>(&layer);
1217  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1218  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1219  result = layerSupportObject.IsTransposeSupported(OverrideDataType(input, dataType),
1220  OverrideDataType(output, dataType),
1221  cLayer->GetParameters(),
1222  reason);
1223  break;
1224  }
1226  {
1227  auto cLayer = PolymorphicDowncast<const TransposeConvolution2dLayer*>(&layer);
1228 
1229  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
1230  dataType);
1231  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
1232 
1233  const TransposeConvolution2dDescriptor& descriptor = cLayer->GetParameters();
1234 
1235  Optional<TensorInfo> biases;
1236  if (descriptor.m_BiasEnabled)
1237  {
1238  ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
1239  biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
1240  GetBiasTypeFromWeightsType(dataType));
1241  }
1242 
1243  ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
1244  const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);
1245 
1246  result = layerSupportObject.IsTransposeConvolution2dSupported(input,
1247  output,
1248  descriptor,
1249  weights,
1250  biases,
1251  reason);
1252 
1253  break;
1254  }
1255  case LayerType::Reduce:
1256  {
1257  auto cLayer = PolymorphicDowncast<const ReduceLayer*>(&layer);
1258  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1259  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1260 
1261  result = layerSupportObject.IsReduceSupported(OverrideDataType(input, dataType),
1262  OverrideDataType(output, dataType),
1263  cLayer->GetParameters(),
1264  reason);
1265  break;
1266  }
1267  default:
1268  {
1269  ARMNN_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
1270  reason.value() = "Unrecognised layer type";
1271  result = false;
1272  break;
1273  }
1274  }
1275  return result;
1276 }
1277 
1279  const IConnectableLayer& connectableLayer,
1280  Optional<DataType> dataType,
1281  std::string& outReasonIfUnsupported)
1282 {
1283  return IsLayerConfigurationSupported(backendId, connectableLayer, dataType, outReasonIfUnsupported);
1284 }
1285 
1287  Optional<DataType> dataType,
1288  std::string& outReasonIfUnsupported)
1289 {
1290  auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
1291  return IsLayerConfigurationSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
1292 }
1293 
1294 // TODO merge with defaulted modelOptions above
1296  Optional<DataType> dataType,
1297  std::string& outReasonIfUnsupported,
1298  const ModelOptions& modelOptions)
1299 {
1300  auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
1301  return IsLayerConfigurationSupported(layer->GetBackendId(),
1302  connectableLayer,
1303  dataType,
1304  outReasonIfUnsupported,
1305  modelOptions);
1306 }
1307 
1309  const IConnectableLayer& connectableLayer,
1310  Optional<DataType> dataType,
1311  std::string& outReasonIfUnsupported,
1312  const ModelOptions& modelOptions)
1313 {
1314  return IsLayerConfigurationSupported(backendId,
1315  connectableLayer,
1316  dataType,
1317  outReasonIfUnsupported,
1318  modelOptions);
1319 }
1320 
1321 // Default Implementations
1322 std::unique_ptr<IWorkload> IWorkloadFactory::CreateAbs(const AbsQueueDescriptor& /*descriptor*/,
1323  const WorkloadInfo& /*info*/) const
1324 {
1325  return std::unique_ptr<IWorkload>();
1326 }
1327 
1328 std::unique_ptr<IWorkload> IWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& /*descriptor*/,
1329  const WorkloadInfo& /*info*/) const
1330 {
1331  return std::unique_ptr<IWorkload>();
1332 }
1333 
1334 std::unique_ptr<IWorkload> IWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& /*descriptor*/,
1335  const WorkloadInfo& /*info*/) const
1336 {
1337  return std::unique_ptr<IWorkload>();
1338 }
1339 
1340 std::unique_ptr<IWorkload> IWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& /*descriptor*/,
1341  const WorkloadInfo& /*info*/) const
1342 {
1343  return std::unique_ptr<IWorkload>();
1344 }
1345 
1347  const BatchNormalizationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
1348 {
1349  return std::unique_ptr<IWorkload>();
1350 }
1351 
1353  const WorkloadInfo& /*Info*/) const
1354 {
1355  return std::unique_ptr<IWorkload>();
1356 }
1357 
1358 std::unique_ptr<IWorkload> IWorkloadFactory::CreateCast(const CastQueueDescriptor& /*descriptor*/,
1359  const WorkloadInfo& /*info*/) const
1360 {
1361  return std::unique_ptr<IWorkload>();
1362 }
1363 
1364 std::unique_ptr<IWorkload> IWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& /*descriptor*/,
1365  const WorkloadInfo& /*info*/) const
1366 {
1367  return std::unique_ptr<IWorkload>();
1368 }
1369 
1370 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& /*descriptor*/,
1371  const WorkloadInfo& /*info*/) const
1372 {
1373  return std::unique_ptr<IWorkload>();
1374 }
1375 
1376 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& /*descriptor*/,
1377  const WorkloadInfo& /*info*/) const
1378 {
1379  return std::unique_ptr<IWorkload>();
1380 }
1381 
1383  const WorkloadInfo& /*info*/) const
1384 {
1385  return std::unique_ptr<IWorkload>();
1386 }
1387 
1389  const WorkloadInfo& /*info*/) const
1390 {
1391  return std::unique_ptr<IWorkload>();
1392 }
1393 
1395  const WorkloadInfo& /*info*/) const
1396 {
1397  return std::unique_ptr<IWorkload>();
1398 }
1399 
1401  const WorkloadInfo& /*info*/) const
1402 {
1403  return std::unique_ptr<IWorkload>();
1404 }
1405 
1406 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& /*descriptor*/,
1407  const WorkloadInfo& /*info*/) const
1408 {
1409  return std::unique_ptr<IWorkload>();
1410 }
1411 
1412 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDebug(const DebugQueueDescriptor& /*descriptor*/,
1413  const WorkloadInfo& /*info*/) const
1414 {
1415  return std::unique_ptr<IWorkload>();
1416 }
1417 
1418 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& /*descriptor*/,
1419  const WorkloadInfo& /*info*/) const
1420 {
1421  return std::unique_ptr<IWorkload>();
1422 }
1423 
1425  const DepthwiseConvolution2dQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
1426 {
1427  return std::unique_ptr<IWorkload>();
1428 }
1429 
1430 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDequantize(
1431  const DequantizeQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
1432 {
1433  return std::unique_ptr<IWorkload>();
1434 }
1435 
1437  const DetectionPostProcessQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
1438 {
1439  return std::unique_ptr<IWorkload>();
1440 }
1441 
1442 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& /*descriptor*/,
1443  const WorkloadInfo& /*info*/) const
1444 {
1445  return std::unique_ptr<IWorkload>();
1446 }
1447 
1449  const WorkloadInfo& /*info*/) const
1450 {
1451  return std::unique_ptr<IWorkload>();
1452 }
1453 
1454 std::unique_ptr<IWorkload> IWorkloadFactory::CreateEqual(const EqualQueueDescriptor& /*descriptor*/,
1455  const WorkloadInfo& /*Info*/) const
1456 {
1457  return std::unique_ptr<IWorkload>();
1458 }
1459 
1461  const WorkloadInfo& /*info*/) const
1462 {
1463  return std::unique_ptr<IWorkload>();
1464 }
1465 
1466 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFill(const FillQueueDescriptor& /*descriptor*/,
1467  const WorkloadInfo& /*info*/) const
1468 {
1469  return std::unique_ptr<IWorkload>();
1470 }
1471 
1472 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFloor(const FloorQueueDescriptor& /*descriptor*/,
1473  const WorkloadInfo& /*info*/) const
1474 {
1475  return std::unique_ptr<IWorkload>();
1476 }
1477 
1478 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& /*descriptor*/,
1479  const WorkloadInfo& /*info*/) const
1480 {
1481  return std::unique_ptr<IWorkload>();
1482 }
1483 
1484 std::unique_ptr<IWorkload> IWorkloadFactory::CreateGather(const GatherQueueDescriptor& /*descriptor*/,
1485  const WorkloadInfo& /*info*/) const
1486 {
1487  return std::unique_ptr<IWorkload>();
1488 }
1489 
1490 std::unique_ptr<IWorkload> IWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& /*descriptor*/,
1491  const WorkloadInfo& /*info*/) const
1492 {
1493  return std::unique_ptr<IWorkload>();
1494 }
1495 
1497  const InstanceNormalizationQueueDescriptor& /*descriptor*/,
1498  const WorkloadInfo& /*info*/) const
1499 {
1500  return std::unique_ptr<IWorkload>();
1501 }
1502 
1504  const WorkloadInfo& /*info*/) const
1505 {
1506  return std::unique_ptr<IWorkload>();
1507 }
1508 
1509 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogicalBinary(const LogicalBinaryQueueDescriptor& /*desc*/,
1510  const WorkloadInfo& /*info*/) const
1511 {
1512  return std::unique_ptr<IWorkload>();
1513 }
1514 
1516  const WorkloadInfo& /*info*/) const
1517 {
1518  return std::unique_ptr<IWorkload>();
1519 }
1520 
1521 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& /*descriptor*/,
1522  const WorkloadInfo& /*info*/) const
1523 {
1524  return std::unique_ptr<IWorkload>();
1525 }
1526 
1527 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLstm(const LstmQueueDescriptor& /*descriptor*/,
1528  const WorkloadInfo& /*info*/) const
1529 {
1530  return std::unique_ptr<IWorkload>();
1531 }
1532 
1533 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& /*descriptor*/,
1534  const WorkloadInfo& /*info*/) const
1535 {
1536  return std::unique_ptr<IWorkload>();
1537 }
1538 
1539 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMean(const MeanQueueDescriptor& /*descriptor*/,
1540  const WorkloadInfo& /*Info*/) const
1541 {
1542  return std::unique_ptr<IWorkload>();
1543 }
1544 
1545 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& /*descriptor*/,
1546  const WorkloadInfo& /*info*/) const
1547 {
1548  return std::unique_ptr<IWorkload>();
1549 }
1550 
1551 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& /*descriptor*/,
1552  const WorkloadInfo& /*info*/) const
1553 {
1554  return std::unique_ptr<IWorkload>();
1555 }
1556 
1557 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerge(const MergeQueueDescriptor& /*descriptor*/,
1558  const WorkloadInfo& /*info*/) const
1559 {
1560  return std::unique_ptr<IWorkload>();
1561 }
1562 
1563 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerger(const MergerQueueDescriptor& /*descriptor*/,
1564  const WorkloadInfo& /*info*/) const
1565 {
1566  return std::unique_ptr<IWorkload>();
1567 }
1568 
1569 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& /*descriptor*/,
1570  const WorkloadInfo& /*info*/) const
1571 {
1572  return std::unique_ptr<IWorkload>();
1573 }
1574 
1575 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& /*descriptor*/,
1576  const WorkloadInfo& /*info*/) const
1577 {
1578  return std::unique_ptr<IWorkload>();
1579 }
1580 
1581 std::unique_ptr<IWorkload> IWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& /*descriptor*/,
1582  const WorkloadInfo& /*info*/) const
1583 {
1584  return std::unique_ptr<IWorkload>();
1585 }
1586 
1587 std::unique_ptr<IWorkload> IWorkloadFactory::CreateOutput(const OutputQueueDescriptor& /*descriptor*/,
1588  const WorkloadInfo& /*info*/) const
1589 {
1590  return std::unique_ptr<IWorkload>();
1591 }
1592 
1593 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePad(const PadQueueDescriptor& /*descriptor*/,
1594  const WorkloadInfo& /*Info*/) const
1595 {
1596  return std::unique_ptr<IWorkload>();
1597 }
1598 
1599 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& /*descriptor*/,
1600  const WorkloadInfo& /*info*/) const
1601 {
1602  return std::unique_ptr<IWorkload>();
1603 }
1604 
1605 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& /*descriptor*/,
1606  const WorkloadInfo& /*info*/) const
1607 {
1608  return std::unique_ptr<IWorkload>();
1609 }
1610 
1611 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& /*descriptor*/,
1612  const WorkloadInfo& /*info*/) const
1613 {
1614  return std::unique_ptr<IWorkload>();
1615 }
1616 
1617 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePrelu(const PreluQueueDescriptor &/*descriptor*/,
1618  const WorkloadInfo &/*info*/) const
1619 {
1620  return std::unique_ptr<IWorkload>();
1621 }
1622 
1623 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& /*descriptor*/,
1624  const WorkloadInfo& /*Info*/) const
1625 {
1626  return std::unique_ptr<IWorkload>();
1627 }
1628 
1629 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQLstm(const QLstmQueueDescriptor& /*descriptor*/,
1630  const WorkloadInfo& /*info*/) const
1631 {
1632  return std::unique_ptr<IWorkload>();
1633 }
1634 
1635 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& /*descriptor*/,
1636  const WorkloadInfo& /*info*/) const
1637 {
1638  return std::unique_ptr<IWorkload>();
1639 }
1640 std::unique_ptr<IWorkload> IWorkloadFactory::CreateRank(const RankQueueDescriptor& /*descriptor*/,
1641  const WorkloadInfo& /*info*/) const
1642 {
1643  return std::unique_ptr<IWorkload>();
1644 }
1645 
1646 std::unique_ptr<IWorkload> IWorkloadFactory::CreateReduce(const ReduceQueueDescriptor& /*descriptor*/,
1647  const WorkloadInfo& /*info*/) const
1648 {
1649  return std::unique_ptr<IWorkload>();
1650 }
1651 
1652 std::unique_ptr<IWorkload> IWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& /*descriptor*/,
1653  const WorkloadInfo& /*info*/) const
1654 {
1655  return std::unique_ptr<IWorkload>();
1656 }
1657 
1658 std::unique_ptr<IWorkload> IWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& /*descriptor*/,
1659  const WorkloadInfo& /*info*/) const
1660 {
1661  return std::unique_ptr<IWorkload>();
1662 }
1663 
1664 std::unique_ptr<IWorkload> IWorkloadFactory::CreateResize(const ResizeQueueDescriptor& /*descriptor*/,
1665  const WorkloadInfo& /*info*/) const
1666 {
1667  return std::unique_ptr<IWorkload>();
1668 }
1669 
1670 std::unique_ptr<IWorkload> IWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& /*descriptor*/,
1671  const WorkloadInfo& /*info*/) const
1672 {
1673  return std::unique_ptr<IWorkload>();
1674 }
1675 
1676 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSlice(const SliceQueueDescriptor& /*descriptor*/,
1677  const WorkloadInfo& /*info*/) const
1678 {
1679  return std::unique_ptr<IWorkload>();
1680 }
1681 
1682 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& /*descriptor*/,
1683  const WorkloadInfo& /*info*/) const
1684 {
1685  return std::unique_ptr<IWorkload>();
1686 }
1687 
1688 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& /*descriptor*/,
1689  const WorkloadInfo& /*info*/) const
1690 {
1691  return std::unique_ptr<IWorkload>();
1692 }
1693 
1694 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& /*descriptor*/,
1695  const WorkloadInfo& /*info*/) const
1696 {
1697  return std::unique_ptr<IWorkload>();
1698 }
1699 
1700 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& /*descriptor*/,
1701  const WorkloadInfo& /*info*/) const
1702 {
1703  return std::unique_ptr<IWorkload>();
1704 }
1705 
1706 std::unique_ptr<IWorkload> IWorkloadFactory::CreateStack(const StackQueueDescriptor& /*descriptor*/,
1707  const WorkloadInfo& /*info*/) const
1708 {
1709  return std::unique_ptr<IWorkload>();
1710 }
1711 
1712 std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& /*descriptor*/,
1713  const WorkloadInfo& /*info*/) const
1714 {
1715  return std::unique_ptr<IWorkload>();
1716 }
1717 
1718 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& /*descriptor*/,
1719  const WorkloadInfo& /*info*/) const
1720 {
1721  return std::unique_ptr<IWorkload>();
1722 }
1723 
1724 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSwitch(const SwitchQueueDescriptor& /*descriptor*/,
1725  const WorkloadInfo& /*info*/) const
1726 {
1727  return std::unique_ptr<IWorkload>();
1728 }
1729 
1730 std::unique_ptr<IWorkload> IWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& /*descriptor*/,
1731  const WorkloadInfo& /*info*/) const
1732 {
1733  return std::unique_ptr<IWorkload>();
1734 }
1735 
1737  const TransposeConvolution2dQueueDescriptor& /*descriptor*/,
1738  const WorkloadInfo& /*info*/) const
1739 {
1740  return std::unique_ptr<IWorkload>();
1741 }
1742 
 1743 } // namespace armnn
virtual std::unique_ptr< IWorkload > CreateSplitter(const SplitterQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateBatchNormalization(const BatchNormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateDebug(const DebugQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMemCopy(const MemCopyQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateL2Normalization(const L2NormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
virtual std::unique_ptr< IWorkload > CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateMultiplication(const MultiplicationQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateInstanceNormalization(const InstanceNormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateGreater(const GreaterQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateArgMinMax(const ArgMinMaxQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMerger(const MergerQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateLogicalUnary(const ElementwiseUnaryQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateLogSoftmax(const LogSoftmaxQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateResizeBilinear(const ResizeBilinearQueueDescriptor &descriptor, const WorkloadInfo &info) const
std::vector< BackendOptions > ModelOptions
virtual std::unique_ptr< IWorkload > CreateStridedSlice(const StridedSliceQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateStack(const StackQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateLstm(const LstmQueueDescriptor &descriptor, const WorkloadInfo &info) const
constexpr TransformIterator< Function, Iterator > MakeTransformIterator(Iterator i, Function f)
virtual std::unique_ptr< IWorkload > CreateFakeQuantization(const FakeQuantizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateQuantizedLstm(const QuantizedLstmQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateQLstm(const QLstmQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateConstant(const ConstantQueueDescriptor &descriptor, const WorkloadInfo &info) const
BackendRegistry & BackendRegistryInstance()
virtual std::unique_ptr< IWorkload > CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor &descriptor, const WorkloadInfo &Info) const
std::vector< float > boxEncodings({ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f })
virtual std::unique_ptr< IWorkload > CreateAbs(const AbsQueueDescriptor &descriptor, const WorkloadInfo &info) const
Copyright (c) 2021 ARM Limited and Contributors.
virtual std::unique_ptr< IWorkload > CreateActivation(const ActivationQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateRsqrt(const RsqrtQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateTranspose(const TransposeQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateDivision(const DivisionQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateConvertFp32ToBf16(const ConvertFp32ToBf16QueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMaximum(const MaximumQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMerge(const MergeQueueDescriptor &descriptor, const WorkloadInfo &info) const
armnn::Optional< armnn::DataType > GetBiasTypeFromWeightsType(armnn::Optional< armnn::DataType > weightsType)
virtual std::unique_ptr< IWorkload > CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateEqual(const EqualQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateRank(const RankQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateDetectionPostProcess(const DetectionPostProcessQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateResize(const ResizeQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateCast(const CastQueueDescriptor &descriptor, const WorkloadInfo &Info) const
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
virtual std::unique_ptr< IWorkload > CreateQuantize(const QuantizeQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateReduce(const ReduceQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateSwitch(const SwitchQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreatePad(const PadQueueDescriptor &descriptor, const WorkloadInfo &Info) const
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
static bool IsLayerSupported(const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
virtual std::unique_ptr< IWorkload > CreateNormalization(const NormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateLogicalBinary(const LogicalBinaryQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateReshape(const ReshapeQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreatePermute(const PermuteQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateFill(const FillQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateComparison(const ComparisonQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreatePooling2d(const Pooling2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateSpaceToDepth(const SpaceToDepthQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateGather(const GatherQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMinimum(const MinimumQueueDescriptor &descriptor, const WorkloadInfo &info) const
std::vector< float > scores({ 0.0f, 0.9f, 0.8f, 0.0f, 0.75f, 0.72f, 0.0f, 0.6f, 0.5f, 0.0f, 0.93f, 0.95f, 0.0f, 0.5f, 0.4f, 0.0f, 0.3f, 0.2f })
virtual std::unique_ptr< IWorkload > CreateDepthToSpace(const DepthToSpaceQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateSlice(const SliceQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateAddition(const AdditionQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMean(const MeanQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateOutput(const OutputQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateSoftmax(const SoftmaxQueueDescriptor &descriptor, const WorkloadInfo &info) const
Contains information about inputs and outputs to a layer.
virtual std::unique_ptr< IWorkload > CreateFullyConnected(const FullyConnectedQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateFloor(const FloorQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMemImport(const MemImportQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateSubtraction(const SubtractionQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreatePreCompiled(const PreCompiledQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateConvolution2d(const Convolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreatePrelu(const PreluQueueDescriptor &descriptor, const WorkloadInfo &info) const
std::vector< float > anchors({ 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 100.5f, 1.0f, 1.0f })
virtual std::unique_ptr< IWorkload > CreateDequantize(const DequantizeQueueDescriptor &descriptor, const WorkloadInfo &info) const