ArmNN 20.08
WorkloadFactory.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <Layer.hpp>
#include <LayersFwd.hpp>

#include <armnn/Types.hpp>
#include <armnn/LayerSupport.hpp>
#include <armnn/ILayerSupport.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <backendsCommon/WorkloadFactory.hpp>
#include <armnn/backends/IBackendInternal.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>

#include <boost/iterator/transform_iterator.hpp>

#include <sstream>
namespace armnn
{

namespace
{

const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
{
    if (!type)
    {
        return info;
    }

    return TensorInfo(info.GetShape(), type.value(), info.GetQuantizationScale(), info.GetQuantizationOffset());
}
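// For example, OverrideDataType(fp32Info, DataType::Float16) yields a Float16
// TensorInfo with the same shape and quantization parameters, while
// OverrideDataType(fp32Info, EmptyOptional()) returns fp32Info unchanged. This
// lets the support queries below ask whether a layer would be supported if it
// were run at a different precision.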

} // anonymous namespace

bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
                                        const IConnectableLayer& connectableLayer,
                                        Optional<DataType> dataType,
                                        std::string& outReasonIfUnsupported)
{
    Optional<std::string&> reason = outReasonIfUnsupported;
    bool result;
    const Layer& layer = *(PolymorphicDowncast<const Layer*>(&connectableLayer));

    auto const& backendRegistry = BackendRegistryInstance();
    if (!backendRegistry.IsBackendRegistered(backendId))
    {
        std::stringstream ss;
        ss << connectableLayer.GetName() << " is not supported on " << backendId
           << " because this backend is not registered.";

        outReasonIfUnsupported = ss.str();
        return false;
    }

    auto backendFactory = backendRegistry.GetFactory(backendId);
    auto backendObject = backendFactory();
    auto layerSupportObject = backendObject->GetLayerSupport();
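
    // Each backend publishes an ILayerSupport object; the switch below forwards
    // the layer's tensor infos and parameters to the matching IsXxxSupported
    // query on that object.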
    switch(layer.GetType())
    {
        case LayerType::Activation:
        {
            auto cLayer = PolymorphicDowncast<const ActivationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsActivationSupported(
                                             OverrideDataType(input, dataType),
                                             OverrideDataType(output, dataType),
                                             cLayer->GetParameters(),
                                             reason);
            break;
        }
        case LayerType::Addition:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsAdditionSupported(
                                             OverrideDataType(input0, dataType),
                                             OverrideDataType(input1, dataType),
                                             OverrideDataType(output, dataType),
                                             reason);
            break;
        }
        case LayerType::ArgMinMax:
        {
            auto cLayer = PolymorphicDowncast<const ArgMinMaxLayer*>(&layer);
            const ArgMinMaxDescriptor& descriptor = cLayer->GetParameters();

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsArgMinMaxSupported(
                                             OverrideDataType(input, dataType),
                                             OverrideDataType(output, DataType::Signed32),
                                             descriptor,
                                             reason);
            break;
        }
        case LayerType::BatchNormalization:
        {
            auto cLayer = PolymorphicDowncast<const BatchNormalizationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            const TensorInfo& mean = cLayer->m_Mean->GetTensorInfo();
            const TensorInfo& var = cLayer->m_Variance->GetTensorInfo();
            const TensorInfo& beta = cLayer->m_Beta->GetTensorInfo();
            const TensorInfo& gamma = cLayer->m_Gamma->GetTensorInfo();
            result = layerSupportObject->IsBatchNormalizationSupported(
                                             OverrideDataType(input, dataType),
                                             OverrideDataType(output, dataType),
                                             OverrideDataType(mean, dataType),
                                             OverrideDataType(var, dataType),
                                             OverrideDataType(beta, dataType),
                                             OverrideDataType(gamma, dataType),
                                             cLayer->GetParameters(),
                                             reason);
            break;
        }
        case LayerType::BatchToSpaceNd:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            auto cLayer = PolymorphicDowncast<const BatchToSpaceNdLayer*>(&layer);

            result = layerSupportObject->IsBatchToSpaceNdSupported(OverrideDataType(input, dataType),
                                                                   OverrideDataType(output, dataType),
                                                                   cLayer->GetParameters(),
                                                                   reason);
            break;
        }
        case LayerType::Comparison:
        {
            auto cLayer = PolymorphicDowncast<const ComparisonLayer*>(&layer);

            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsComparisonSupported(OverrideDataType(input0, dataType),
                                                               OverrideDataType(input1, dataType),
                                                               OverrideDataType(output, DataType::Boolean),
                                                               cLayer->GetParameters(),
                                                               reason);
            break;
        }
        case LayerType::Constant:
        {
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsConstantSupported(OverrideDataType(output, dataType), reason);
            break;
        }
        case LayerType::ConvertBf16ToFp32:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsConvertBf16ToFp32Supported(input, output, reason);
            break;
        }
        case LayerType::ConvertFp16ToFp32:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsConvertFp16ToFp32Supported(input, output, reason);
            break;
        }
        case LayerType::ConvertFp32ToBf16:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsConvertFp32ToBf16Supported(input, output, reason);
            break;
        }
        case LayerType::ConvertFp32ToFp16:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsConvertFp32ToFp16Supported(input, output, reason);
            break;
        }
        case LayerType::Convolution2d:
        {
            auto cLayer = PolymorphicDowncast<const Convolution2dLayer*>(&layer);

            const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                      dataType);
            const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
            ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);

            const Convolution2dDescriptor& descriptor = cLayer->GetParameters();

            // Construct optional biases object based on the value of m_BiasEnabled
            Optional<TensorInfo> biases;
            if (descriptor.m_BiasEnabled)
            {
                biases =
                    OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
            }

            result = layerSupportObject->IsConvolution2dSupported(
                                             input,
                                             output,
                                             descriptor,
                                             OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
                                             biases,
                                             reason);
            break;
        }
        case LayerType::Debug:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsDebugSupported(OverrideDataType(input, dataType),
                                                          OverrideDataType(output, dataType),
                                                          reason);
            break;
        }
        case LayerType::DepthToSpace:
        {
            auto cLayer = PolymorphicDowncast<const DepthToSpaceLayer*>(&layer);

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsDepthToSpaceSupported(OverrideDataType(input, dataType),
                                                                 OverrideDataType(output, dataType),
                                                                 cLayer->GetParameters(),
                                                                 reason);
            break;
        }
        case LayerType::DepthwiseConvolution2d:
        {
            auto cLayer = PolymorphicDowncast<const DepthwiseConvolution2dLayer*>(&layer);
            const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                       dataType);
            const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
            ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);

            const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();

            // Construct optional biases object based on the value of m_BiasEnabled
            Optional<TensorInfo> biases;
            if (descriptor.m_BiasEnabled)
            {
                biases =
                    OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
            }

            result = layerSupportObject->IsDepthwiseConvolutionSupported(
                                             input,
                                             output,
                                             descriptor,
                                             OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
                                             biases,
                                             reason);
            break;
        }
        case LayerType::Dequantize:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsDequantizeSupported(input,
                                                               OverrideDataType(output, dataType),
                                                               reason);
            break;
        }
        case LayerType::DetectionPostProcess:
        {
            auto cLayer = PolymorphicDowncast<const DetectionPostProcessLayer*>(&layer);
            const TensorInfo& boxEncodings = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& scores = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& anchors = cLayer->m_Anchors->GetTensorInfo();

            const TensorInfo& detectionBoxes = layer.GetOutputSlot(0).GetTensorInfo();
            const TensorInfo& detectionClasses = layer.GetOutputSlot(1).GetTensorInfo();
            const TensorInfo& detectionScores = layer.GetOutputSlot(2).GetTensorInfo();
            const TensorInfo& numDetections = layer.GetOutputSlot(3).GetTensorInfo();

            const DetectionPostProcessDescriptor& descriptor = cLayer->GetParameters();
            result = layerSupportObject->IsDetectionPostProcessSupported(boxEncodings,
                                                                         scores,
                                                                         anchors,
                                                                         detectionBoxes,
                                                                         detectionClasses,
                                                                         detectionScores,
                                                                         numDetections,
                                                                         descriptor,
                                                                         reason);
            break;
        }
        case LayerType::ElementwiseUnary:
        {
            auto cLayer = PolymorphicDowncast<const ElementwiseUnaryLayer*>(&layer);

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsElementwiseUnarySupported(OverrideDataType(input, dataType),
                                                                     OverrideDataType(output, dataType),
                                                                     cLayer->GetParameters(),
                                                                     reason);
            break;
        }
        case LayerType::Fill:
        {
            auto cLayer = PolymorphicDowncast<const FillLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            const FillDescriptor& descriptor = cLayer->GetParameters();

            result = layerSupportObject->IsFillSupported(
                                             OverrideDataType(input, dataType),
                                             OverrideDataType(output, dataType),
                                             descriptor,
                                             reason);
            break;
        }
        case LayerType::FakeQuantization:
        {
            auto cLayer = PolymorphicDowncast<const FakeQuantizationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = layerSupportObject->IsFakeQuantizationSupported(OverrideDataType(input, dataType),
                                                                     cLayer->GetParameters(),
                                                                     reason);
            break;
        }
        case LayerType::Floor:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsFloorSupported(OverrideDataType(input, dataType),
                                                          OverrideDataType(output, dataType),
                                                          reason);
            break;
        }
        case LayerType::FullyConnected:
        {
            auto cLayer = PolymorphicDowncast<const FullyConnectedLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);

            TensorInfo biasInfo;
            const TensorInfo* biasInfoPtr = nullptr;
            static const TensorInfo dummyBFloat16Bias(TensorShape({1,1,1,1}), DataType::BFloat16);
            static const TensorInfo dummyFloat16Bias(TensorShape({1,1,1,1}), DataType::Float16);
            static const TensorInfo dummyFloat32Bias(TensorShape({1,1,1,1}), DataType::Float32);
            static const TensorInfo dummyQA8Bias(TensorShape({1,1,1,1}), DataType::Signed32);

            const FullyConnectedDescriptor& descriptor = cLayer->GetParameters();
            if (descriptor.m_BiasEnabled)
            {
                ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
                biasInfo = OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
                biasInfoPtr = &biasInfo;
            }
            else
            {
                // If biases are not enabled, pass a dummy TensorInfo for the validation.
                switch(input.GetDataType())
                {
                    case DataType::BFloat16:
                    {
                        biasInfoPtr = &dummyBFloat16Bias;
                        break;
                    }
                    case DataType::Float16:
                    {
                        biasInfoPtr = &dummyFloat16Bias;
                        break;
                    }
                    case DataType::Float32:
                    {
                        biasInfoPtr = &dummyFloat32Bias;
                        break;
                    }
                    case DataType::QAsymmU8:
                    case DataType::QAsymmS8:
                    case DataType::QSymmS8:
                    case DataType::QSymmS16:
                    {
                        biasInfoPtr = &dummyQA8Bias;
                        break;
                    }
                    default:
                    {
                        ARMNN_ASSERT_MSG(false, "Unexpected bias type");
                    }
                }
            }

            result = layerSupportObject->IsFullyConnectedSupported(
                                             OverrideDataType(input, dataType),
                                             OverrideDataType(output, dataType),
                                             OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
                                             *biasInfoPtr,
                                             descriptor,
                                             reason);
            break;
        }
        case LayerType::Gather:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            auto cLayer = PolymorphicDowncast<const GatherLayer*>(&layer);
            const GatherDescriptor& descriptor = cLayer->GetParameters();
            result = layerSupportObject->IsGatherSupported(OverrideDataType(input0, dataType),
                                                           input1,
                                                           OverrideDataType(output, dataType),
                                                           descriptor,
                                                           reason);
            break;
        }
        case LayerType::Input:
        {
            const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsInputSupported(OverrideDataType(input, dataType), reason);
            break;
        }
        case LayerType::InstanceNormalization:
        {
            auto cLayer = PolymorphicDowncast<const InstanceNormalizationLayer*>(&layer);
            const InstanceNormalizationDescriptor& descriptor = cLayer->GetParameters();

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsInstanceNormalizationSupported(
                                             OverrideDataType(input, dataType),
                                             OverrideDataType(output, dataType),
                                             descriptor,
                                             reason);
            break;
        }
        case LayerType::L2Normalization:
        {
            auto cLayer = PolymorphicDowncast<const L2NormalizationLayer*>(&layer);
            const L2NormalizationDescriptor& descriptor = cLayer->GetParameters();

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsL2NormalizationSupported(
                                             OverrideDataType(input, dataType),
                                             OverrideDataType(output, dataType),
                                             descriptor,
                                             reason);
            break;
        }
        case LayerType::LogSoftmax:
        {
            auto cLayer = PolymorphicDowncast<const LogSoftmaxLayer*>(&layer);

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsLogSoftmaxSupported(OverrideDataType(input, dataType),
                                                               OverrideDataType(output, dataType),
                                                               cLayer->GetParameters(),
                                                               reason);
            break;
        }
        case LayerType::Lstm:
        {
            auto cLayer = PolymorphicDowncast<const LstmLayer*>(&layer);
            const LstmDescriptor& descriptor = cLayer->GetParameters();

            // All inputs.
            const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                       dataType);
            const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
                                                               dataType);
            const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
                                                             dataType);
            // All outputs
            const TensorInfo& scratchBuffer = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
            const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
            const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
            const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(3).GetTensorInfo(), dataType);

            // Basic parameters
            const TensorInfo& inputToForgetWeights
                = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
            const TensorInfo& inputToCellWeights
                = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
            const TensorInfo& inputToOutputWeights
                = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
            const TensorInfo& recurrentToForgetWeights
                = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
            const TensorInfo& recurrentToCellWeights
                = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
            const TensorInfo& recurrentToOutputWeights
                = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
            const TensorInfo& forgetGateBias
                = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
            const TensorInfo& cellBias
                = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
            const TensorInfo& outputGateBias
                = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);

            LstmInputParamsInfo paramsInfo;

            paramsInfo.m_InputToForgetWeights     = &inputToForgetWeights;
            paramsInfo.m_InputToCellWeights       = &inputToCellWeights;
            paramsInfo.m_InputToOutputWeights     = &inputToOutputWeights;
            paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
            paramsInfo.m_RecurrentToCellWeights   = &recurrentToCellWeights;
            paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
            paramsInfo.m_ForgetGateBias           = &forgetGateBias;
            paramsInfo.m_CellBias                 = &cellBias;
            paramsInfo.m_OutputGateBias           = &outputGateBias;

            // Optional parameters
            TensorInfo optInputToInputWeights;
            TensorInfo optRecurrentToInputWeights;
            TensorInfo optCellToInputWeights;
            TensorInfo optInputGateBias;
            TensorInfo optProjectionWeights;
            TensorInfo optProjectionBias;
            TensorInfo optCellToForgetWeights;
            TensorInfo optCellToOutputWeights;
            TensorInfo optInputLayerNormWeights;
            TensorInfo optForgetLayerNormWeights;
            TensorInfo optCellLayerNormWeights;
            TensorInfo optOutputLayerNormWeights;

            if(!descriptor.m_CifgEnabled)
            {
                optInputToInputWeights =
                    OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
                paramsInfo.m_InputToInputWeights = &optInputToInputWeights;

                optRecurrentToInputWeights =
                    OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
                paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
                optInputGateBias =
                    OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
                paramsInfo.m_InputGateBias = &optInputGateBias;
            }

            if(descriptor.m_ProjectionEnabled)
            {
                optProjectionWeights =
                    OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
                paramsInfo.m_ProjectionWeights = &optProjectionWeights;
                if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
                {
                    optProjectionBias =
                        OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
                    paramsInfo.m_ProjectionBias = &optProjectionBias;
                }
            }

            if(descriptor.m_PeepholeEnabled)
            {
                if(!descriptor.m_CifgEnabled)
                {
                    optCellToInputWeights =
                        OverrideDataType(cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
                                         dataType);
                    paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
                }
                optCellToForgetWeights =
                    OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
                paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
                optCellToOutputWeights =
                    OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
                paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
            }

            if(descriptor.m_LayerNormEnabled)
            {
                if (!descriptor.m_CifgEnabled)
                {
                    optInputLayerNormWeights = OverrideDataType(
                        cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
                    paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
                }

                optForgetLayerNormWeights = OverrideDataType(
                    cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
                paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;

                optCellLayerNormWeights = OverrideDataType(
                    cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
                paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;

                optOutputLayerNormWeights = OverrideDataType(
                    cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
                paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
            }

            result = layerSupportObject->IsLstmSupported(
                                             input,
                                             outputStateIn,
                                             cellStateIn,
                                             scratchBuffer,
                                             outputStateOut,
                                             cellStateOut,
                                             output,
                                             descriptor,
                                             paramsInfo,
                                             reason);
            break;
        }
        case LayerType::Maximum:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsMaximumSupported(OverrideDataType(input0, dataType),
                                                            OverrideDataType(input1, dataType),
                                                            OverrideDataType(output, dataType),
                                                            reason);
            break;
        }
        case LayerType::MemCopy:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsMemCopySupported(OverrideDataType(input, dataType),
                                                            OverrideDataType(output, dataType),
                                                            reason);
            break;
        }
        case LayerType::MemImport:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsMemImportSupported(OverrideDataType(input, dataType),
                                                              OverrideDataType(output, dataType),
                                                              reason);
            break;
        }
        case LayerType::Merge:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsMergeSupported(OverrideDataType(input0, dataType),
                                                          OverrideDataType(input1, dataType),
                                                          OverrideDataType(output, dataType),
                                                          reason);
            break;
        }
        case LayerType::Concat:
        {
            auto cLayer = PolymorphicDowncast<const ConcatLayer*>(&layer);

            // Get vector of all inputs.
            auto getTensorInfo = [&dataType](const InputSlot& slot)
            {
                return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
            };
            auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo);
            auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo);
            std::vector<TensorInfo> inputs(beginI, endI);

            auto getTensorInfoPtr = [](const TensorInfo& info)
            {
                return &info;
            };
            auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
            auto endPtr = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
            std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);

            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsConcatSupported(inputPtrs, output, cLayer->GetParameters(), reason);

            break;
        }
        case LayerType::Multiplication:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsMultiplicationSupported(
                                             OverrideDataType(input0, dataType),
                                             OverrideDataType(input1, dataType),
                                             OverrideDataType(output, dataType),
                                             reason);
            break;
        }
        case LayerType::Normalization:
        {
            auto cLayer = PolymorphicDowncast<const NormalizationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsNormalizationSupported(OverrideDataType(input, dataType),
                                                                  OverrideDataType(output, dataType),
                                                                  cLayer->GetParameters(),
                                                                  reason);
            break;
        }
        case LayerType::Output:
        {
            const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = layerSupportObject->IsOutputSupported(OverrideDataType(output, dataType), reason);
            break;
        }
        case LayerType::Permute:
        {
            auto cLayer = PolymorphicDowncast<const PermuteLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsPermuteSupported(OverrideDataType(input, dataType),
                                                            OverrideDataType(output, dataType),
                                                            cLayer->GetParameters(),
                                                            reason);
            break;
        }
        case LayerType::Pad:
        {
            auto cLayer = PolymorphicDowncast<const PadLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsPadSupported(
                                             OverrideDataType(input, dataType),
                                             OverrideDataType(output, dataType),
                                             cLayer->GetParameters(),
                                             reason);
            break;
        }
        case LayerType::Pooling2d:
        {
            auto cLayer = PolymorphicDowncast<const Pooling2dLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsPooling2dSupported(OverrideDataType(input, dataType),
                                                              OverrideDataType(output, dataType),
                                                              cLayer->GetParameters(),
                                                              reason);
            break;
        }
        case LayerType::PreCompiled:
        {
            auto cLayer = PolymorphicDowncast<const PreCompiledLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = layerSupportObject->IsPreCompiledSupported(OverrideDataType(input, dataType),
                                                                cLayer->GetParameters(),
                                                                reason);
            break;
        }
        case LayerType::Quantize:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsQuantizeSupported(input, output, reason);
            break;
        }
        case LayerType::QLstm:
        {
            auto cLayer = PolymorphicDowncast<const QLstmLayer*>(&layer);
            const QLstmDescriptor& descriptor = cLayer->GetParameters();

            // Inputs
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& previousOutputIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& previousCellStateIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();

            // Outputs
            const TensorInfo& outputStateOut = layer.GetOutputSlot(0).GetTensorInfo();
            const TensorInfo& cellStateOut = layer.GetOutputSlot(1).GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(2).GetTensorInfo();

            // Lstm parameters
            LstmInputParamsInfo paramsInfo;

            // Basic parameters
            paramsInfo.m_InputToForgetWeights = &cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo();
            paramsInfo.m_InputToCellWeights = &cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo();
            paramsInfo.m_InputToOutputWeights = &cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo();

            paramsInfo.m_RecurrentToForgetWeights =
                &cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo();
            paramsInfo.m_RecurrentToCellWeights =
                &cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo();
            paramsInfo.m_RecurrentToOutputWeights =
                &cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo();

            paramsInfo.m_ForgetGateBias = &cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo();
            paramsInfo.m_CellBias = &cLayer->m_BasicParameters.m_CellBias->GetTensorInfo();
            paramsInfo.m_OutputGateBias = &cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo();

            if(!descriptor.m_CifgEnabled)
            {
                paramsInfo.m_InputToInputWeights = &cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo();
                paramsInfo.m_RecurrentToInputWeights =
                    &cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo();
                paramsInfo.m_InputGateBias = &cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo();
            }

            if(descriptor.m_ProjectionEnabled)
            {
                paramsInfo.m_ProjectionWeights = &cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo();

                // Projection bias is optional even if projection is enabled
                if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
                {
                    paramsInfo.m_ProjectionBias = &cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo();
                }
            }

            if(descriptor.m_PeepholeEnabled)
            {
                if (!descriptor.m_CifgEnabled)
                {
                    paramsInfo.m_CellToInputWeights =
                        &cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo();
                }

                paramsInfo.m_CellToForgetWeights =
                    &cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo();
                paramsInfo.m_CellToOutputWeights = &cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo();
            }

            if(descriptor.m_LayerNormEnabled)
            {
                if (!descriptor.m_CifgEnabled)
                {
                    paramsInfo.m_InputLayerNormWeights =
                        &cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo();
                }

                paramsInfo.m_ForgetLayerNormWeights =
                    &cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo();
                paramsInfo.m_CellLayerNormWeights =
                    &cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo();
                paramsInfo.m_OutputLayerNormWeights =
                    &cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo();
            }

            result = layerSupportObject->IsQLstmSupported(input,
                                                          previousOutputIn,
                                                          previousCellStateIn,
                                                          outputStateOut,
                                                          cellStateOut,
                                                          output,
                                                          descriptor,
                                                          paramsInfo,
                                                          reason);
            break;
        }
        case LayerType::QuantizedLstm:
        {
            auto cLayer = PolymorphicDowncast<const QuantizedLstmLayer*>(&layer);

            // Inputs
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& previousCellStateIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& previousOutputIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();

            // Outputs
            const TensorInfo& cellStateOut = layer.GetOutputSlot(0).GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(1).GetTensorInfo();

            // QuantizedLstm parameters
            QuantizedLstmInputParamsInfo paramsInfo;

            paramsInfo.m_InputToInputWeights =
                &cLayer->m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo();
            paramsInfo.m_InputToForgetWeights =
                &cLayer->m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo();
            paramsInfo.m_InputToCellWeights =
                &cLayer->m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo();
            paramsInfo.m_InputToOutputWeights =
                &cLayer->m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo();

            paramsInfo.m_RecurrentToInputWeights =
                &cLayer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo();
            paramsInfo.m_RecurrentToForgetWeights =
                &cLayer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo();
            paramsInfo.m_RecurrentToCellWeights =
                &cLayer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo();
            paramsInfo.m_RecurrentToOutputWeights =
                &cLayer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo();

            paramsInfo.m_InputGateBias =
                &cLayer->m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo();
            paramsInfo.m_ForgetGateBias =
                &cLayer->m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo();
            paramsInfo.m_CellBias =
                &cLayer->m_QuantizedLstmParameters.m_CellBias->GetTensorInfo();
            paramsInfo.m_OutputGateBias =
                &cLayer->m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo();

            result = layerSupportObject->IsQuantizedLstmSupported(input,
                                                                  previousCellStateIn,
                                                                  previousOutputIn,
                                                                  cellStateOut,
                                                                  output,
                                                                  paramsInfo,
                                                                  reason);
            break;
        }
        case LayerType::Division:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsDivisionSupported(
                                             OverrideDataType(input0, dataType),
                                             OverrideDataType(input1, dataType),
                                             OverrideDataType(output, dataType),
                                             reason);
            break;
        }
        case LayerType::Rank:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsRankSupported(OverrideDataType(input, dataType),
                                                         OverrideDataType(output, dataType),
                                                         reason);
            break;
        }
        case LayerType::Reshape:
        {
            auto cLayer = PolymorphicDowncast<const ReshapeLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsReshapeSupported(OverrideDataType(input, dataType),
                                                            OverrideDataType(output, dataType),
                                                            cLayer->GetParameters(),
                                                            reason);
            break;
        }
        case LayerType::Resize:
        {
            auto cLayer = PolymorphicDowncast<const ResizeLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsResizeSupported(OverrideDataType(input, dataType),
                                                           OverrideDataType(output, dataType),
                                                           cLayer->GetParameters(),
                                                           reason);
            break;
        }
        case LayerType::Slice:
        {
            auto cLayer = PolymorphicDowncast<const SliceLayer*>(&layer);

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsSliceSupported(OverrideDataType(input, dataType),
                                                          OverrideDataType(output, dataType),
                                                          cLayer->GetParameters(),
                                                          reason);
            break;
        }
        case LayerType::Softmax:
        {
            auto cLayer = PolymorphicDowncast<const SoftmaxLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsSoftmaxSupported(OverrideDataType(input, dataType),
                                                            OverrideDataType(output, dataType),
                                                            cLayer->GetParameters(),
                                                            reason);
            break;
        }
        case LayerType::SpaceToBatchNd:
        {
            auto cLayer = PolymorphicDowncast<const SpaceToBatchNdLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsSpaceToBatchNdSupported(OverrideDataType(input, dataType),
                                                                   OverrideDataType(output, dataType),
                                                                   cLayer->GetParameters(),
                                                                   reason);
            break;
        }
        case LayerType::SpaceToDepth:
        {
            auto cLayer = PolymorphicDowncast<const SpaceToDepthLayer*>(&layer);

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsSpaceToDepthSupported(OverrideDataType(input, dataType),
                                                                 OverrideDataType(output, dataType),
                                                                 cLayer->GetParameters(),
                                                                 reason);
            break;
        }
        case LayerType::Splitter:
        {
            auto cLayer = PolymorphicDowncast<const SplitterLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();

            // Get vector of all outputs.
            auto getTensorInfo = [&dataType](const OutputSlot& slot)
            {
                return OverrideDataType(slot.GetTensorInfo(), dataType);
            };
            auto beginI = boost::make_transform_iterator(layer.GetOutputSlots().begin(), getTensorInfo);
            auto endI = boost::make_transform_iterator(layer.GetOutputSlots().end(), getTensorInfo);
            std::vector<TensorInfo> outputs(beginI, endI);

            const std::vector<std::reference_wrapper<TensorInfo>> outputPtrs(outputs.begin(), outputs.end());

            result = layerSupportObject->IsSplitterSupported(OverrideDataType(input, dataType),
                                                             outputPtrs,
                                                             cLayer->GetParameters(),
                                                             reason);
            break;
        }
        case LayerType::Stack:
        {
            auto cLayer = PolymorphicDowncast<const StackLayer*>(&layer);

            // Get vector of all inputs.
            auto getTensorInfo = [&dataType](const InputSlot& slot)
            {
                return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
            };
            auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo);
            auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo);
            std::vector<TensorInfo> inputs(beginI, endI);

            auto getTensorInfoPtr = [](const TensorInfo& info)
            {
                return &info;
            };
            auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
            auto endPtr = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
            std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);

            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason);

            break;
        }
        case LayerType::StandIn:
        {
            auto cLayer = PolymorphicDowncast<const StandInLayer*>(&layer);

            // Get vector of all inputs.
            auto getTensorInfoIn = [&dataType](const InputSlot& slot)
            {
                return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
            };
            auto getTensorInfoOut = [&dataType](const OutputSlot& slot)
            {
                return OverrideDataType(slot.GetTensorInfo(), dataType);
            };
            auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfoIn);
            auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfoIn);
            std::vector<TensorInfo> inputs(beginI, endI);

            auto beginO = boost::make_transform_iterator(layer.GetOutputSlots().begin(), getTensorInfoOut);
            auto endO = boost::make_transform_iterator(layer.GetOutputSlots().end(), getTensorInfoOut);
            std::vector<TensorInfo> outputs(beginO, endO);

            auto getTensorInfoPtr = [](const TensorInfo& info)
            {
                return &info;
            };
            auto beginPtrI = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
            auto endPtrI = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
            std::vector<const TensorInfo*> inputPtrs(beginPtrI, endPtrI);

            auto beginPtrO = boost::make_transform_iterator(outputs.begin(), getTensorInfoPtr);
            auto endPtrO = boost::make_transform_iterator(outputs.end(), getTensorInfoPtr);
            std::vector<const TensorInfo*> outputPtrs(beginPtrO, endPtrO);

            result = layerSupportObject->IsStandInSupported(inputPtrs,
                                                            outputPtrs,
                                                            cLayer->GetParameters(),
                                                            reason);
            break;
        }
        case LayerType::StridedSlice:
        {
            auto cLayer = PolymorphicDowncast<const StridedSliceLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsStridedSliceSupported(OverrideDataType(input, dataType),
                                                                 OverrideDataType(output, dataType),
                                                                 cLayer->GetParameters(),
                                                                 reason);
            break;
        }
        case LayerType::Subtraction:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsSubtractionSupported(
                                             OverrideDataType(input0, dataType),
                                             OverrideDataType(input1, dataType),
                                             OverrideDataType(output, dataType),
                                             reason);
            break;
        }
        case LayerType::Switch:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output0 = layer.GetOutputSlot(0).GetTensorInfo();
            const TensorInfo& output1 = layer.GetOutputSlot(1).GetTensorInfo();
            result = layerSupportObject->IsSwitchSupported(OverrideDataType(input0, dataType),
                                                           OverrideDataType(input1, dataType),
                                                           OverrideDataType(output0, dataType),
                                                           OverrideDataType(output1, dataType),
                                                           reason);
            break;
        }
        case LayerType::Mean:
        {
            auto cLayer = PolymorphicDowncast<const MeanLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsMeanSupported(
                                             OverrideDataType(input, dataType),
                                             OverrideDataType(output, dataType),
                                             cLayer->GetParameters(),
                                             reason);
            break;
        }
        case LayerType::Minimum:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsMinimumSupported(OverrideDataType(input0, dataType),
                                                            OverrideDataType(input1, dataType),
                                                            OverrideDataType(output, dataType),
                                                            reason);
            break;
        }
        case LayerType::Prelu:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& alpha = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsPreluSupported(OverrideDataType(input, dataType),
                                                          OverrideDataType(alpha, dataType),
                                                          OverrideDataType(output, dataType),
                                                          reason);
            break;
        }
        case LayerType::Transpose:
        {
            auto cLayer = PolymorphicDowncast<const TransposeLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsTransposeSupported(OverrideDataType(input, dataType),
                                                              OverrideDataType(output, dataType),
                                                              cLayer->GetParameters(),
                                                              reason);
            break;
        }
        case LayerType::TransposeConvolution2d:
        {
            auto cLayer = PolymorphicDowncast<const TransposeConvolution2dLayer*>(&layer);

            const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                      dataType);
            const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);

            const TransposeConvolution2dDescriptor& descriptor = cLayer->GetParameters();

            Optional<TensorInfo> biases;
            if (descriptor.m_BiasEnabled)
            {
                ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
                biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
                                          GetBiasTypeFromWeightsType(dataType));
            }

            ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
            const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);

            result = layerSupportObject->IsTransposeConvolution2dSupported(input,
                                                                           output,
                                                                           descriptor,
                                                                           weights,
                                                                           biases,
                                                                           reason);
            break;
        }
        default:
        {
            ARMNN_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
            reason.value() = "Unrecognised layer type";
            result = false;
            break;
        }
    }
    return result;
}
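
// A minimal usage sketch (illustrative only, not part of the file): ask
// whether a layer would run on CpuRef at Float32 before assigning it to
// that backend.
//
//     std::string reason;
//     if (!IWorkloadFactory::IsLayerSupported(BackendId("CpuRef"), layer,
//                                             DataType::Float32, reason))
//     {
//         std::cerr << layer.GetName() << ": " << reason << std::endl;
//     }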

bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
                                        Optional<DataType> dataType,
                                        std::string& outReasonIfUnsupported)
{
    auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
    return IsLayerSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
}

// Default Implementations. Each Create* function returns an empty (null)
// unique_ptr; a concrete workload factory overrides only the functions for
// the layer types its backend actually implements.
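//
// A hypothetical backend override might look like this (MyWorkloadFactory
// and MyActivationWorkload are illustrative names, not ArmNN types):
//
//     std::unique_ptr<IWorkload> MyWorkloadFactory::CreateActivation(
//         const ActivationQueueDescriptor& descriptor, const WorkloadInfo& info) const
//     {
//         return std::make_unique<MyActivationWorkload>(descriptor, info);
//     }
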
std::unique_ptr<IWorkload> IWorkloadFactory::CreateAbs(const AbsQueueDescriptor& /*descriptor*/,
                                                       const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& /*descriptor*/,
                                                              const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& /*descriptor*/,
                                                            const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& /*descriptor*/,
                                                             const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchNormalization(
    const BatchNormalizationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& /*descriptor*/,
                                                                  const WorkloadInfo& /*Info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& /*descriptor*/,
                                                              const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& /*descriptor*/,
                                                          const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& /*descriptor*/,
                                                            const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertBf16ToFp32(
    const ConvertBf16ToFp32QueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp16ToFp32(
    const ConvertFp16ToFp32QueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToBf16(
    const ConvertFp32ToBf16QueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToFp16(
    const ConvertFp32ToFp16QueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& /*descriptor*/,
                                                                 const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateDebug(const DebugQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& /*descriptor*/,
                                                                const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthwiseConvolution2d(
    const DepthwiseConvolution2dQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateDequantize(
    const DequantizeQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateDetectionPostProcess(
    const DetectionPostProcessQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& /*descriptor*/,
                                                            const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateElementwiseUnary(
    const ElementwiseUnaryQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateEqual(const EqualQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*Info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateFakeQuantization(
    const FakeQuantizationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateFill(const FillQueueDescriptor& /*descriptor*/,
                                                        const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateFloor(const FloorQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& /*descriptor*/,
                                                                  const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateGather(const GatherQueueDescriptor& /*descriptor*/,
                                                          const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& /*descriptor*/,
                                                           const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateInstanceNormalization(
    const InstanceNormalizationQueueDescriptor& /*descriptor*/,
    const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateL2Normalization(
    const L2NormalizationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& /*descriptor*/,
                                                              const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateLstm(const LstmQueueDescriptor& /*descriptor*/,
                                                        const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& /*descriptor*/,
                                                           const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMean(const MeanQueueDescriptor& /*descriptor*/,
                                                        const WorkloadInfo& /*Info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& /*descriptor*/,
                                                           const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& /*descriptor*/,
                                                             const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerge(const MergeQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerger(const MergerQueueDescriptor& /*descriptor*/,
                                                          const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& /*descriptor*/,
                                                           const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& /*descriptor*/,
                                                                  const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& /*descriptor*/,
                                                                 const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateOutput(const OutputQueueDescriptor& /*descriptor*/,
                                                          const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreatePad(const PadQueueDescriptor& /*descriptor*/,
                                                       const WorkloadInfo& /*Info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& /*descriptor*/,
                                                           const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& /*descriptor*/,
                                                             const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& /*descriptor*/,
                                                               const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreatePrelu(const PreluQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& /*descriptor*/,
                                                            const WorkloadInfo& /*Info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateQLstm(const QLstmQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& /*descriptor*/,
                                                                 const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateRank(const RankQueueDescriptor& /*descriptor*/,
                                                        const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& /*descriptor*/,
                                                           const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& /*descriptor*/,
                                                                  const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateResize(const ResizeQueueDescriptor& /*descriptor*/,
                                                          const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateSlice(const SliceQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& /*descriptor*/,
                                                           const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& /*descriptor*/,
                                                            const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& /*descriptor*/,
                                                                  const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& /*descriptor*/,
                                                                const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateStack(const StackQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& /*descriptor*/,
                                                                const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& /*descriptor*/,
                                                               const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateSwitch(const SwitchQueueDescriptor& /*descriptor*/,
                                                          const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& /*descriptor*/,
                                                             const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

1614 std::unique_ptr<IWorkload> IWorkloadFactory::CreateTransposeConvolution2d(
1615  const TransposeConvolution2dQueueDescriptor& /*descriptor*/,
1616  const WorkloadInfo& /*info*/) const
1617 {
1618  return std::unique_ptr<IWorkload>();
1619 }
1620 
1621 } // namespace armnn
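
Note: every one of these base-class hooks returns a default-constructed std::unique_ptr<IWorkload>, i.e. nullptr. A backend advertises support for a workload type by overriding the corresponding Create* method in its own factory; any method left un-overridden silently yields a null workload, so callers must null-check the result (IsLayerSupported, earlier in this file, is the intended way to query support ahead of time). A minimal sketch of such an override, assuming the backendsCommon/WorkloadFactory.hpp header from the ArmNN source tree; the CustomWorkloadFactory and CustomAdditionWorkload names are hypothetical, and the remaining virtual members of IWorkloadFactory are omitted for brevity:

#include <backendsCommon/WorkloadFactory.hpp> // IWorkloadFactory (ArmNN source tree)
#include <memory>

// Hypothetical backend factory: overrides one hook and inherits the
// null-returning defaults above for every other workload type.
class CustomWorkloadFactory : public armnn::IWorkloadFactory
{
public:
    std::unique_ptr<armnn::IWorkload> CreateAddition(
        const armnn::AdditionQueueDescriptor& descriptor,
        const armnn::WorkloadInfo& info) const override
    {
        // Return a concrete workload instead of the null default.
        // CustomAdditionWorkload is an assumed type, not part of ArmNN.
        return std::make_unique<CustomAdditionWorkload>(descriptor, info);
    }
};

This design keeps the interface non-abstract for workload creation: adding a new layer type to ArmNN does not force every backend to change, since unimplemented workloads degrade to nullptr rather than to a compile error.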