ArmNN 23.08 — source listing of WorkloadFactory.cpp (extracted from the generated documentation page).
1 //
2 // Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include <Layer.hpp>
7 #include <LayersFwd.hpp>
8 
9 #include <armnn/Types.hpp>
12 #include <armnn/BackendHelper.hpp>
16 
18 
19 #include <sstream>
20 
21 namespace armnn
22 {
23 
24 namespace
25 {
26 using LayerList = std::list<Layer*>;
27 using Iterator = LayerList::const_iterator; // Const so pointers in the list can't be modified externally.
28 
29 const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
30 {
31  if (!type)
32  {
33  return info;
34  }
35 
36  return TensorInfo(info.GetShape(),
37  type.value(),
38  info.GetQuantizationScale(),
39  info.GetQuantizationOffset(),
40  info.IsConstant());
41 }
42 
43 } // anonymous namespace
44 
46 {
47  if (!weightsType)
48  {
49  return weightsType;
50  }
51 
52  switch(weightsType.value())
53  {
57  return weightsType;
63  default:
64  ARMNN_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
65  }
66  return armnn::EmptyOptional();
67 }
68 
69 
70 bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
71  const IConnectableLayer& connectableLayer,
72  Optional<DataType> dataType,
73  std::string& outReasonIfUnsupported,
74  const ModelOptions& modelOptions)
75 {
76  Optional<std::string&> reason = outReasonIfUnsupported;
77  bool result;
78  const Layer& layer = *(PolymorphicDowncast<const Layer*>(&connectableLayer));
79 
80  auto const& backendRegistry = BackendRegistryInstance();
81  if (!backendRegistry.IsBackendRegistered(backendId))
82  {
83  std::stringstream ss;
84  ss << connectableLayer.GetName() << " is not supported on " << backendId
85  << " because this backend is not registered.";
86 
87  outReasonIfUnsupported = ss.str();
88  return false;
89  }
90 
91  auto backendFactory = backendRegistry.GetFactory(backendId);
92  auto backendObject = backendFactory();
93  auto layerSupport = backendObject->GetLayerSupport(modelOptions);
94  auto layerSupportObject = LayerSupportHandle(layerSupport, backendId);
95 
96  switch(layer.GetType())
97  {
99  {
100  auto cLayer = PolymorphicDowncast<const ActivationLayer*>(&layer);
101  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
102  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
103  result = layerSupportObject.IsActivationSupported(
104  OverrideDataType(input, dataType),
105  OverrideDataType(output, dataType),
106  cLayer->GetParameters(),
107  reason);
108  break;
109  }
110  case LayerType::Addition:
111  {
113  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
114  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
115  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
116  result = layerSupportObject.IsAdditionSupported(
117  OverrideDataType(input0, dataType),
118  OverrideDataType(input1, dataType),
119  OverrideDataType(output, dataType),
120  reason);
122  break;
123  }
125  {
126  auto cLayer = PolymorphicDowncast<const ArgMinMaxLayer*>(&layer);
127  const ArgMinMaxDescriptor& descriptor = cLayer->GetParameters();
128 
129  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
130  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
131  result = layerSupportObject.IsArgMinMaxSupported(
132  OverrideDataType(input, dataType),
133  OverrideDataType(output, DataType::Signed32),
134  descriptor,
135  reason);
136  break;
137  }
139  {
140  auto cLayer = PolymorphicDowncast<const BatchMatMulLayer*>(&layer);
141  const BatchMatMulDescriptor& descriptor = cLayer->GetParameters();
142 
143  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
144  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
145  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
146  result = layerSupportObject.IsBatchMatMulSupported(
147  OverrideDataType(input0, dataType),
148  OverrideDataType(input1, dataType),
149  OverrideDataType(output, dataType),
150  descriptor,
151  reason);
152  break;
153  }
155  {
156  auto cLayer = PolymorphicDowncast<const BatchNormalizationLayer*>(&layer);
157  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
158  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
159  const TensorInfo& mean = cLayer->m_Mean->GetTensorInfo();
160  const TensorInfo& var = cLayer->m_Variance->GetTensorInfo();
161  const TensorInfo& beta = cLayer->m_Beta->GetTensorInfo();
162  const TensorInfo& gamma = cLayer->m_Gamma->GetTensorInfo();
163  result = layerSupportObject.IsBatchNormalizationSupported(
164  OverrideDataType(input, dataType),
165  OverrideDataType(output, dataType),
166  OverrideDataType(mean, dataType),
167  OverrideDataType(var, dataType),
168  OverrideDataType(beta, dataType),
169  OverrideDataType(gamma, dataType),
170  cLayer->GetParameters(),
171  reason);
172  break;
173  }
175  {
176  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
177  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
178  auto cLayer = PolymorphicDowncast<const BatchToSpaceNdLayer*>(&layer);
179 
180  result = layerSupportObject.IsBatchToSpaceNdSupported(OverrideDataType(input, dataType),
181  OverrideDataType(output, dataType),
182  cLayer->GetParameters(),
183  reason);
184  break;
185  }
186  case LayerType::Cast:
187  {
188  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
189  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
190 
191  result = layerSupportObject.IsCastSupported(OverrideDataType(input, dataType),
192  OverrideDataType(output, dataType),
193  reason);
194  break;
195  }
197  {
198  auto cLayer = PolymorphicDowncast<const ChannelShuffleLayer*>(&layer);
199 
200  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
201  const TensorInfo& output = layer.GetInputSlot(0).GetTensorInfo();
202 
203  const ChannelShuffleDescriptor descriptor = cLayer->GetParameters();
204 
205  result = layerSupportObject.IsChannelShuffleSupported(OverrideDataType(input, dataType),
206  OverrideDataType(output, dataType),
207  descriptor,
208  reason);
209  break;
210  }
212  {
213  auto cLayer = PolymorphicDowncast<const ComparisonLayer*>(&layer);
214 
215  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
216  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
217  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
218 
219  result = layerSupportObject.IsComparisonSupported(OverrideDataType(input0, dataType),
220  OverrideDataType(input1, dataType),
221  OverrideDataType(output, DataType::Boolean),
222  cLayer->GetParameters(),
223  reason);
224  break;
225  }
226  case LayerType::Constant:
227  {
228  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
229  result = layerSupportObject.IsConstantSupported(OverrideDataType(output, dataType), reason);
230  break;
231  }
233  {
234  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
235  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
236  result = layerSupportObject.IsConvertFp16ToFp32Supported(input, output, reason);
237  break;
238  }
240  {
241  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
242  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
243  result = layerSupportObject.IsConvertFp32ToFp16Supported(input, output, reason);
244  break;
245  }
247  {
248  auto cLayer = PolymorphicDowncast<const Convolution2dLayer*>(&layer);
249 
250  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetTensorInfo(),
251  dataType);
252  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
253  ARMNN_ASSERT_MSG(layer.GetInputSlot(1).GetConnection(),
254  "Convolution2dLayer: Weights should be connected as a Constant Layer.");
255  const TensorInfo weights = OverrideDataType(layer.GetInputSlot(1).GetTensorInfo(),
256  dataType);
257 
258  const Convolution2dDescriptor& descriptor = cLayer->GetParameters();
259 
260  // Construct optional biases object based on the value of m_BiasEnabled
261  Optional<TensorInfo> biases;
262  if (descriptor.m_BiasEnabled)
263  {
264  ARMNN_ASSERT_MSG(layer.GetInputSlot(2).GetConnection(),
265  "Convolution2dLayer: Bias should be connected as a Constant Layer.");
266  biases = OverrideDataType(layer.GetInputSlot(2).GetTensorInfo(),
267  GetBiasTypeFromWeightsType(dataType));
268  }
269 
270  result = layerSupportObject.IsConvolution2dSupported(
271  input,
272  output,
273  descriptor,
274  weights,
275  biases,
276  reason);
277  break;
278  }
280  {
281  auto cLayer = PolymorphicDowncast<const Convolution3dLayer*>(&layer);
282 
283  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetTensorInfo(),
284  dataType);
285  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
286 
287  ARMNN_ASSERT_MSG(layer.GetInputSlot(1).GetConnection(),
288  "Convolution3dLayer: Weights should be connected as a Constant Layer.");
289  const TensorInfo weights = OverrideDataType(layer.GetInputSlot(1).GetTensorInfo(),
290  dataType);
291 
292  const Convolution3dDescriptor& descriptor = cLayer->GetParameters();
293 
294  // Construct optional biases object based on the value of m_BiasEnabled
295  Optional<TensorInfo> biases;
296  if (descriptor.m_BiasEnabled)
297  {
298  biases = OverrideDataType(layer.GetInputSlot(2).GetTensorInfo(),
299  GetBiasTypeFromWeightsType(dataType));
300  }
301 
302  result = layerSupportObject.IsConvolution3dSupported(
303  input,
304  output,
305  descriptor,
306  weights,
307  biases,
308  reason);
309  break;
310  }
311  case LayerType::Debug:
312  {
313  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
314  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
315 
316  result = layerSupportObject.IsDebugSupported(OverrideDataType(input, dataType),
317  OverrideDataType(output, dataType),
318  reason);
319  break;
320  }
322  {
323  auto cLayer = PolymorphicDowncast<const DepthToSpaceLayer*>(&layer);
324 
325  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
326  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
327 
328  result = layerSupportObject.IsDepthToSpaceSupported(OverrideDataType(input, dataType),
329  OverrideDataType(output, dataType),
330  cLayer->GetParameters(),
331  reason);
332  break;
333  }
335  {
336  auto cLayer = PolymorphicDowncast<const DepthwiseConvolution2dLayer*>(&layer);
337  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetTensorInfo(),
338  dataType);
339  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
340  const TensorInfo& weights = OverrideDataType(layer.GetInputSlot(1).GetTensorInfo(),
341  dataType);
342 
343  ARMNN_ASSERT(cLayer->GetInputSlot(1).GetConnection() != nullptr);
344 
345  const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();
346 
347  // Construct optional biases object based on the value of m_BiasEnabled
348  Optional<TensorInfo> biases;
349  if (descriptor.m_BiasEnabled)
350  {
351  biases = OverrideDataType(cLayer->GetInputSlot(2).GetTensorInfo(),
352  GetBiasTypeFromWeightsType(dataType));
353  }
354 
355  result = layerSupportObject.IsDepthwiseConvolutionSupported(input,
356  output,
357  descriptor,
358  weights,
359  biases,
360  reason);
361  break;
362  }
364  {
365  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
366  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
367 
368  result = layerSupportObject.IsDequantizeSupported(input,
369  OverrideDataType(output, dataType),
370  reason);
371  break;
372  }
374  {
375  auto cLayer = PolymorphicDowncast<const DetectionPostProcessLayer*>(&layer);
376  const TensorInfo& boxEncodings = layer.GetInputSlot(0).GetTensorInfo();
377  const TensorInfo& scores = layer.GetInputSlot(1).GetTensorInfo();
378  const TensorInfo& anchors = cLayer->m_Anchors->GetTensorInfo();
379 
380  const TensorInfo& detectionBoxes = layer.GetOutputSlot(0).GetTensorInfo();
381  const TensorInfo& detectionClasses = layer.GetOutputSlot(1).GetTensorInfo();
382  const TensorInfo& detectionScores = layer.GetOutputSlot(2).GetTensorInfo();
383  const TensorInfo& numDetections = layer.GetOutputSlot(3).GetTensorInfo();
384 
385  const DetectionPostProcessDescriptor& descriptor = cLayer->GetParameters();
386  result = layerSupportObject.IsDetectionPostProcessSupported(boxEncodings,
387  scores,
388  anchors,
389  detectionBoxes,
390  detectionClasses,
391  detectionScores,
392  numDetections,
393  descriptor,
394  reason);
395  break;
396  }
398  {
399  auto cLayer = PolymorphicDowncast<const ElementwiseBinaryLayer*>(&layer);
400 
401  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
402  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
403  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
404  std::vector<TensorInfo> infos = { OverrideDataType(input0, dataType),
405  OverrideDataType(input1, dataType),
406  OverrideDataType(output, dataType) };
407  result = layerSupport->IsLayerSupported(LayerType::ElementwiseBinary,
408  infos,
409  cLayer->GetParameters(),
410  EmptyOptional(),
411  EmptyOptional(),
412  reason);
413  break;
414  }
416  {
417  auto cLayer = PolymorphicDowncast<const ElementwiseUnaryLayer*>(&layer);
418 
419  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
420  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
421 
422  result = layerSupportObject.IsElementwiseUnarySupported(OverrideDataType(input, dataType),
423  OverrideDataType(output, dataType),
424  cLayer->GetParameters(),
425  reason);
426  break;
427  }
428  case LayerType::Fill:
429  {
430  auto cLayer = PolymorphicDowncast<const FillLayer*>(&layer);
431  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
432  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
433  const FillDescriptor& descriptor = cLayer->GetParameters();
434 
435  result = layerSupportObject.IsFillSupported(
436  OverrideDataType(input, dataType),
437  OverrideDataType(output, dataType),
438  descriptor,
439  reason);
440  break;
441  }
443  {
444  auto cLayer = PolymorphicDowncast<const FakeQuantizationLayer*>(&layer);
445  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
446  result = layerSupportObject.IsFakeQuantizationSupported(OverrideDataType(input, dataType),
447  cLayer->GetParameters(),
448  reason);
449  break;
450  }
451  case LayerType::Floor:
452  {
453  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
454  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
455  result = layerSupportObject.IsFloorSupported(OverrideDataType(input, dataType),
456  OverrideDataType(output, dataType),
457  reason);
458  break;
459  }
461  {
462  auto cLayer = PolymorphicDowncast<const FullyConnectedLayer*>(&layer);
463  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
464  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
465 
466  const FullyConnectedDescriptor& descriptor = cLayer->GetParameters();
467  TensorInfo weightsInfo;
468  const TensorInfo* weightsInfoPtr = nullptr;
469 
470  weightsInfo = OverrideDataType(layer.GetInputSlot(1).GetTensorInfo(), dataType);
471  weightsInfoPtr = &weightsInfo;
472 
473  TensorInfo biasInfo;
474  const TensorInfo* biasInfoPtr = nullptr;
475  static const TensorInfo dummyBFloat16Bias(TensorShape({1,1,1,1}), DataType::BFloat16);
476  static const TensorInfo dummyFloat16Bias(TensorShape({1,1,1,1}), DataType::Float16);
477  static const TensorInfo dummyFloat32Bias(TensorShape({1,1,1,1}), DataType::Float32);
478  static const TensorInfo dummyQA8Bias(TensorShape({1,1,1,1}), DataType::Signed32);
479 
480  if (descriptor.m_BiasEnabled)
481  {
482  biasInfo = OverrideDataType(layer.GetInputSlot(2).GetTensorInfo(), dataType);
483  biasInfoPtr = &biasInfo;
484  }
485  else
486  {
487  // If biases are not enabled pass a dummy tensorinfo for the validation
488  switch(input.GetDataType())
489  {
490  case DataType::BFloat16:
491  {
492  biasInfoPtr = &dummyBFloat16Bias;
493  break;
494  }
495  case DataType::Float16:
496  {
497  biasInfoPtr = &dummyFloat16Bias;
498  break;
499  }
500  case DataType::Float32:
501  {
502  biasInfoPtr = &dummyFloat32Bias;
503  break;
504  }
505  case DataType::QAsymmU8:
506  case DataType::QAsymmS8:
507  case DataType::QSymmS8:
508  case DataType::QSymmS16:
509  {
510  biasInfoPtr = &dummyQA8Bias;
511  break;
512  }
513  default:
514  {
515  ARMNN_ASSERT_MSG(false, "Unexpected bias type");
516  }
517  }
518  }
519  result = layerSupportObject.IsFullyConnectedSupported(
520  OverrideDataType(input, dataType),
521  OverrideDataType(output, dataType),
522  *weightsInfoPtr,
523  *biasInfoPtr,
524  descriptor,
525  reason);
526  break;
527  }
528  case LayerType::Gather:
529  {
530  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
531  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
532  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
533  auto cLayer = PolymorphicDowncast<const GatherLayer*>(&layer);
534  const GatherDescriptor& descriptor = cLayer->GetParameters();
535  result = layerSupportObject.IsGatherSupported(OverrideDataType(input0, dataType),
536  input1,
537  OverrideDataType(output, dataType),
538  descriptor,
539  reason);
540  break;
541  }
542  case LayerType::GatherNd:
543  {
544  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
545  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
546  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
547  result = layerSupportObject.IsGatherNdSupported(OverrideDataType(input0, dataType),
548  input1,
549  OverrideDataType(output, dataType),
550  reason);
551  break;
552  }
553  case LayerType::Input:
554  {
555  const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo();
556  result = layerSupportObject.IsInputSupported(OverrideDataType(input, dataType), reason);
557  break;
558  }
560  {
561  auto cLayer = PolymorphicDowncast<const InstanceNormalizationLayer*>(&layer);
562  const InstanceNormalizationDescriptor& descriptor = cLayer->GetParameters();
563 
564  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
565  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
566 
567  result = layerSupportObject.IsInstanceNormalizationSupported(
568  OverrideDataType(input, dataType),
569  OverrideDataType(output, dataType),
570  descriptor,
571  reason);
572  break;
573  }
575  {
576  auto cLayer = PolymorphicDowncast<const L2NormalizationLayer*>(&layer);
577  const L2NormalizationDescriptor& descriptor = cLayer->GetParameters();
578 
579  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
580  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
581 
582  result = layerSupportObject.IsL2NormalizationSupported(
583  OverrideDataType(input, dataType),
584  OverrideDataType(output, dataType),
585  descriptor,
586  reason);
587  break;
588  }
590  {
591  auto cLayer = PolymorphicDowncast<const LogicalBinaryLayer*>(&layer);
592 
593  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
594  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
595  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
596 
597  result = layerSupportObject.IsLogicalBinarySupported(input0,
598  input1,
599  output,
600  cLayer->GetParameters(),
601  reason);
602  break;
603  }
605  {
606  auto cLayer = PolymorphicDowncast<const LogSoftmaxLayer*>(&layer);
607 
608  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
609  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
610 
611  result = layerSupportObject.IsLogSoftmaxSupported(OverrideDataType(input, dataType),
612  OverrideDataType(output, dataType),
613  cLayer->GetParameters(),
614  reason);
615  break;
616  }
617  case LayerType::Lstm:
618  {
619  auto cLayer = PolymorphicDowncast<const LstmLayer*>(&layer);
620  const LstmDescriptor& descriptor = cLayer->GetParameters();
621 
622  // All inputs.
623  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetTensorInfo(),
624  dataType);
625  const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetTensorInfo(),
626  dataType);
627  const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetTensorInfo(),
628  dataType);
629  // All outputs
630  const TensorInfo& scratchBuffer = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
631  const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
632  const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
633  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(3).GetTensorInfo(), dataType);
634 
635  // Basic parameters
636  const TensorInfo& inputToForgetWeights
637  = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
638  const TensorInfo& inputToCellWeights
639  = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
640  const TensorInfo& inputToOutputWeights
641  = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
642  const TensorInfo& recurrentToForgetWeights
643  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
644  const TensorInfo& recurrentToCellWeights
645  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
646  const TensorInfo& recurrentToOutputWeights
647  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
648  const TensorInfo& forgetGateBias
649  = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
650  const TensorInfo& cellBias
651  = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
652  const TensorInfo& outputGateBias
653  = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);
654 
655  LstmInputParamsInfo paramsInfo;
656 
657  paramsInfo.m_InputToForgetWeights = &inputToForgetWeights;
658  paramsInfo.m_InputToCellWeights = &inputToCellWeights;
659  paramsInfo.m_InputToOutputWeights = &inputToOutputWeights;
660  paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
661  paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights;
662  paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
663  paramsInfo.m_ForgetGateBias = &forgetGateBias;
664  paramsInfo.m_CellBias = &cellBias;
665  paramsInfo.m_OutputGateBias = &outputGateBias;
666 
667 
668  // Optional parameters
669  TensorInfo optInputToInputWeights;
670  TensorInfo optRecurrentToInputWeights;
671  TensorInfo optCellToInputWeights;
672  TensorInfo optInputGateBias;
673  TensorInfo optProjectionWeights;
674  TensorInfo optProjectionBias;
675  TensorInfo optCellToForgetWeights;
676  TensorInfo optCellToOutputWeights;
677  TensorInfo optInputLayerNormWeights;
678  TensorInfo optForgetLayerNormWeights;
679  TensorInfo optCellLayerNormWeights;
680  TensorInfo optOutputLayerNormWeights;
681 
682  if(!descriptor.m_CifgEnabled)
683  {
684  optInputToInputWeights =
685  OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
686  paramsInfo.m_InputToInputWeights = &optInputToInputWeights;
687 
688  optRecurrentToInputWeights =
689  OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
690  paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
691  optInputGateBias =
692  OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
693  paramsInfo.m_InputGateBias = &optInputGateBias;
694  }
695 
696  if(descriptor.m_ProjectionEnabled)
697  {
698  optProjectionWeights =
699  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
700  paramsInfo.m_ProjectionWeights = &optProjectionWeights;
701  if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
702  {
703  optProjectionBias =
704  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
705  paramsInfo.m_ProjectionBias = &optProjectionBias;
706  }
707  }
708 
709  if(descriptor.m_PeepholeEnabled)
710  {
711  if(!descriptor.m_CifgEnabled)
712  {
713  optCellToInputWeights =
714  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
715  dataType);
716  paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
717  }
718  optCellToForgetWeights =
719  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
720  paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
721  optCellToOutputWeights =
722  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
723  paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
724  }
725 
726  if(descriptor.m_LayerNormEnabled)
727  {
728  if (!descriptor.m_CifgEnabled)
729  {
730  optInputLayerNormWeights = OverrideDataType(
731  cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
732  paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
733  }
734 
735  optForgetLayerNormWeights = OverrideDataType(
736  cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
737  paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;
738 
739  optCellLayerNormWeights = OverrideDataType(
740  cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
741  paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;
742 
743  optOutputLayerNormWeights = OverrideDataType(
744  cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
745  paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
746  }
747 
748  result = layerSupportObject.IsLstmSupported(
749  input,
750  outputStateIn,
751  cellStateIn,
752  scratchBuffer,
753  outputStateOut,
754  cellStateOut,
755  output,
756  descriptor,
757  paramsInfo,
758  reason);
759  break;
760  }
761  case LayerType::Maximum:
762  {
764  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
765  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
766  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
767 
768  result = layerSupportObject.IsMaximumSupported(OverrideDataType(input0, dataType),
769  OverrideDataType(input1, dataType),
770  OverrideDataType(output, dataType),
771  reason);
773  break;
774  }
775  case LayerType::MemCopy:
776  {
777  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
778  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
779 
780  result = layerSupportObject.IsMemCopySupported(OverrideDataType(input, dataType),
781  OverrideDataType(output, dataType),
782  reason);
783  break;
784  }
786  {
787  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
788  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
789 
790  result = layerSupportObject.IsMemImportSupported(OverrideDataType(input, dataType),
791  OverrideDataType(output, dataType),
792  reason);
793  break;
794  }
795  case LayerType::Merge:
796  {
797  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
798  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
799  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
800 
801  result = layerSupportObject.IsMergeSupported(OverrideDataType(input0, dataType),
802  OverrideDataType(input1, dataType),
803  OverrideDataType(output, dataType),
804  reason);
805  break;
806  }
807  case LayerType::Concat:
808  {
809  auto cLayer = PolymorphicDowncast<const ConcatLayer*>(&layer);
810 
811  // Get vector of all inputs.
812  auto getTensorInfo = [&dataType](const InputSlot& slot)
813  {
814  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
815  };
816 
817  auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo);
818  auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo);
819  std::vector<TensorInfo> inputs(beginI, endI);
820 
821  auto getTensorInfoPtr = [](const TensorInfo& info)
822  {
823  return &info;
824  };
825 
826  auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
827  auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
828  std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
829 
830  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
831 
832  result = layerSupportObject.IsConcatSupported(inputPtrs, output, cLayer->GetParameters(), reason);
833 
834 
835  break;
836  }
838  {
840  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
841  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
842  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
843  result = layerSupportObject.IsMultiplicationSupported(
844  OverrideDataType(input0, dataType),
845  OverrideDataType(input1, dataType),
846  OverrideDataType(output, dataType),
847  reason);
849  break;
850  }
852  {
853  auto cLayer = PolymorphicDowncast<const NormalizationLayer*>(&layer);
854  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
855  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
856  result = layerSupportObject.IsNormalizationSupported(OverrideDataType(input, dataType),
857  OverrideDataType(output, dataType),
858  cLayer->GetParameters(),
859  reason);
860  break;
861  }
862  case LayerType::Output:
863  {
864  const TensorInfo& output = layer.GetInputSlot(0).GetTensorInfo();
865  result = layerSupportObject.IsOutputSupported(OverrideDataType(output, dataType), reason);
866  break;
867  }
868  case LayerType::Permute:
869  {
870  auto cLayer = PolymorphicDowncast<const PermuteLayer*>(&layer);
871  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
872  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
873  result = layerSupportObject.IsPermuteSupported(OverrideDataType(input, dataType),
874  OverrideDataType(output, dataType),
875  cLayer->GetParameters(),
876  reason);
877  break;
878  }
879  case LayerType::Pad:
880  {
881  auto cLayer = PolymorphicDowncast<const PadLayer*>(&layer);
882  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
883  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
884  result = layerSupportObject.IsPadSupported(
885  OverrideDataType(input, dataType),
886  OverrideDataType(output, dataType),
887  cLayer->GetParameters(),
888  reason);
889  break;
890  }
892  {
893  auto cLayer = PolymorphicDowncast<const Pooling2dLayer*>(&layer);
894  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
895  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
896  result = layerSupportObject.IsPooling2dSupported(OverrideDataType(input, dataType),
897  OverrideDataType(output, dataType),
898  cLayer->GetParameters(),
899  reason);
900  break;
901  }
903  {
904  auto cLayer = PolymorphicDowncast<const Pooling3dLayer*>(&layer);
905  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
906  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
907  result = layerSupportObject.IsPooling3dSupported(OverrideDataType(input, dataType),
908  OverrideDataType(output, dataType),
909  cLayer->GetParameters(),
910  reason);
911  break;
912  }
914  {
915  auto cLayer = PolymorphicDowncast<const PreCompiledLayer*>(&layer);
916  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
917  result = layerSupportObject.IsPreCompiledSupported(OverrideDataType(input, dataType),
918  cLayer->GetParameters(),
919  reason);
920  break;
921  }
922  case LayerType::Quantize:
923  {
924  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
925  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
926  result = layerSupportObject.IsQuantizeSupported(input, output, reason);
927  break;
928  }
929  case LayerType::QLstm:
930  {
931  auto cLayer = PolymorphicDowncast<const QLstmLayer*>(&layer);
932  const QLstmDescriptor& descriptor = cLayer->GetParameters();
933 
934  // Inputs
935  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
936  const TensorInfo& previousOutputIn = layer.GetInputSlot(1).GetTensorInfo();
937  const TensorInfo& previousCellStateIn = layer.GetInputSlot(2).GetTensorInfo();
938 
939  // Outputs
940  const TensorInfo& outputStateOut = layer.GetOutputSlot(0).GetTensorInfo();
941  const TensorInfo& cellStateOut = layer.GetOutputSlot(1).GetTensorInfo();
942  const TensorInfo& output = layer.GetOutputSlot(2).GetTensorInfo();
943 
944  // Lstm parameters
945  LstmInputParamsInfo paramsInfo;
946 
947  // Basic parameters
948  ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToForgetWeights.get() != nullptr);
949  ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToCellWeights.get() != nullptr);
950  ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToOutputWeights.get() != nullptr);
951  paramsInfo.m_InputToForgetWeights = &cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo();
952  paramsInfo.m_InputToCellWeights = &cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo();
953  paramsInfo.m_InputToOutputWeights = &cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo();
954 
955  paramsInfo.m_RecurrentToForgetWeights =
956  &cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo();
957  paramsInfo.m_RecurrentToCellWeights =
958  &cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo();
959  paramsInfo.m_RecurrentToOutputWeights =
960  &cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo();
961 
962  paramsInfo.m_ForgetGateBias = &cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo();
963  paramsInfo.m_CellBias = &cLayer->m_BasicParameters.m_CellBias->GetTensorInfo();
964  paramsInfo.m_OutputGateBias = &cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo();
965 
966  if(!descriptor.m_CifgEnabled)
967  {
968  paramsInfo.m_InputToInputWeights = &cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo();
969  paramsInfo.m_RecurrentToInputWeights =
970  &cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo();
971  paramsInfo.m_InputGateBias = &cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo();
972  }
973 
974  if(descriptor.m_ProjectionEnabled)
975  {
976  paramsInfo.m_ProjectionWeights = &cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo();
977 
978  // Projection bias is optional even if projection is enabled
979  if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
980  {
981  paramsInfo.m_ProjectionBias = &cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo();
982  }
983  }
984 
985  if(descriptor.m_PeepholeEnabled)
986  {
987  if (!descriptor.m_CifgEnabled)
988  {
989  paramsInfo.m_CellToInputWeights =
990  &cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo();
991  }
992 
993  paramsInfo.m_CellToForgetWeights =
994  &cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo();
995  paramsInfo.m_CellToOutputWeights = &cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo();
996  }
997 
998  if(descriptor.m_LayerNormEnabled)
999  {
1000  if (!descriptor.m_CifgEnabled)
1001  {
1002  paramsInfo.m_InputLayerNormWeights =
1003  &cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo();
1004  }
1005 
1006  paramsInfo.m_ForgetLayerNormWeights =
1007  &cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo();
1008  paramsInfo.m_CellLayerNormWeights =
1009  &cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo();
1010  paramsInfo.m_OutputLayerNormWeights =
1011  &cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo();
1012  }
1013 
1014  result = layerSupportObject.IsQLstmSupported(input,
1015  previousOutputIn,
1016  previousCellStateIn,
1017  outputStateOut,
1018  cellStateOut,
1019  output,
1020  descriptor,
1021  paramsInfo,
1022  reason);
1023  break;
1024  }
1026  {
1027  auto cLayer = PolymorphicDowncast<const QuantizedLstmLayer*>(&layer);
1028 
1029  // Inputs
1030  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1031  const TensorInfo& previousCellStateIn = layer.GetInputSlot(1).GetTensorInfo();
1032  const TensorInfo& previousOutputIn = layer.GetInputSlot(2).GetTensorInfo();
1033 
1034  // Outputs
1035  const TensorInfo& cellStateOut = layer.GetOutputSlot(0).GetTensorInfo();
1036  const TensorInfo& output = layer.GetOutputSlot(1).GetTensorInfo();
1037 
1038  // QuantizedLstm parameters
1039  QuantizedLstmInputParamsInfo paramsInfo;
1040 
1041  paramsInfo.m_InputToInputWeights =
1042  &cLayer->m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo();
1043  paramsInfo.m_InputToForgetWeights =
1044  &cLayer->m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo();
1045  paramsInfo.m_InputToCellWeights =
1046  &cLayer->m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo();
1047  paramsInfo.m_InputToOutputWeights =
1048  &cLayer->m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo();
1049 
1050  paramsInfo.m_RecurrentToInputWeights =
1051  &cLayer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo();
1052  paramsInfo.m_RecurrentToForgetWeights =
1053  &cLayer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo();
1054  paramsInfo.m_RecurrentToCellWeights =
1055  &cLayer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo();
1056  paramsInfo.m_RecurrentToOutputWeights =
1057  &cLayer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo();
1058 
1059  paramsInfo.m_InputGateBias =
1060  &cLayer->m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo();
1061  paramsInfo.m_ForgetGateBias =
1062  &cLayer->m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo();
1063  paramsInfo.m_CellBias =
1064  &cLayer->m_QuantizedLstmParameters.m_CellBias->GetTensorInfo();
1065  paramsInfo.m_OutputGateBias =
1066  &cLayer->m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo();;
1067 
1068  result = layerSupportObject.IsQuantizedLstmSupported(input,
1069  previousCellStateIn,
1070  previousOutputIn,
1071  cellStateOut,
1072  output,
1073  paramsInfo,
1074  reason);
1075  break;
1076  }
1077  case LayerType::Division:
1078  {
1080  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
1081  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
1082  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1083  result = layerSupportObject.IsDivisionSupported(
1084  OverrideDataType(input0, dataType),
1085  OverrideDataType(input1, dataType),
1086  OverrideDataType(output, dataType),
1087  reason);
1089  break;
1090  }
1091  case LayerType::Rank:
1092  {
1093  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1094  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1095  result = layerSupportObject.IsRankSupported(OverrideDataType(input, dataType),
1096  OverrideDataType(output, dataType),
1097  reason);
1098  break;
1099  }
1100  case LayerType::Reshape:
1101  {
1102  auto cLayer = PolymorphicDowncast<const ReshapeLayer*>(&layer);
1103  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1104  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1105  result = layerSupportObject.IsReshapeSupported(OverrideDataType(input, dataType),
1106  OverrideDataType(output, dataType),
1107  cLayer->GetParameters(),
1108  reason);
1109  break;
1110  }
1111  case LayerType::Resize:
1112  {
1113  auto cLayer = PolymorphicDowncast<const ResizeLayer*>(&layer);
1114  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1115  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1116  result = layerSupportObject.IsResizeSupported(OverrideDataType(input, dataType),
1117  OverrideDataType(output, dataType),
1118  cLayer->GetParameters(),
1119  reason);
1120  break;
1121  }
1122  case LayerType::ReverseV2:
1123  {
1124  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1125  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1126  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1127  result = layerSupportObject.IsReverseV2Supported(OverrideDataType(input0, dataType),
1128  OverrideDataType(input1, armnn::DataType::Signed32),
1129  OverrideDataType(output, dataType),
1130  reason);
1131  break;
1132  }
1133  case LayerType::Shape:
1134  {
1135  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1136  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1137 
1138  result = layerSupportObject.IsShapeSupported(OverrideDataType(input, dataType),
1139  OverrideDataType(output, dataType),
1140  reason);
1141  break;
1142  }
1143  case LayerType::Slice:
1144  {
1145  auto cLayer = PolymorphicDowncast<const SliceLayer*>(&layer);
1146 
1147  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1148  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1149 
1150  result = layerSupportObject.IsSliceSupported(OverrideDataType(input, dataType),
1151  OverrideDataType(output, dataType),
1152  cLayer->GetParameters(),
1153  reason);
1154  break;
1155  }
1156  case LayerType::Softmax:
1157  {
1158  auto cLayer = PolymorphicDowncast<const SoftmaxLayer*>(&layer);
1159  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1160  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1161  result = layerSupportObject.IsSoftmaxSupported(OverrideDataType(input, dataType),
1162  OverrideDataType(output, dataType),
1163  cLayer->GetParameters(),
1164  reason);
1165  break;
1166  }
1168  {
1169  auto cLayer = PolymorphicDowncast<const SpaceToBatchNdLayer*>(&layer);
1170  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1171  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1172  result = layerSupportObject.IsSpaceToBatchNdSupported(OverrideDataType(input, dataType),
1173  OverrideDataType(output, dataType),
1174  cLayer->GetParameters(),
1175  reason);
1176  break;
1177  }
1179  {
1180  auto cLayer = PolymorphicDowncast<const SpaceToDepthLayer*>(&layer);
1181 
1182  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1183  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1184 
1185  result = layerSupportObject.IsSpaceToDepthSupported(OverrideDataType(input, dataType),
1186  OverrideDataType(output, dataType),
1187  cLayer->GetParameters(),
1188  reason);
1189  break;
1190  }
1191  case LayerType::Splitter:
1192  {
1193  auto cLayer = PolymorphicDowncast<const SplitterLayer*>(&layer);
1194  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1195 
1196  // Get vector of all outputs.
1197  auto getTensorInfo = [&dataType](const OutputSlot& slot)
1198  {
1199  return OverrideDataType(slot.GetTensorInfo(), dataType);
1200  };
1201  auto beginI = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfo);
1202  auto endI = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfo);
1203  std::vector<TensorInfo> outputs(beginI, endI);
1204 
1205  const std::vector<std::reference_wrapper<TensorInfo>> outputPtrs(outputs.begin(), outputs.end());
1206 
1207  result = layerSupportObject.IsSplitterSupported(OverrideDataType(input, dataType),
1208  outputPtrs,
1209  cLayer->GetParameters(),
1210  reason);
1211  break;
1212  }
1213  case LayerType::Stack:
1214  {
1215  auto cLayer = PolymorphicDowncast<const StackLayer*>(&layer);
1216 
1217  // Get vector of all inputs.
1218  auto getTensorInfo = [&dataType](const InputSlot& slot)
1219  {
1220  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
1221  };
1222  auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo);
1223  auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo);
1224  std::vector<TensorInfo> inputs(beginI, endI);
1225 
1226  auto getTensorInfoPtr = [](const TensorInfo& info)
1227  {
1228  return &info;
1229  };
1230  auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
1231  auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
1232  std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
1233 
1234  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1235 
1236  result = layerSupportObject.IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason);
1237 
1238  break;
1239  }
1240  case LayerType::StandIn:
1241  {
1242  auto cLayer = PolymorphicDowncast<const StandInLayer*>(&layer);
1243 
1244  // Get vector of all inputs.
1245  auto getTensorInfoIn = [&dataType](const InputSlot& slot)
1246  {
1247  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
1248  };
1249  auto getTensorInfoOut = [&dataType](const OutputSlot& slot)
1250  {
1251  return OverrideDataType(slot.GetTensorInfo(), dataType);
1252  };
1253  auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfoIn);
1254  auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfoIn);
1255  std::vector<TensorInfo> inputs(beginI, endI);
1256 
1257  auto beginO = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfoOut);
1258  auto endO = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfoOut);
1259  std::vector<TensorInfo> outputs(beginO, endO);
1260 
1261 
1262  auto getTensorInfoPtr = [](const TensorInfo& info)
1263  {
1264  return &info;
1265  };
1266  auto beginPtrI = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
1267  auto endPtrI = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
1268  std::vector<const TensorInfo*> inputPtrs(beginPtrI, endPtrI);
1269 
1270  auto beginPtrO = MakeTransformIterator(outputs.begin(), getTensorInfoPtr);
1271  auto endPtrO = MakeTransformIterator(outputs.end(), getTensorInfoPtr);
1272  std::vector<const TensorInfo*> outputPtrs(beginPtrO, endPtrO);
1273 
1274 
1275  result = layerSupportObject.IsStandInSupported(inputPtrs,
1276  outputPtrs,
1277  cLayer->GetParameters(),
1278  reason);
1279  break;
1280  }
1282  {
1283  auto cLayer = PolymorphicDowncast<const StridedSliceLayer*>(&layer);
1284  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1285  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1286  result = layerSupportObject.IsStridedSliceSupported(OverrideDataType(input, dataType),
1287  OverrideDataType(output, dataType),
1288  cLayer->GetParameters(),
1289  reason);
1290  break;
1291  }
1293  {
1295  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
1296  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
1297  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1298  result = layerSupportObject.IsSubtractionSupported(
1299  OverrideDataType(input0, dataType),
1300  OverrideDataType(input1, dataType),
1301  OverrideDataType(output, dataType),
1302  reason);
1304  break;
1305  }
1306  case LayerType::Switch:
1307  {
1308  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
1309  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
1310  const TensorInfo& output0 = layer.GetOutputSlot(0).GetTensorInfo();
1311  const TensorInfo& output1 = layer.GetOutputSlot(1).GetTensorInfo();
1312  result = layerSupportObject.IsSwitchSupported(OverrideDataType(input0, dataType),
1313  OverrideDataType(input1, dataType),
1314  OverrideDataType(output0, dataType),
1315  OverrideDataType(output1, dataType),
1316  reason);
1317  break;
1318  }
1319  case LayerType::Mean:
1320  {
1321  auto cLayer = PolymorphicDowncast<const MeanLayer*>(&layer);
1322  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1323  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1324  result = layerSupportObject.IsMeanSupported(
1325  OverrideDataType(input, dataType),
1326  OverrideDataType(output, dataType),
1327  cLayer->GetParameters(),
1328  reason);
1329  break;
1330  }
1331  case LayerType::Minimum:
1332  {
1334  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
1335  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
1336  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1337  result = layerSupportObject.IsMinimumSupported(OverrideDataType(input0, dataType),
1338  OverrideDataType(input1, dataType),
1339  OverrideDataType(output, dataType),
1340  reason);
1342  break;
1343  }
1344  case LayerType::Prelu:
1345  {
1346  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1347  const TensorInfo& alpha = layer.GetInputSlot(1).GetTensorInfo();
1348  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1349  result = layerSupportObject.IsPreluSupported(OverrideDataType(input, dataType),
1350  OverrideDataType(alpha, dataType),
1351  OverrideDataType(output, dataType),
1352  reason);
1353  break;
1354  }
1355  case LayerType::Tile:
1356  {
1357  auto cLayer = PolymorphicDowncast<const TileLayer*>(&layer);
1358  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1359  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1360 
1361  result = layerSupportObject.IsTileSupported(OverrideDataType(input, dataType),
1362  OverrideDataType(output, dataType),
1363  cLayer->GetParameters(),
1364  reason);
1365 
1366  break;
1367  }
1368  case LayerType::Transpose:
1369  {
1370  auto cLayer = PolymorphicDowncast<const TransposeLayer*>(&layer);
1371  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1372  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1373  result = layerSupportObject.IsTransposeSupported(OverrideDataType(input, dataType),
1374  OverrideDataType(output, dataType),
1375  cLayer->GetParameters(),
1376  reason);
1377  break;
1378  }
1380  {
1381  auto cLayer = PolymorphicDowncast<const TransposeConvolution2dLayer*>(&layer);
1382 
1383  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetTensorInfo(),
1384  dataType);
1385  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
1386 
1387  const TransposeConvolution2dDescriptor& descriptor = cLayer->GetParameters();
1388 
1389  Optional<TensorInfo> biases;
1390  if (descriptor.m_BiasEnabled)
1391  {
1392  ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
1393  biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
1394  GetBiasTypeFromWeightsType(dataType));
1395  }
1396 
1397  ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
1398  const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);
1399 
1400  result = layerSupportObject.IsTransposeConvolution2dSupported(input,
1401  output,
1402  descriptor,
1403  weights,
1404  biases,
1405  reason);
1406 
1407  break;
1408  }
1409  case LayerType::Reduce:
1410  {
1411  auto cLayer = PolymorphicDowncast<const ReduceLayer*>(&layer);
1412  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1413  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1414 
1415  result = layerSupportObject.IsReduceSupported(OverrideDataType(input, dataType),
1416  OverrideDataType(output, dataType),
1417  cLayer->GetParameters(),
1418  reason);
1419  break;
1420  }
1422  {
1423  auto cLayer = PolymorphicDowncast<const UnidirectionalSequenceLstmLayer*>(&layer);
1424  const UnidirectionalSequenceLstmDescriptor& descriptor = cLayer->GetParameters();
1425 
1426  // All inputs.
1427  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetTensorInfo(),
1428  dataType);
1429  const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetTensorInfo(),
1430  dataType);
1431  const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetTensorInfo(),
1432  dataType);
1433  // Outputs
1434  const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
1435  const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
1436  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
1437 
1438  // Basic parameters
1439  const TensorInfo& inputToForgetWeights
1440  = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
1441  const TensorInfo& inputToCellWeights
1442  = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
1443  const TensorInfo& inputToOutputWeights
1444  = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
1445  const TensorInfo& recurrentToForgetWeights
1446  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
1447  const TensorInfo& recurrentToCellWeights
1448  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
1449  const TensorInfo& recurrentToOutputWeights
1450  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
1451  const TensorInfo& forgetGateBias
1452  = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
1453  const TensorInfo& cellBias
1454  = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
1455  const TensorInfo& outputGateBias
1456  = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);
1457 
1458  LstmInputParamsInfo paramsInfo;
1459 
1460  paramsInfo.m_InputToForgetWeights = &inputToForgetWeights;
1461  paramsInfo.m_InputToCellWeights = &inputToCellWeights;
1462  paramsInfo.m_InputToOutputWeights = &inputToOutputWeights;
1463  paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1464  paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights;
1465  paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1466  paramsInfo.m_ForgetGateBias = &forgetGateBias;
1467  paramsInfo.m_CellBias = &cellBias;
1468  paramsInfo.m_OutputGateBias = &outputGateBias;
1469 
1470  // Optional parameters
1471  TensorInfo optInputToInputWeights;
1472  TensorInfo optRecurrentToInputWeights;
1473  TensorInfo optCellToInputWeights;
1474  TensorInfo optInputGateBias;
1475  TensorInfo optProjectionWeights;
1476  TensorInfo optProjectionBias;
1477  TensorInfo optCellToForgetWeights;
1478  TensorInfo optCellToOutputWeights;
1479  TensorInfo optInputLayerNormWeights;
1480  TensorInfo optForgetLayerNormWeights;
1481  TensorInfo optCellLayerNormWeights;
1482  TensorInfo optOutputLayerNormWeights;
1483 
1484  if(!descriptor.m_CifgEnabled)
1485  {
1486  optInputToInputWeights =
1487  OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
1488  paramsInfo.m_InputToInputWeights = &optInputToInputWeights;
1489 
1490  optRecurrentToInputWeights =
1491  OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
1492  paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
1493  optInputGateBias =
1494  OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
1495  paramsInfo.m_InputGateBias = &optInputGateBias;
1496  }
1497 
1498  if(descriptor.m_ProjectionEnabled)
1499  {
1500  optProjectionWeights =
1501  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
1502  paramsInfo.m_ProjectionWeights = &optProjectionWeights;
1503  if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
1504  {
1505  optProjectionBias =
1506  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
1507  paramsInfo.m_ProjectionBias = &optProjectionBias;
1508  }
1509  }
1510 
1511  if(descriptor.m_PeepholeEnabled)
1512  {
1513  if(!descriptor.m_CifgEnabled)
1514  {
1515  optCellToInputWeights =
1516  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
1517  dataType);
1518  paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
1519  }
1520  optCellToForgetWeights =
1521  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
1522  paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
1523  optCellToOutputWeights =
1524  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
1525  paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
1526  }
1527 
1528  if(descriptor.m_LayerNormEnabled)
1529  {
1530  if (!descriptor.m_CifgEnabled)
1531  {
1532  optInputLayerNormWeights = OverrideDataType(
1533  cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
1534  paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
1535  }
1536 
1537  optForgetLayerNormWeights = OverrideDataType(
1538  cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
1539  paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;
1540 
1541  optCellLayerNormWeights = OverrideDataType(
1542  cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
1543  paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;
1544 
1545  optOutputLayerNormWeights = OverrideDataType(
1546  cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
1547  paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
1548  }
1549 
1550  result = layerSupportObject.IsUnidirectionalSequenceLstmSupported(input,
1551  outputStateIn,
1552  cellStateIn,
1553  outputStateOut,
1554  cellStateOut,
1555  output,
1556  descriptor,
1557  paramsInfo,
1558  reason);
1559  break;
1560  }
1561  default:
1562  {
1563  ARMNN_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
1564  reason.value() = "Unrecognised layer type";
1565  result = false;
1566  break;
1567  }
1568  }
1569  return result;
1570 }
1571 
1573  const IConnectableLayer& connectableLayer,
1574  Optional<DataType> dataType,
1575  std::string& outReasonIfUnsupported)
1576 {
1577  return IsLayerConfigurationSupported(backendId, connectableLayer, dataType, outReasonIfUnsupported);
1578 }
1579 
1581  Optional<DataType> dataType,
1582  std::string& outReasonIfUnsupported)
1583 {
1584  auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
1585  return IsLayerConfigurationSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
1586 }
1587 
1589  Optional<DataType> dataType,
1590  std::string& outReasonIfUnsupported,
1591  const ModelOptions& modelOptions)
1592 {
1593  auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
1594  return IsLayerConfigurationSupported(layer->GetBackendId(),
1595  connectableLayer,
1596  dataType,
1597  outReasonIfUnsupported,
1598  modelOptions);
1599 }
1600 
1602  const IConnectableLayer& connectableLayer,
1603  Optional<DataType> dataType,
1604  std::string& outReasonIfUnsupported,
1605  const ModelOptions& modelOptions)
1606 {
1607  return IsLayerConfigurationSupported(backendId,
1608  connectableLayer,
1609  dataType,
1610  outReasonIfUnsupported,
1611  modelOptions);
1612 }
1613 
1614 } // namespace armnn
ARMNN_ASSERT
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
BackendHelper.hpp
armnn::LayerType::SpaceToDepth
@ SpaceToDepth
armnn::DataType::Boolean
@ Boolean
armnn::LayerType::Permute
@ Permute
armnn::LayerType::Splitter
@ Splitter
armnn::LayerType::BatchNormalization
@ BatchNormalization
armnn::Optional
Definition: Optional.hpp:270
armnn::LayerType::InstanceNormalization
@ InstanceNormalization
armnn::LayerType::ConvertFp16ToFp32
@ ConvertFp16ToFp32
armnn::LayerType::Floor
@ Floor
armnn::UnidirectionalSequenceLstmDescriptor
LstmDescriptor UnidirectionalSequenceLstmDescriptor
Definition: Descriptors.hpp:1148
armnn::LayerType::Transpose
@ Transpose
armnn::LayerType::Comparison
@ Comparison
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::DataType::Float32
@ Float32
armnn::GetBiasTypeFromWeightsType
armnn::Optional< armnn::DataType > GetBiasTypeFromWeightsType(armnn::Optional< armnn::DataType > weightsType)
Definition: LayerSupportRules.hpp:14
ARMNN_NO_DEPRECATE_WARN_BEGIN
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
armnn::LayerType::Tile
@ Tile
armnn::MakeTransformIterator
constexpr TransformIterator< Function, Iterator > MakeTransformIterator(Iterator i, Function f)
Definition: TransformIterator.hpp:86
armnn::DataType::QAsymmU8
@ QAsymmU8
armnn::LayerType::Stack
@ Stack
BackendRegistry.hpp
armnn::DataType::QSymmS8
@ QSymmS8
armnn::LayerType::Normalization
@ Normalization
armnn::LayerType::QuantizedLstm
@ QuantizedLstm
armnn::LayerType::Reduce
@ Reduce
ARMNN_ASSERT_MSG
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
ILayerSupport.hpp
TransformIterator.hpp
armnn::DataType::QSymmS16
@ QSymmS16
WorkloadFactory.hpp
armnn::LayerType::GatherNd
@ GatherNd
armnn::LayerType::ElementwiseBinary
@ ElementwiseBinary
armnn::DataType::BFloat16
@ BFloat16
armnn::LayerType::ConvertFp32ToFp16
@ ConvertFp32ToFp16
armnn::LayerType::Slice
@ Slice
armnn::DataType::Float16
@ Float16
armnn::LayerType::ChannelShuffle
@ ChannelShuffle
armnn::LayerType::Subtraction
@ Subtraction
armnn::LayerType::Prelu
@ Prelu
armnn::LayerType::LogicalBinary
@ LogicalBinary
armnn::LayerType::Concat
@ Concat
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
armnn::LayerType::Merge
@ Merge
PolymorphicDowncast.hpp
armnn::EmptyOptional
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
armnn::LayerType::StandIn
@ StandIn
armnn::LayerType::Debug
@ Debug
IBackendInternal.hpp
armnn::LayerType::Softmax
@ Softmax
LayersFwd.hpp
armnn::BackendRegistryInstance
BackendRegistry & BackendRegistryInstance()
Definition: BackendRegistry.cpp:15
armnn::LayerType::Quantize
@ Quantize
armnn::LayerType::Multiplication
@ Multiplication
armnn::LayerType::Addition
@ Addition
armnn::LayerType::DepthToSpace
@ DepthToSpace
armnn::BoostLogSeverityMapping::info
@ info
armnn::LayerType::DetectionPostProcess
@ DetectionPostProcess
armnn::LayerType::MemImport
@ MemImport
armnn::LayerType::Pooling2d
@ Pooling2d
armnn::IWorkloadFactory::IsLayerSupported
static bool IsLayerSupported(const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
Definition: WorkloadFactory.cpp:1572
armnn::LayerType::Division
@ Division
armnn::DataType::Signed32
@ Signed32
armnn::LayerType::Shape
@ Shape
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::LayerType::Gather
@ Gather
armnn::LayerType::Pooling3d
@ Pooling3d
armnn::LayerType::LogSoftmax
@ LogSoftmax
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::LayerType::Cast
@ Cast
armnn::LayerType::BatchToSpaceNd
@ BatchToSpaceNd
armnn::LayerType::Switch
@ Switch
armnn::LayerType::Reshape
@ Reshape
ARMNN_NO_DEPRECATE_WARN_END
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
armnn::LayerType::SpaceToBatchNd
@ SpaceToBatchNd
armnn::LayerType::Fill
@ Fill
armnn::LayerType::L2Normalization
@ L2Normalization
armnn::LayerType::Minimum
@ Minimum
armnn::LayerType::PreCompiled
@ PreCompiled
armnn::LayerType::UnidirectionalSequenceLstm
@ UnidirectionalSequenceLstm
armnn::BackendId
Definition: BackendId.hpp:75
armnn::LayerType::ReverseV2
@ ReverseV2
armnn::LayerType::MemCopy
@ MemCopy
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
Types.hpp
armnn::LayerType::ArgMinMax
@ ArgMinMax
armnn::LayerType::Pad
@ Pad
Layer.hpp
armnn::LayerType::Rank
@ Rank
armnn::LayerType::Mean
@ Mean
armnn::IConnectableLayer
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:80
armnn::LayerType::Input
@ Input
armnn::ModelOptions
std::vector< BackendOptions > ModelOptions
Definition: BackendOptions.hpp:18
armnn::LayerType::Resize
@ Resize
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::LayerType::FakeQuantization
@ FakeQuantization
armnn::LayerType::Maximum
@ Maximum
armnn::LayerType::Activation
@ Activation
armnn::LayerType::Lstm
@ Lstm
armnn::LayerType::Dequantize
@ Dequantize
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::LayerType::QLstm
@ QLstm
armnn::OptionalReferenceSwitch< std::is_reference< T >::value, T >::value
const T & value() const
Definition: Optional.hpp:146
armnn::LayerType::Output
@ Output
armnn::LayerType::Constant
@ Constant