ArmNN 24.02 — source listing of WorkloadFactory.cpp.
1 //
2 // Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include <Layer.hpp>
7 #include <LayersFwd.hpp>
8 
9 #include <armnn/Types.hpp>
12 #include <armnn/BackendHelper.hpp>
16 
18 
19 #include <sstream>
20 
21 namespace armnn
22 {
23 
24 namespace
25 {
26 using LayerList = std::list<Layer*>;
27 using Iterator = LayerList::const_iterator; // Const so pointers in the list can't be modified externally.
28 
29 const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
30 {
31  if (!type)
32  {
33  return info;
34  }
35 
36  return TensorInfo(info.GetShape(),
37  type.value(),
38  info.GetQuantizationScale(),
39  info.GetQuantizationOffset(),
40  info.IsConstant());
41 }
42 
43 } // anonymous namespace
44 
46 {
47  if (!weightsType)
48  {
49  return weightsType;
50  }
51 
52  switch(weightsType.value())
53  {
57  return weightsType;
63  default:
64  ARMNN_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
65  }
66  return armnn::EmptyOptional();
67 }
68 
69 
70 bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
71  const IConnectableLayer& connectableLayer,
72  Optional<DataType> dataType,
73  std::string& outReasonIfUnsupported,
74  const ModelOptions& modelOptions)
75 {
76  Optional<std::string&> reason = outReasonIfUnsupported;
77  bool result;
78  const Layer& layer = *(PolymorphicDowncast<const Layer*>(&connectableLayer));
79 
80  auto const& backendRegistry = BackendRegistryInstance();
81  if (!backendRegistry.IsBackendRegistered(backendId))
82  {
83  std::stringstream ss;
84  ss << connectableLayer.GetName() << " is not supported on " << backendId
85  << " because this backend is not registered.";
86 
87  outReasonIfUnsupported = ss.str();
88  return false;
89  }
90 
91  auto backendFactory = backendRegistry.GetFactory(backendId);
92  auto backendObject = backendFactory();
93  auto layerSupport = backendObject->GetLayerSupport(modelOptions);
94  auto layerSupportObject = LayerSupportHandle(layerSupport, backendId);
95 
96  switch(layer.GetType())
97  {
99  {
100  auto cLayer = PolymorphicDowncast<const ActivationLayer*>(&layer);
101  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
102  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
103  result = layerSupportObject.IsActivationSupported(
104  OverrideDataType(input, dataType),
105  OverrideDataType(output, dataType),
106  cLayer->GetParameters(),
107  reason);
108  break;
109  }
110  case LayerType::Addition:
111  {
113  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
114  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
115  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
116  result = layerSupportObject.IsAdditionSupported(
117  OverrideDataType(input0, dataType),
118  OverrideDataType(input1, dataType),
119  OverrideDataType(output, dataType),
120  reason);
122  break;
123  }
125  {
126  auto cLayer = PolymorphicDowncast<const ArgMinMaxLayer*>(&layer);
127  const ArgMinMaxDescriptor& descriptor = cLayer->GetParameters();
128 
129  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
130  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
131  result = layerSupportObject.IsArgMinMaxSupported(
132  OverrideDataType(input, dataType),
133  OverrideDataType(output, DataType::Signed32),
134  descriptor,
135  reason);
136  break;
137  }
139  {
140  auto cLayer = PolymorphicDowncast<const BatchMatMulLayer*>(&layer);
141  const BatchMatMulDescriptor& descriptor = cLayer->GetParameters();
142 
143  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
144  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
145  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
146  result = layerSupportObject.IsBatchMatMulSupported(
147  OverrideDataType(input0, dataType),
148  OverrideDataType(input1, dataType),
149  OverrideDataType(output, dataType),
150  descriptor,
151  reason);
152  break;
153  }
155  {
156  auto cLayer = PolymorphicDowncast<const BatchNormalizationLayer*>(&layer);
157  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
158  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
159  const TensorInfo& mean = cLayer->m_Mean->GetTensorInfo();
160  const TensorInfo& var = cLayer->m_Variance->GetTensorInfo();
161  const TensorInfo& beta = cLayer->m_Beta->GetTensorInfo();
162  const TensorInfo& gamma = cLayer->m_Gamma->GetTensorInfo();
163  result = layerSupportObject.IsBatchNormalizationSupported(
164  OverrideDataType(input, dataType),
165  OverrideDataType(output, dataType),
166  OverrideDataType(mean, dataType),
167  OverrideDataType(var, dataType),
168  OverrideDataType(beta, dataType),
169  OverrideDataType(gamma, dataType),
170  cLayer->GetParameters(),
171  reason);
172  break;
173  }
175  {
176  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
177  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
178  auto cLayer = PolymorphicDowncast<const BatchToSpaceNdLayer*>(&layer);
179 
180  result = layerSupportObject.IsBatchToSpaceNdSupported(OverrideDataType(input, dataType),
181  OverrideDataType(output, dataType),
182  cLayer->GetParameters(),
183  reason);
184  break;
185  }
187  {
188  auto cLayer = PolymorphicDowncast<const BroadcastToLayer*>(&layer);
189  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
190  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
191 
192  result = layerSupportObject.IsBroadcastToSupported(OverrideDataType(input, dataType),
193  OverrideDataType(output, dataType),
194  cLayer->GetParameters(),
195  reason);
196  break;
197  }
198  case LayerType::Cast:
199  {
200  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
201  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
202 
203  result = layerSupportObject.IsCastSupported(OverrideDataType(input, dataType),
204  OverrideDataType(output, dataType),
205  reason);
206  break;
207  }
209  {
210  auto cLayer = PolymorphicDowncast<const ChannelShuffleLayer*>(&layer);
211 
212  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
213  const TensorInfo& output = layer.GetInputSlot(0).GetTensorInfo();
214 
215  const ChannelShuffleDescriptor descriptor = cLayer->GetParameters();
216 
217  result = layerSupportObject.IsChannelShuffleSupported(OverrideDataType(input, dataType),
218  OverrideDataType(output, dataType),
219  descriptor,
220  reason);
221  break;
222  }
224  {
225  auto cLayer = PolymorphicDowncast<const ComparisonLayer*>(&layer);
226 
227  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
228  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
229  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
230 
231  result = layerSupportObject.IsComparisonSupported(OverrideDataType(input0, dataType),
232  OverrideDataType(input1, dataType),
233  OverrideDataType(output, DataType::Boolean),
234  cLayer->GetParameters(),
235  reason);
236  break;
237  }
238  case LayerType::Constant:
239  {
240  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
241  result = layerSupportObject.IsConstantSupported(OverrideDataType(output, dataType), reason);
242  break;
243  }
245  {
246  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
247  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
248  result = layerSupportObject.IsConvertFp16ToFp32Supported(input, output, reason);
249  break;
250  }
252  {
253  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
254  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
255  result = layerSupportObject.IsConvertFp32ToFp16Supported(input, output, reason);
256  break;
257  }
259  {
260  auto cLayer = PolymorphicDowncast<const Convolution2dLayer*>(&layer);
261 
262  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetTensorInfo(),
263  dataType);
264  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
265  ARMNN_ASSERT_MSG(layer.GetInputSlot(1).GetConnection(),
266  "Convolution2dLayer: Weights should be connected as a Constant Layer.");
267  const TensorInfo weights = OverrideDataType(layer.GetInputSlot(1).GetTensorInfo(),
268  dataType);
269 
270  const Convolution2dDescriptor& descriptor = cLayer->GetParameters();
271 
272  // Construct optional biases object based on the value of m_BiasEnabled
273  Optional<TensorInfo> biases;
274  if (descriptor.m_BiasEnabled)
275  {
276  ARMNN_ASSERT_MSG(layer.GetInputSlot(2).GetConnection(),
277  "Convolution2dLayer: Bias should be connected as a Constant Layer.");
278  biases = OverrideDataType(layer.GetInputSlot(2).GetTensorInfo(),
279  GetBiasTypeFromWeightsType(dataType));
280  }
281 
282  result = layerSupportObject.IsConvolution2dSupported(
283  input,
284  output,
285  descriptor,
286  weights,
287  biases,
288  reason);
289  break;
290  }
292  {
293  auto cLayer = PolymorphicDowncast<const Convolution3dLayer*>(&layer);
294 
295  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetTensorInfo(),
296  dataType);
297  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
298 
299  ARMNN_ASSERT_MSG(layer.GetInputSlot(1).GetConnection(),
300  "Convolution3dLayer: Weights should be connected as a Constant Layer.");
301  const TensorInfo weights = OverrideDataType(layer.GetInputSlot(1).GetTensorInfo(),
302  dataType);
303 
304  const Convolution3dDescriptor& descriptor = cLayer->GetParameters();
305 
306  // Construct optional biases object based on the value of m_BiasEnabled
307  Optional<TensorInfo> biases;
308  if (descriptor.m_BiasEnabled)
309  {
310  biases = OverrideDataType(layer.GetInputSlot(2).GetTensorInfo(),
311  GetBiasTypeFromWeightsType(dataType));
312  }
313 
314  result = layerSupportObject.IsConvolution3dSupported(
315  input,
316  output,
317  descriptor,
318  weights,
319  biases,
320  reason);
321  break;
322  }
323  case LayerType::Debug:
324  {
325  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
326  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
327 
328  result = layerSupportObject.IsDebugSupported(OverrideDataType(input, dataType),
329  OverrideDataType(output, dataType),
330  reason);
331  break;
332  }
334  {
335  auto cLayer = PolymorphicDowncast<const DepthToSpaceLayer*>(&layer);
336 
337  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
338  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
339 
340  result = layerSupportObject.IsDepthToSpaceSupported(OverrideDataType(input, dataType),
341  OverrideDataType(output, dataType),
342  cLayer->GetParameters(),
343  reason);
344  break;
345  }
347  {
348  auto cLayer = PolymorphicDowncast<const DepthwiseConvolution2dLayer*>(&layer);
349  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetTensorInfo(),
350  dataType);
351  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
352  const TensorInfo& weights = OverrideDataType(layer.GetInputSlot(1).GetTensorInfo(),
353  dataType);
354 
355  ARMNN_ASSERT(cLayer->GetInputSlot(1).GetConnection() != nullptr);
356 
357  const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();
358 
359  // Construct optional biases object based on the value of m_BiasEnabled
360  Optional<TensorInfo> biases;
361  if (descriptor.m_BiasEnabled)
362  {
363  biases = OverrideDataType(cLayer->GetInputSlot(2).GetTensorInfo(),
364  GetBiasTypeFromWeightsType(dataType));
365  }
366 
367  result = layerSupportObject.IsDepthwiseConvolutionSupported(input,
368  output,
369  descriptor,
370  weights,
371  biases,
372  reason);
373  break;
374  }
376  {
377  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
378  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
379 
380  result = layerSupportObject.IsDequantizeSupported(input,
381  OverrideDataType(output, dataType),
382  reason);
383  break;
384  }
386  {
387  auto cLayer = PolymorphicDowncast<const DetectionPostProcessLayer*>(&layer);
388  const TensorInfo& boxEncodings = layer.GetInputSlot(0).GetTensorInfo();
389  const TensorInfo& scores = layer.GetInputSlot(1).GetTensorInfo();
390  const TensorInfo& anchors = cLayer->m_Anchors->GetTensorInfo();
391 
392  const TensorInfo& detectionBoxes = layer.GetOutputSlot(0).GetTensorInfo();
393  const TensorInfo& detectionClasses = layer.GetOutputSlot(1).GetTensorInfo();
394  const TensorInfo& detectionScores = layer.GetOutputSlot(2).GetTensorInfo();
395  const TensorInfo& numDetections = layer.GetOutputSlot(3).GetTensorInfo();
396 
397  const DetectionPostProcessDescriptor& descriptor = cLayer->GetParameters();
398  result = layerSupportObject.IsDetectionPostProcessSupported(boxEncodings,
399  scores,
400  anchors,
401  detectionBoxes,
402  detectionClasses,
403  detectionScores,
404  numDetections,
405  descriptor,
406  reason);
407  break;
408  }
410  {
411  auto cLayer = PolymorphicDowncast<const ElementwiseBinaryLayer*>(&layer);
412 
413  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
414  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
415  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
416  std::vector<TensorInfo> infos = { OverrideDataType(input0, dataType),
417  OverrideDataType(input1, dataType),
418  OverrideDataType(output, dataType) };
419  result = layerSupport->IsLayerSupported(LayerType::ElementwiseBinary,
420  infos,
421  cLayer->GetParameters(),
422  EmptyOptional(),
423  EmptyOptional(),
424  reason);
425  break;
426  }
428  {
429  auto cLayer = PolymorphicDowncast<const ElementwiseUnaryLayer*>(&layer);
430 
431  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
432  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
433 
434  result = layerSupportObject.IsElementwiseUnarySupported(OverrideDataType(input, dataType),
435  OverrideDataType(output, dataType),
436  cLayer->GetParameters(),
437  reason);
438  break;
439  }
440  case LayerType::Fill:
441  {
442  auto cLayer = PolymorphicDowncast<const FillLayer*>(&layer);
443  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
444  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
445  const FillDescriptor& descriptor = cLayer->GetParameters();
446 
447  result = layerSupportObject.IsFillSupported(
448  OverrideDataType(input, dataType),
449  OverrideDataType(output, dataType),
450  descriptor,
451  reason);
452  break;
453  }
455  {
456  auto cLayer = PolymorphicDowncast<const FakeQuantizationLayer*>(&layer);
457  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
458  result = layerSupportObject.IsFakeQuantizationSupported(OverrideDataType(input, dataType),
459  cLayer->GetParameters(),
460  reason);
461  break;
462  }
463  case LayerType::Floor:
464  {
465  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
466  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
467  result = layerSupportObject.IsFloorSupported(OverrideDataType(input, dataType),
468  OverrideDataType(output, dataType),
469  reason);
470  break;
471  }
473  {
474  auto cLayer = PolymorphicDowncast<const FullyConnectedLayer*>(&layer);
475  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
476  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
477 
478  const FullyConnectedDescriptor& descriptor = cLayer->GetParameters();
479  TensorInfo weightsInfo;
480  const TensorInfo* weightsInfoPtr = nullptr;
481 
482  weightsInfo = OverrideDataType(layer.GetInputSlot(1).GetTensorInfo(), dataType);
483  weightsInfoPtr = &weightsInfo;
484 
485  TensorInfo biasInfo;
486  const TensorInfo* biasInfoPtr = nullptr;
487  static const TensorInfo dummyBFloat16Bias(TensorShape({1,1,1,1}), DataType::BFloat16);
488  static const TensorInfo dummyFloat16Bias(TensorShape({1,1,1,1}), DataType::Float16);
489  static const TensorInfo dummyFloat32Bias(TensorShape({1,1,1,1}), DataType::Float32);
490  static const TensorInfo dummyQA8Bias(TensorShape({1,1,1,1}), DataType::Signed32);
491 
492  if (descriptor.m_BiasEnabled)
493  {
494  biasInfo = OverrideDataType(layer.GetInputSlot(2).GetTensorInfo(), dataType);
495  biasInfoPtr = &biasInfo;
496  }
497  else
498  {
499  // If biases are not enabled pass a dummy tensorinfo for the validation
500  switch(input.GetDataType())
501  {
502  case DataType::BFloat16:
503  {
504  biasInfoPtr = &dummyBFloat16Bias;
505  break;
506  }
507  case DataType::Float16:
508  {
509  biasInfoPtr = &dummyFloat16Bias;
510  break;
511  }
512  case DataType::Float32:
513  {
514  biasInfoPtr = &dummyFloat32Bias;
515  break;
516  }
517  case DataType::QAsymmU8:
518  case DataType::QAsymmS8:
519  case DataType::QSymmS8:
520  case DataType::QSymmS16:
521  {
522  biasInfoPtr = &dummyQA8Bias;
523  break;
524  }
525  default:
526  {
527  ARMNN_ASSERT_MSG(false, "Unexpected bias type");
528  }
529  }
530  }
531  result = layerSupportObject.IsFullyConnectedSupported(
532  OverrideDataType(input, dataType),
533  OverrideDataType(output, dataType),
534  *weightsInfoPtr,
535  *biasInfoPtr,
536  descriptor,
537  reason);
538  break;
539  }
540  case LayerType::Fused:
541  {
542  auto cLayer = PolymorphicDowncast<const FusedLayer*>(&layer);
543 
544  // Get vector of all outputs.
545  auto getOutTensorInfo = [&dataType](const OutputSlot& slot)
546  {
547  return OverrideDataType(slot.GetTensorInfo(), dataType);
548  };
549  auto beginOutputs = MakeTransformIterator(layer.GetOutputSlots().begin(), getOutTensorInfo);
550  auto endOutputs = MakeTransformIterator(layer.GetOutputSlots().end(), getOutTensorInfo);
551  std::vector<TensorInfo> outputs(beginOutputs, endOutputs);
552  const std::vector<std::reference_wrapper<TensorInfo>> outputPtrs(outputs.begin(), outputs.end());
553 
554  // Get vector of all inputs.
555  auto getInputTensorInfo = [&dataType](const InputSlot& slot)
556  {
557  return OverrideDataType(slot.GetTensorInfo(), dataType);
558  };
559  auto beginInputs = MakeTransformIterator(layer.GetInputSlots().begin(), getInputTensorInfo);
560  auto endInputs = MakeTransformIterator(layer.GetInputSlots().end(), getInputTensorInfo);
561  std::vector<TensorInfo> inputs(beginInputs, endInputs);
562  const std::vector<std::reference_wrapper<TensorInfo>> inputPtrs(inputs.begin(), inputs.end());
563 
564  result = layerSupportObject.IsFusedSupported(inputPtrs,
565  outputPtrs,
566  cLayer->GetParameters(),
567  reason);
568  break;
569  }
570  case LayerType::Gather:
571  {
572  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
573  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
574  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
575  auto cLayer = PolymorphicDowncast<const GatherLayer*>(&layer);
576  const GatherDescriptor& descriptor = cLayer->GetParameters();
577  result = layerSupportObject.IsGatherSupported(OverrideDataType(input0, dataType),
578  input1,
579  OverrideDataType(output, dataType),
580  descriptor,
581  reason);
582  break;
583  }
584  case LayerType::GatherNd:
585  {
586  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
587  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
588  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
589  result = layerSupportObject.IsGatherNdSupported(OverrideDataType(input0, dataType),
590  input1,
591  OverrideDataType(output, dataType),
592  reason);
593  break;
594  }
595  case LayerType::Input:
596  {
597  const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo();
598  result = layerSupportObject.IsInputSupported(OverrideDataType(input, dataType), reason);
599  break;
600  }
602  {
603  auto cLayer = PolymorphicDowncast<const InstanceNormalizationLayer*>(&layer);
604  const InstanceNormalizationDescriptor& descriptor = cLayer->GetParameters();
605 
606  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
607  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
608 
609  result = layerSupportObject.IsInstanceNormalizationSupported(
610  OverrideDataType(input, dataType),
611  OverrideDataType(output, dataType),
612  descriptor,
613  reason);
614  break;
615  }
617  {
618  auto cLayer = PolymorphicDowncast<const L2NormalizationLayer*>(&layer);
619  const L2NormalizationDescriptor& descriptor = cLayer->GetParameters();
620 
621  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
622  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
623 
624  result = layerSupportObject.IsL2NormalizationSupported(
625  OverrideDataType(input, dataType),
626  OverrideDataType(output, dataType),
627  descriptor,
628  reason);
629  break;
630  }
632  {
633  auto cLayer = PolymorphicDowncast<const LogicalBinaryLayer*>(&layer);
634 
635  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
636  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
637  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
638 
639  result = layerSupportObject.IsLogicalBinarySupported(input0,
640  input1,
641  output,
642  cLayer->GetParameters(),
643  reason);
644  break;
645  }
647  {
648  auto cLayer = PolymorphicDowncast<const LogSoftmaxLayer*>(&layer);
649 
650  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
651  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
652 
653  result = layerSupportObject.IsLogSoftmaxSupported(OverrideDataType(input, dataType),
654  OverrideDataType(output, dataType),
655  cLayer->GetParameters(),
656  reason);
657  break;
658  }
659  case LayerType::Lstm:
660  {
661  auto cLayer = PolymorphicDowncast<const LstmLayer*>(&layer);
662  const LstmDescriptor& descriptor = cLayer->GetParameters();
663 
664  // All inputs.
665  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetTensorInfo(),
666  dataType);
667  const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetTensorInfo(),
668  dataType);
669  const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetTensorInfo(),
670  dataType);
671  // All outputs
672  const TensorInfo& scratchBuffer = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
673  const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
674  const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
675  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(3).GetTensorInfo(), dataType);
676 
677  // Basic parameters
678  const TensorInfo& inputToForgetWeights
679  = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
680  const TensorInfo& inputToCellWeights
681  = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
682  const TensorInfo& inputToOutputWeights
683  = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
684  const TensorInfo& recurrentToForgetWeights
685  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
686  const TensorInfo& recurrentToCellWeights
687  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
688  const TensorInfo& recurrentToOutputWeights
689  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
690  const TensorInfo& forgetGateBias
691  = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
692  const TensorInfo& cellBias
693  = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
694  const TensorInfo& outputGateBias
695  = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);
696 
697  LstmInputParamsInfo paramsInfo;
698 
699  paramsInfo.m_InputToForgetWeights = &inputToForgetWeights;
700  paramsInfo.m_InputToCellWeights = &inputToCellWeights;
701  paramsInfo.m_InputToOutputWeights = &inputToOutputWeights;
702  paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
703  paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights;
704  paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
705  paramsInfo.m_ForgetGateBias = &forgetGateBias;
706  paramsInfo.m_CellBias = &cellBias;
707  paramsInfo.m_OutputGateBias = &outputGateBias;
708 
709 
710  // Optional parameters
711  TensorInfo optInputToInputWeights;
712  TensorInfo optRecurrentToInputWeights;
713  TensorInfo optCellToInputWeights;
714  TensorInfo optInputGateBias;
715  TensorInfo optProjectionWeights;
716  TensorInfo optProjectionBias;
717  TensorInfo optCellToForgetWeights;
718  TensorInfo optCellToOutputWeights;
719  TensorInfo optInputLayerNormWeights;
720  TensorInfo optForgetLayerNormWeights;
721  TensorInfo optCellLayerNormWeights;
722  TensorInfo optOutputLayerNormWeights;
723 
724  if(!descriptor.m_CifgEnabled)
725  {
726  optInputToInputWeights =
727  OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
728  paramsInfo.m_InputToInputWeights = &optInputToInputWeights;
729 
730  optRecurrentToInputWeights =
731  OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
732  paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
733  optInputGateBias =
734  OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
735  paramsInfo.m_InputGateBias = &optInputGateBias;
736  }
737 
738  if(descriptor.m_ProjectionEnabled)
739  {
740  optProjectionWeights =
741  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
742  paramsInfo.m_ProjectionWeights = &optProjectionWeights;
743  if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
744  {
745  optProjectionBias =
746  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
747  paramsInfo.m_ProjectionBias = &optProjectionBias;
748  }
749  }
750 
751  if(descriptor.m_PeepholeEnabled)
752  {
753  if(!descriptor.m_CifgEnabled)
754  {
755  optCellToInputWeights =
756  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
757  dataType);
758  paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
759  }
760  optCellToForgetWeights =
761  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
762  paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
763  optCellToOutputWeights =
764  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
765  paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
766  }
767 
768  if(descriptor.m_LayerNormEnabled)
769  {
770  if (!descriptor.m_CifgEnabled)
771  {
772  optInputLayerNormWeights = OverrideDataType(
773  cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
774  paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
775  }
776 
777  optForgetLayerNormWeights = OverrideDataType(
778  cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
779  paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;
780 
781  optCellLayerNormWeights = OverrideDataType(
782  cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
783  paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;
784 
785  optOutputLayerNormWeights = OverrideDataType(
786  cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
787  paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
788  }
789 
790  result = layerSupportObject.IsLstmSupported(
791  input,
792  outputStateIn,
793  cellStateIn,
794  scratchBuffer,
795  outputStateOut,
796  cellStateOut,
797  output,
798  descriptor,
799  paramsInfo,
800  reason);
801  break;
802  }
803  case LayerType::Maximum:
804  {
806  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
807  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
808  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
809 
810  result = layerSupportObject.IsMaximumSupported(OverrideDataType(input0, dataType),
811  OverrideDataType(input1, dataType),
812  OverrideDataType(output, dataType),
813  reason);
815  break;
816  }
817  case LayerType::MemCopy:
818  {
819  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
820  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
821 
822  result = layerSupportObject.IsMemCopySupported(OverrideDataType(input, dataType),
823  OverrideDataType(output, dataType),
824  reason);
825  break;
826  }
828  {
829  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
830  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
831 
832  result = layerSupportObject.IsMemImportSupported(OverrideDataType(input, dataType),
833  OverrideDataType(output, dataType),
834  reason);
835  break;
836  }
837  case LayerType::Merge:
838  {
839  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
840  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
841  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
842 
843  result = layerSupportObject.IsMergeSupported(OverrideDataType(input0, dataType),
844  OverrideDataType(input1, dataType),
845  OverrideDataType(output, dataType),
846  reason);
847  break;
848  }
849  case LayerType::Concat:
850  {
851  auto cLayer = PolymorphicDowncast<const ConcatLayer*>(&layer);
852 
853  // Get vector of all inputs.
854  auto getTensorInfo = [&dataType](const InputSlot& slot)
855  {
856  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
857  };
858 
859  auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo);
860  auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo);
861  std::vector<TensorInfo> inputs(beginI, endI);
862 
863  auto getTensorInfoPtr = [](const TensorInfo& info)
864  {
865  return &info;
866  };
867 
868  auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
869  auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
870  std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
871 
872  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
873 
874  result = layerSupportObject.IsConcatSupported(inputPtrs, output, cLayer->GetParameters(), reason);
875 
876 
877  break;
878  }
880  {
882  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
883  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
884  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
885  result = layerSupportObject.IsMultiplicationSupported(
886  OverrideDataType(input0, dataType),
887  OverrideDataType(input1, dataType),
888  OverrideDataType(output, dataType),
889  reason);
891  break;
892  }
894  {
895  auto cLayer = PolymorphicDowncast<const NormalizationLayer*>(&layer);
896  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
897  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
898  result = layerSupportObject.IsNormalizationSupported(OverrideDataType(input, dataType),
899  OverrideDataType(output, dataType),
900  cLayer->GetParameters(),
901  reason);
902  break;
903  }
904  case LayerType::Output:
905  {
906  const TensorInfo& output = layer.GetInputSlot(0).GetTensorInfo();
907  result = layerSupportObject.IsOutputSupported(OverrideDataType(output, dataType), reason);
908  break;
909  }
910  case LayerType::Permute:
911  {
912  auto cLayer = PolymorphicDowncast<const PermuteLayer*>(&layer);
913  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
914  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
915  result = layerSupportObject.IsPermuteSupported(OverrideDataType(input, dataType),
916  OverrideDataType(output, dataType),
917  cLayer->GetParameters(),
918  reason);
919  break;
920  }
921  case LayerType::Pad:
922  {
923  auto cLayer = PolymorphicDowncast<const PadLayer*>(&layer);
924  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
925  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
926  result = layerSupportObject.IsPadSupported(
927  OverrideDataType(input, dataType),
928  OverrideDataType(output, dataType),
929  cLayer->GetParameters(),
930  reason);
931  break;
932  }
934  {
935  auto cLayer = PolymorphicDowncast<const Pooling2dLayer*>(&layer);
936  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
937  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
938  result = layerSupportObject.IsPooling2dSupported(OverrideDataType(input, dataType),
939  OverrideDataType(output, dataType),
940  cLayer->GetParameters(),
941  reason);
942  break;
943  }
945  {
946  auto cLayer = PolymorphicDowncast<const Pooling3dLayer*>(&layer);
947  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
948  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
949  result = layerSupportObject.IsPooling3dSupported(OverrideDataType(input, dataType),
950  OverrideDataType(output, dataType),
951  cLayer->GetParameters(),
952  reason);
953  break;
954  }
956  {
957  auto cLayer = PolymorphicDowncast<const PreCompiledLayer*>(&layer);
958  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
959  result = layerSupportObject.IsPreCompiledSupported(OverrideDataType(input, dataType),
960  cLayer->GetParameters(),
961  reason);
962  break;
963  }
964  case LayerType::Quantize:
965  {
966  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
967  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
968  result = layerSupportObject.IsQuantizeSupported(input, output, reason);
969  break;
970  }
971  case LayerType::QLstm:
972  {
973  auto cLayer = PolymorphicDowncast<const QLstmLayer*>(&layer);
974  const QLstmDescriptor& descriptor = cLayer->GetParameters();
975 
976  // Inputs
977  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
978  const TensorInfo& previousOutputIn = layer.GetInputSlot(1).GetTensorInfo();
979  const TensorInfo& previousCellStateIn = layer.GetInputSlot(2).GetTensorInfo();
980 
981  // Outputs
982  const TensorInfo& outputStateOut = layer.GetOutputSlot(0).GetTensorInfo();
983  const TensorInfo& cellStateOut = layer.GetOutputSlot(1).GetTensorInfo();
984  const TensorInfo& output = layer.GetOutputSlot(2).GetTensorInfo();
985 
986  // Lstm parameters
987  LstmInputParamsInfo paramsInfo;
988 
989  // Basic parameters
990  ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToForgetWeights.get() != nullptr);
991  ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToCellWeights.get() != nullptr);
992  ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToOutputWeights.get() != nullptr);
993  paramsInfo.m_InputToForgetWeights = &cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo();
994  paramsInfo.m_InputToCellWeights = &cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo();
995  paramsInfo.m_InputToOutputWeights = &cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo();
996 
997  paramsInfo.m_RecurrentToForgetWeights =
998  &cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo();
999  paramsInfo.m_RecurrentToCellWeights =
1000  &cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo();
1001  paramsInfo.m_RecurrentToOutputWeights =
1002  &cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo();
1003 
1004  paramsInfo.m_ForgetGateBias = &cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo();
1005  paramsInfo.m_CellBias = &cLayer->m_BasicParameters.m_CellBias->GetTensorInfo();
1006  paramsInfo.m_OutputGateBias = &cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo();
1007 
1008  if(!descriptor.m_CifgEnabled)
1009  {
1010  paramsInfo.m_InputToInputWeights = &cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo();
1011  paramsInfo.m_RecurrentToInputWeights =
1012  &cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo();
1013  paramsInfo.m_InputGateBias = &cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo();
1014  }
1015 
1016  if(descriptor.m_ProjectionEnabled)
1017  {
1018  paramsInfo.m_ProjectionWeights = &cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo();
1019 
1020  // Projection bias is optional even if projection is enabled
1021  if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
1022  {
1023  paramsInfo.m_ProjectionBias = &cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo();
1024  }
1025  }
1026 
1027  if(descriptor.m_PeepholeEnabled)
1028  {
1029  if (!descriptor.m_CifgEnabled)
1030  {
1031  paramsInfo.m_CellToInputWeights =
1032  &cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo();
1033  }
1034 
1035  paramsInfo.m_CellToForgetWeights =
1036  &cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo();
1037  paramsInfo.m_CellToOutputWeights = &cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo();
1038  }
1039 
1040  if(descriptor.m_LayerNormEnabled)
1041  {
1042  if (!descriptor.m_CifgEnabled)
1043  {
1044  paramsInfo.m_InputLayerNormWeights =
1045  &cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo();
1046  }
1047 
1048  paramsInfo.m_ForgetLayerNormWeights =
1049  &cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo();
1050  paramsInfo.m_CellLayerNormWeights =
1051  &cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo();
1052  paramsInfo.m_OutputLayerNormWeights =
1053  &cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo();
1054  }
1055 
1056  result = layerSupportObject.IsQLstmSupported(input,
1057  previousOutputIn,
1058  previousCellStateIn,
1059  outputStateOut,
1060  cellStateOut,
1061  output,
1062  descriptor,
1063  paramsInfo,
1064  reason);
1065  break;
1066  }
1068  {
1069  auto cLayer = PolymorphicDowncast<const QuantizedLstmLayer*>(&layer);
1070 
1071  // Inputs
1072  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1073  const TensorInfo& previousCellStateIn = layer.GetInputSlot(1).GetTensorInfo();
1074  const TensorInfo& previousOutputIn = layer.GetInputSlot(2).GetTensorInfo();
1075 
1076  // Outputs
1077  const TensorInfo& cellStateOut = layer.GetOutputSlot(0).GetTensorInfo();
1078  const TensorInfo& output = layer.GetOutputSlot(1).GetTensorInfo();
1079 
1080  // QuantizedLstm parameters
1081  QuantizedLstmInputParamsInfo paramsInfo;
1082 
1083  paramsInfo.m_InputToInputWeights =
1084  &cLayer->m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo();
1085  paramsInfo.m_InputToForgetWeights =
1086  &cLayer->m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo();
1087  paramsInfo.m_InputToCellWeights =
1088  &cLayer->m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo();
1089  paramsInfo.m_InputToOutputWeights =
1090  &cLayer->m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo();
1091 
1092  paramsInfo.m_RecurrentToInputWeights =
1093  &cLayer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo();
1094  paramsInfo.m_RecurrentToForgetWeights =
1095  &cLayer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo();
1096  paramsInfo.m_RecurrentToCellWeights =
1097  &cLayer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo();
1098  paramsInfo.m_RecurrentToOutputWeights =
1099  &cLayer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo();
1100 
1101  paramsInfo.m_InputGateBias =
1102  &cLayer->m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo();
1103  paramsInfo.m_ForgetGateBias =
1104  &cLayer->m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo();
1105  paramsInfo.m_CellBias =
1106  &cLayer->m_QuantizedLstmParameters.m_CellBias->GetTensorInfo();
1107  paramsInfo.m_OutputGateBias =
1108  &cLayer->m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo();;
1109 
1110  result = layerSupportObject.IsQuantizedLstmSupported(input,
1111  previousCellStateIn,
1112  previousOutputIn,
1113  cellStateOut,
1114  output,
1115  paramsInfo,
1116  reason);
1117  break;
1118  }
1119  case LayerType::Division:
1120  {
1122  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
1123  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
1124  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1125  result = layerSupportObject.IsDivisionSupported(
1126  OverrideDataType(input0, dataType),
1127  OverrideDataType(input1, dataType),
1128  OverrideDataType(output, dataType),
1129  reason);
1131  break;
1132  }
1133  case LayerType::Rank:
1134  {
1135  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1136  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1137  result = layerSupportObject.IsRankSupported(OverrideDataType(input, dataType),
1138  OverrideDataType(output, dataType),
1139  reason);
1140  break;
1141  }
1142  case LayerType::Reshape:
1143  {
1144  auto cLayer = PolymorphicDowncast<const ReshapeLayer*>(&layer);
1145  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1146  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1147  result = layerSupportObject.IsReshapeSupported(OverrideDataType(input, dataType),
1148  OverrideDataType(output, dataType),
1149  cLayer->GetParameters(),
1150  reason);
1151  break;
1152  }
1153  case LayerType::Resize:
1154  {
1155  auto cLayer = PolymorphicDowncast<const ResizeLayer*>(&layer);
1156  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1157  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1158  result = layerSupportObject.IsResizeSupported(OverrideDataType(input, dataType),
1159  OverrideDataType(output, dataType),
1160  cLayer->GetParameters(),
1161  reason);
1162  break;
1163  }
1164  case LayerType::ReverseV2:
1165  {
1166  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1167  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1168  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1169  result = layerSupportObject.IsReverseV2Supported(OverrideDataType(input0, dataType),
1170  OverrideDataType(input1, armnn::DataType::Signed32),
1171  OverrideDataType(output, dataType),
1172  reason);
1173  break;
1174  }
1175  case LayerType::Shape:
1176  {
1177  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1178  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1179 
1180  result = layerSupportObject.IsShapeSupported(OverrideDataType(input, dataType),
1181  OverrideDataType(output, dataType),
1182  reason);
1183  break;
1184  }
1185  case LayerType::Slice:
1186  {
1187  auto cLayer = PolymorphicDowncast<const SliceLayer*>(&layer);
1188 
1189  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1190  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1191 
1192  result = layerSupportObject.IsSliceSupported(OverrideDataType(input, dataType),
1193  OverrideDataType(output, dataType),
1194  cLayer->GetParameters(),
1195  reason);
1196  break;
1197  }
1198  case LayerType::Softmax:
1199  {
1200  auto cLayer = PolymorphicDowncast<const SoftmaxLayer*>(&layer);
1201  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1202  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1203  result = layerSupportObject.IsSoftmaxSupported(OverrideDataType(input, dataType),
1204  OverrideDataType(output, dataType),
1205  cLayer->GetParameters(),
1206  reason);
1207  break;
1208  }
1210  {
1211  auto cLayer = PolymorphicDowncast<const SpaceToBatchNdLayer*>(&layer);
1212  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1213  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1214  result = layerSupportObject.IsSpaceToBatchNdSupported(OverrideDataType(input, dataType),
1215  OverrideDataType(output, dataType),
1216  cLayer->GetParameters(),
1217  reason);
1218  break;
1219  }
1221  {
1222  auto cLayer = PolymorphicDowncast<const SpaceToDepthLayer*>(&layer);
1223 
1224  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1225  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1226 
1227  result = layerSupportObject.IsSpaceToDepthSupported(OverrideDataType(input, dataType),
1228  OverrideDataType(output, dataType),
1229  cLayer->GetParameters(),
1230  reason);
1231  break;
1232  }
1233  case LayerType::Splitter:
1234  {
1235  auto cLayer = PolymorphicDowncast<const SplitterLayer*>(&layer);
1236  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1237 
1238  // Get vector of all outputs.
1239  auto getTensorInfo = [&dataType](const OutputSlot& slot)
1240  {
1241  return OverrideDataType(slot.GetTensorInfo(), dataType);
1242  };
1243  auto beginI = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfo);
1244  auto endI = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfo);
1245  std::vector<TensorInfo> outputs(beginI, endI);
1246 
1247  const std::vector<std::reference_wrapper<TensorInfo>> outputPtrs(outputs.begin(), outputs.end());
1248 
1249  result = layerSupportObject.IsSplitterSupported(OverrideDataType(input, dataType),
1250  outputPtrs,
1251  cLayer->GetParameters(),
1252  reason);
1253  break;
1254  }
1255  case LayerType::Stack:
1256  {
1257  auto cLayer = PolymorphicDowncast<const StackLayer*>(&layer);
1258 
1259  // Get vector of all inputs.
1260  auto getTensorInfo = [&dataType](const InputSlot& slot)
1261  {
1262  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
1263  };
1264  auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo);
1265  auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo);
1266  std::vector<TensorInfo> inputs(beginI, endI);
1267 
1268  auto getTensorInfoPtr = [](const TensorInfo& info)
1269  {
1270  return &info;
1271  };
1272  auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
1273  auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
1274  std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
1275 
1276  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1277 
1278  result = layerSupportObject.IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason);
1279 
1280  break;
1281  }
1282  case LayerType::StandIn:
1283  {
1284  auto cLayer = PolymorphicDowncast<const StandInLayer*>(&layer);
1285 
1286  // Get vector of all inputs.
1287  auto getTensorInfoIn = [&dataType](const InputSlot& slot)
1288  {
1289  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
1290  };
1291  auto getTensorInfoOut = [&dataType](const OutputSlot& slot)
1292  {
1293  return OverrideDataType(slot.GetTensorInfo(), dataType);
1294  };
1295  auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfoIn);
1296  auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfoIn);
1297  std::vector<TensorInfo> inputs(beginI, endI);
1298 
1299  auto beginO = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfoOut);
1300  auto endO = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfoOut);
1301  std::vector<TensorInfo> outputs(beginO, endO);
1302 
1303 
1304  auto getTensorInfoPtr = [](const TensorInfo& info)
1305  {
1306  return &info;
1307  };
1308  auto beginPtrI = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
1309  auto endPtrI = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
1310  std::vector<const TensorInfo*> inputPtrs(beginPtrI, endPtrI);
1311 
1312  auto beginPtrO = MakeTransformIterator(outputs.begin(), getTensorInfoPtr);
1313  auto endPtrO = MakeTransformIterator(outputs.end(), getTensorInfoPtr);
1314  std::vector<const TensorInfo*> outputPtrs(beginPtrO, endPtrO);
1315 
1316 
1317  result = layerSupportObject.IsStandInSupported(inputPtrs,
1318  outputPtrs,
1319  cLayer->GetParameters(),
1320  reason);
1321  break;
1322  }
1324  {
1325  auto cLayer = PolymorphicDowncast<const StridedSliceLayer*>(&layer);
1326  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1327  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1328  result = layerSupportObject.IsStridedSliceSupported(OverrideDataType(input, dataType),
1329  OverrideDataType(output, dataType),
1330  cLayer->GetParameters(),
1331  reason);
1332  break;
1333  }
1335  {
1337  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
1338  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
1339  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1340  result = layerSupportObject.IsSubtractionSupported(
1341  OverrideDataType(input0, dataType),
1342  OverrideDataType(input1, dataType),
1343  OverrideDataType(output, dataType),
1344  reason);
1346  break;
1347  }
1348  case LayerType::Switch:
1349  {
1350  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
1351  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
1352  const TensorInfo& output0 = layer.GetOutputSlot(0).GetTensorInfo();
1353  const TensorInfo& output1 = layer.GetOutputSlot(1).GetTensorInfo();
1354  result = layerSupportObject.IsSwitchSupported(OverrideDataType(input0, dataType),
1355  OverrideDataType(input1, dataType),
1356  OverrideDataType(output0, dataType),
1357  OverrideDataType(output1, dataType),
1358  reason);
1359  break;
1360  }
1361  case LayerType::Mean:
1362  {
1363  auto cLayer = PolymorphicDowncast<const MeanLayer*>(&layer);
1364  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1365  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1366  result = layerSupportObject.IsMeanSupported(
1367  OverrideDataType(input, dataType),
1368  OverrideDataType(output, dataType),
1369  cLayer->GetParameters(),
1370  reason);
1371  break;
1372  }
1373  case LayerType::Minimum:
1374  {
1376  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
1377  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
1378  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1379  result = layerSupportObject.IsMinimumSupported(OverrideDataType(input0, dataType),
1380  OverrideDataType(input1, dataType),
1381  OverrideDataType(output, dataType),
1382  reason);
1384  break;
1385  }
1386  case LayerType::Prelu:
1387  {
1388  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1389  const TensorInfo& alpha = layer.GetInputSlot(1).GetTensorInfo();
1390  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1391  result = layerSupportObject.IsPreluSupported(OverrideDataType(input, dataType),
1392  OverrideDataType(alpha, dataType),
1393  OverrideDataType(output, dataType),
1394  reason);
1395  break;
1396  }
1397  case LayerType::Tile:
1398  {
1399  auto cLayer = PolymorphicDowncast<const TileLayer*>(&layer);
1400  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1401  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1402 
1403  result = layerSupportObject.IsTileSupported(OverrideDataType(input, dataType),
1404  OverrideDataType(output, dataType),
1405  cLayer->GetParameters(),
1406  reason);
1407 
1408  break;
1409  }
1410  case LayerType::Transpose:
1411  {
1412  auto cLayer = PolymorphicDowncast<const TransposeLayer*>(&layer);
1413  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1414  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1415  result = layerSupportObject.IsTransposeSupported(OverrideDataType(input, dataType),
1416  OverrideDataType(output, dataType),
1417  cLayer->GetParameters(),
1418  reason);
1419  break;
1420  }
1422  {
1423  auto cLayer = PolymorphicDowncast<const TransposeConvolution2dLayer*>(&layer);
1424 
1425  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetTensorInfo(),
1426  dataType);
1427  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
1428 
1429  const TransposeConvolution2dDescriptor& descriptor = cLayer->GetParameters();
1430 
1431  Optional<TensorInfo> biases;
1432  if (descriptor.m_BiasEnabled)
1433  {
1434  ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
1435  biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
1436  GetBiasTypeFromWeightsType(dataType));
1437  }
1438 
1439  ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
1440  const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);
1441 
1442  result = layerSupportObject.IsTransposeConvolution2dSupported(input,
1443  output,
1444  descriptor,
1445  weights,
1446  biases,
1447  reason);
1448 
1449  break;
1450  }
1451  case LayerType::Reduce:
1452  {
1453  auto cLayer = PolymorphicDowncast<const ReduceLayer*>(&layer);
1454  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1455  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1456 
1457  result = layerSupportObject.IsReduceSupported(OverrideDataType(input, dataType),
1458  OverrideDataType(output, dataType),
1459  cLayer->GetParameters(),
1460  reason);
1461  break;
1462  }
1464  {
1465  auto cLayer = PolymorphicDowncast<const UnidirectionalSequenceLstmLayer*>(&layer);
1466  const UnidirectionalSequenceLstmDescriptor& descriptor = cLayer->GetParameters();
1467 
1468  // All inputs.
1469  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetTensorInfo(),
1470  dataType);
1471  const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetTensorInfo(),
1472  dataType);
1473  const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetTensorInfo(),
1474  dataType);
1475  // Outputs
1476  const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
1477  const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
1478  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
1479 
1480  // Basic parameters
1481  const TensorInfo& inputToForgetWeights
1482  = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
1483  const TensorInfo& inputToCellWeights
1484  = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
1485  const TensorInfo& inputToOutputWeights
1486  = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
1487  const TensorInfo& recurrentToForgetWeights
1488  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
1489  const TensorInfo& recurrentToCellWeights
1490  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
1491  const TensorInfo& recurrentToOutputWeights
1492  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
1493  const TensorInfo& forgetGateBias
1494  = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
1495  const TensorInfo& cellBias
1496  = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
1497  const TensorInfo& outputGateBias
1498  = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);
1499 
1500  LstmInputParamsInfo paramsInfo;
1501 
1502  paramsInfo.m_InputToForgetWeights = &inputToForgetWeights;
1503  paramsInfo.m_InputToCellWeights = &inputToCellWeights;
1504  paramsInfo.m_InputToOutputWeights = &inputToOutputWeights;
1505  paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1506  paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights;
1507  paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1508  paramsInfo.m_ForgetGateBias = &forgetGateBias;
1509  paramsInfo.m_CellBias = &cellBias;
1510  paramsInfo.m_OutputGateBias = &outputGateBias;
1511 
1512  // Optional parameters
1513  TensorInfo optInputToInputWeights;
1514  TensorInfo optRecurrentToInputWeights;
1515  TensorInfo optCellToInputWeights;
1516  TensorInfo optInputGateBias;
1517  TensorInfo optProjectionWeights;
1518  TensorInfo optProjectionBias;
1519  TensorInfo optCellToForgetWeights;
1520  TensorInfo optCellToOutputWeights;
1521  TensorInfo optInputLayerNormWeights;
1522  TensorInfo optForgetLayerNormWeights;
1523  TensorInfo optCellLayerNormWeights;
1524  TensorInfo optOutputLayerNormWeights;
1525 
1526  if(!descriptor.m_CifgEnabled)
1527  {
1528  optInputToInputWeights =
1529  OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
1530  paramsInfo.m_InputToInputWeights = &optInputToInputWeights;
1531 
1532  optRecurrentToInputWeights =
1533  OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
1534  paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
1535  optInputGateBias =
1536  OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
1537  paramsInfo.m_InputGateBias = &optInputGateBias;
1538  }
1539 
1540  if(descriptor.m_ProjectionEnabled)
1541  {
1542  optProjectionWeights =
1543  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
1544  paramsInfo.m_ProjectionWeights = &optProjectionWeights;
1545  if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
1546  {
1547  optProjectionBias =
1548  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
1549  paramsInfo.m_ProjectionBias = &optProjectionBias;
1550  }
1551  }
1552 
1553  if(descriptor.m_PeepholeEnabled)
1554  {
1555  if(!descriptor.m_CifgEnabled)
1556  {
1557  optCellToInputWeights =
1558  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
1559  dataType);
1560  paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
1561  }
1562  optCellToForgetWeights =
1563  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
1564  paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
1565  optCellToOutputWeights =
1566  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
1567  paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
1568  }
1569 
1570  if(descriptor.m_LayerNormEnabled)
1571  {
1572  if (!descriptor.m_CifgEnabled)
1573  {
1574  optInputLayerNormWeights = OverrideDataType(
1575  cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
1576  paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
1577  }
1578 
1579  optForgetLayerNormWeights = OverrideDataType(
1580  cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
1581  paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;
1582 
1583  optCellLayerNormWeights = OverrideDataType(
1584  cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
1585  paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;
1586 
1587  optOutputLayerNormWeights = OverrideDataType(
1588  cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
1589  paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
1590  }
1591 
1592  result = layerSupportObject.IsUnidirectionalSequenceLstmSupported(input,
1593  outputStateIn,
1594  cellStateIn,
1595  outputStateOut,
1596  cellStateOut,
1597  output,
1598  descriptor,
1599  paramsInfo,
1600  reason);
1601  break;
1602  }
1603  default:
1604  {
1605  ARMNN_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
1606  reason.value() = "Unrecognised layer type";
1607  result = false;
1608  break;
1609  }
1610  }
1611  return result;
1612 }
1613 
1615  const IConnectableLayer& connectableLayer,
1616  Optional<DataType> dataType,
1617  std::string& outReasonIfUnsupported)
1618 {
1619  return IsLayerConfigurationSupported(backendId, connectableLayer, dataType, outReasonIfUnsupported);
1620 }
1621 
1623  Optional<DataType> dataType,
1624  std::string& outReasonIfUnsupported)
1625 {
1626  auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
1627  return IsLayerConfigurationSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
1628 }
1629 
1631  Optional<DataType> dataType,
1632  std::string& outReasonIfUnsupported,
1633  const ModelOptions& modelOptions)
1634 {
1635  auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
1636  return IsLayerConfigurationSupported(layer->GetBackendId(),
1637  connectableLayer,
1638  dataType,
1639  outReasonIfUnsupported,
1640  modelOptions);
1641 }
1642 
1644  const IConnectableLayer& connectableLayer,
1645  Optional<DataType> dataType,
1646  std::string& outReasonIfUnsupported,
1647  const ModelOptions& modelOptions)
1648 {
1649  return IsLayerConfigurationSupported(backendId,
1650  connectableLayer,
1651  dataType,
1652  outReasonIfUnsupported,
1653  modelOptions);
1654 }
1655 
 1656 } // namespace armnn
ARMNN_ASSERT
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
BackendHelper.hpp
armnn::LayerType::SpaceToDepth
@ SpaceToDepth
armnn::DataType::Boolean
@ Boolean
armnn::LayerType::Permute
@ Permute
armnn::LayerType::Splitter
@ Splitter
armnn::LayerType::BatchNormalization
@ BatchNormalization
armnn::Optional
Definition: Optional.hpp:270
armnn::LayerType::InstanceNormalization
@ InstanceNormalization
armnn::LayerType::ConvertFp16ToFp32
@ ConvertFp16ToFp32
armnn::LayerType::Floor
@ Floor
armnn::UnidirectionalSequenceLstmDescriptor
LstmDescriptor UnidirectionalSequenceLstmDescriptor
Definition: Descriptors.hpp:1169
armnn::LayerType::Transpose
@ Transpose
armnn::LayerType::Comparison
@ Comparison
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::DataType::Float32
@ Float32
armnn::GetBiasTypeFromWeightsType
armnn::Optional< armnn::DataType > GetBiasTypeFromWeightsType(armnn::Optional< armnn::DataType > weightsType)
Definition: LayerSupportRules.hpp:14
ARMNN_NO_DEPRECATE_WARN_BEGIN
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
armnn::LayerType::Tile
@ Tile
armnn::MakeTransformIterator
constexpr TransformIterator< Function, Iterator > MakeTransformIterator(Iterator i, Function f)
Definition: TransformIterator.hpp:90
armnn::DataType::QAsymmU8
@ QAsymmU8
armnn::LayerType::Stack
@ Stack
BackendRegistry.hpp
armnn::DataType::QSymmS8
@ QSymmS8
armnn::LayerType::Normalization
@ Normalization
armnn::LayerType::QuantizedLstm
@ QuantizedLstm
armnn::LayerType::Reduce
@ Reduce
ARMNN_ASSERT_MSG
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
ILayerSupport.hpp
TransformIterator.hpp
armnn::DataType::QSymmS16
@ QSymmS16
WorkloadFactory.hpp
armnn::LayerType::GatherNd
@ GatherNd
armnn::LayerType::ElementwiseBinary
@ ElementwiseBinary
armnn::DataType::BFloat16
@ BFloat16
armnn::LayerType::ConvertFp32ToFp16
@ ConvertFp32ToFp16
armnn::LayerType::Slice
@ Slice
armnn::DataType::Float16
@ Float16
armnn::LayerType::ChannelShuffle
@ ChannelShuffle
armnn::LayerType::Subtraction
@ Subtraction
armnn::LayerType::Prelu
@ Prelu
armnn::LayerType::LogicalBinary
@ LogicalBinary
armnn::LayerType::Concat
@ Concat
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
armnn::LayerType::Merge
@ Merge
PolymorphicDowncast.hpp
armnn::EmptyOptional
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
armnn::LayerType::StandIn
@ StandIn
armnn::LayerType::Debug
@ Debug
IBackendInternal.hpp
armnn::LayerType::Softmax
@ Softmax
LayersFwd.hpp
armnn::BackendRegistryInstance
BackendRegistry & BackendRegistryInstance()
Definition: BackendRegistry.cpp:15
armnn::LayerType::Quantize
@ Quantize
armnn::LayerType::Multiplication
@ Multiplication
armnn::LayerType::Addition
@ Addition
armnn::LayerType::DepthToSpace
@ DepthToSpace
armnn::LayerType::BroadcastTo
@ BroadcastTo
armnn::BoostLogSeverityMapping::info
@ info
armnn::LayerType::DetectionPostProcess
@ DetectionPostProcess
armnn::LayerType::MemImport
@ MemImport
armnn::LayerType::Pooling2d
@ Pooling2d
armnn::IWorkloadFactory::IsLayerSupported
static bool IsLayerSupported(const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
Definition: WorkloadFactory.cpp:1614
armnn::LayerType::Division
@ Division
armnn::DataType::Signed32
@ Signed32
armnn::LayerType::Shape
@ Shape
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::LayerType::Gather
@ Gather
armnn::LayerType::Pooling3d
@ Pooling3d
armnn::LayerType::LogSoftmax
@ LogSoftmax
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::LayerType::Cast
@ Cast
armnn::LayerType::BatchToSpaceNd
@ BatchToSpaceNd
armnn::LayerType::Switch
@ Switch
armnn::LayerType::Reshape
@ Reshape
ARMNN_NO_DEPRECATE_WARN_END
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
armnn::LayerType::SpaceToBatchNd
@ SpaceToBatchNd
armnn::LayerType::Fill
@ Fill
armnn::LayerType::L2Normalization
@ L2Normalization
armnn::LayerType::Fused
@ Fused
armnn::LayerType::Minimum
@ Minimum
armnn::LayerType::PreCompiled
@ PreCompiled
armnn::LayerType::UnidirectionalSequenceLstm
@ UnidirectionalSequenceLstm
armnn::BackendId
Definition: BackendId.hpp:75
armnn::LayerType::ReverseV2
@ ReverseV2
armnn::LayerType::MemCopy
@ MemCopy
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
Types.hpp
armnn::LayerType::ArgMinMax
@ ArgMinMax
armnn::LayerType::Pad
@ Pad
Layer.hpp
armnn::LayerType::Rank
@ Rank
armnn::LayerType::Mean
@ Mean
armnn::IConnectableLayer
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:80
armnn::LayerType::Input
@ Input
armnn::ModelOptions
std::vector< BackendOptions > ModelOptions
Definition: BackendOptions.hpp:18
armnn::LayerType::Resize
@ Resize
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::LayerType::FakeQuantization
@ FakeQuantization
armnn::LayerType::Maximum
@ Maximum
armnn::LayerType::Activation
@ Activation
armnn::LayerType::Lstm
@ Lstm
armnn::LayerType::Dequantize
@ Dequantize
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::LayerType::QLstm
@ QLstm
armnn::OptionalReferenceSwitch< std::is_reference< T >::value, T >::value
const T & value() const
Definition: Optional.hpp:146
armnn::LayerType::Output
@ Output
armnn::LayerType::Constant
@ Constant