// Convenience aliases: the graph's layers are held in a std::list of Layer
// pointers, and Iterator is a read-only (const) iterator over that list.
28 using LayerList = std::list<Layer*>;
29 using Iterator = LayerList::const_iterator;
31 const TensorInfo OverrideDataType(
const TensorInfo& info, Optional<DataType> type)
38 return TensorInfo(info.GetShape(), type.value(), info.GetQuantizationScale(), info.GetQuantizationOffset());
// Asks the backend identified by 'backendId' whether it supports
// 'connectableLayer' with tensor data types optionally overridden to 'dataType'.
// On failure a human-readable reason is written to 'outReasonIfUnsupported'.
// The function downcasts to the concrete layer class per layer type, gathers the
// relevant input/output/parameter TensorInfos (re-typed via OverrideDataType)
// and forwards them to the backend's layer-support query object.
43 bool IWorkloadFactory::IsLayerConfigurationSupported(
const BackendId& backendId,
44 const IConnectableLayer& connectableLayer,
45 Optional<DataType> dataType,
46 std::string& outReasonIfUnsupported,
// Wrap the caller's string so the Is*Supported queries can report a reason.
49 Optional<std::string&> reason = outReasonIfUnsupported;
// Safe downcast: every IConnectableLayer in the graph is a Layer.
51 const Layer& layer = *(PolymorphicDowncast<const Layer*>(&connectableLayer));
// Unregistered backend: report why and bail out early.
54 if (!backendRegistry.IsBackendRegistered(backendId))
57 ss << connectableLayer.GetName() <<
" is not supported on " << backendId
58 <<
" because this backend is not registered.";
60 outReasonIfUnsupported = ss.str();
// Instantiate the backend and fetch its layer-support query object.
64 auto backendFactory = backendRegistry.GetFactory(backendId);
65 auto backendObject = backendFactory();
66 auto layerSupportObject = backendObject->GetLayerSupport(modelOptions);
// Dispatch on the layer type; each case collects the tensors the backend
// query needs for that operation.
68 switch(layer.GetType())
// Activation:
72 auto cLayer = PolymorphicDowncast<const ActivationLayer*>(&layer);
73 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
74 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
75 result = layerSupportObject->IsActivationSupported(
76 OverrideDataType(input, dataType),
77 OverrideDataType(output, dataType),
78 cLayer->GetParameters(),
// Addition (two inputs, one output):
84 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
85 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
86 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
87 result = layerSupportObject->IsAdditionSupported(
88 OverrideDataType(input0, dataType),
89 OverrideDataType(input1, dataType),
90 OverrideDataType(output, dataType),
// ArgMinMax:
96 auto cLayer = PolymorphicDowncast<const ArgMinMaxLayer*>(&layer);
97 const ArgMinMaxDescriptor& descriptor = cLayer->GetParameters();
99 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
100 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
101 result = layerSupportObject->IsArgMinMaxSupported(
102 OverrideDataType(input, dataType),
// BatchNormalization: also needs the layer's constant statistics/weights.
110 auto cLayer = PolymorphicDowncast<const BatchNormalizationLayer*>(&layer);
111 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
112 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
113 const TensorInfo& mean = cLayer->m_Mean->GetTensorInfo();
114 const TensorInfo& var = cLayer->m_Variance->GetTensorInfo();
115 const TensorInfo& beta = cLayer->m_Beta->GetTensorInfo();
116 const TensorInfo& gamma = cLayer->m_Gamma->GetTensorInfo();
117 result = layerSupportObject->IsBatchNormalizationSupported(
118 OverrideDataType(input, dataType),
119 OverrideDataType(output, dataType),
120 OverrideDataType(mean, dataType),
121 OverrideDataType(var, dataType),
122 OverrideDataType(beta, dataType),
123 OverrideDataType(gamma, dataType),
124 cLayer->GetParameters(),
// BatchToSpaceNd:
130 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
131 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
132 auto cLayer = PolymorphicDowncast<const BatchToSpaceNdLayer*>(&layer);
134 result = layerSupportObject->IsBatchToSpaceNdSupported(OverrideDataType(input, dataType),
135 OverrideDataType(output, dataType),
136 cLayer->GetParameters(),
// Comparison:
142 auto cLayer = PolymorphicDowncast<const ComparisonLayer*>(&layer);
144 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
145 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
146 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
148 result = layerSupportObject->IsComparisonSupported(OverrideDataType(input0, dataType),
149 OverrideDataType(input1, dataType),
151 cLayer->GetParameters(),
// Constant: only the output tensor matters.
157 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
158 result = layerSupportObject->IsConstantSupported(OverrideDataType(output, dataType), reason);
// ConvertBf16ToFp32: data types are fixed by the conversion, so no override.
163 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
164 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
165 result = layerSupportObject->IsConvertBf16ToFp32Supported(input, output, reason);
// ConvertFp16ToFp32:
170 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
171 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
172 result = layerSupportObject->IsConvertFp16ToFp32Supported(input, output, reason);
// ConvertFp32ToBf16:
177 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
178 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
179 result = layerSupportObject->IsConvertFp32ToBf16Supported(input, output, reason);
// ConvertFp32ToFp16:
184 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
185 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
186 result = layerSupportObject->IsConvertFp32ToFp16Supported(input, output, reason);
// Convolution2d: biases are only queried when the descriptor enables them.
191 auto cLayer = PolymorphicDowncast<const Convolution2dLayer*>(&layer);
193 const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
195 const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
198 const Convolution2dDescriptor& descriptor = cLayer->GetParameters();
201 Optional<TensorInfo> biases;
202 if (descriptor.m_BiasEnabled)
208 result = layerSupportObject->IsConvolution2dSupported(
212 OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
// Debug:
219 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
220 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
222 result = layerSupportObject->IsDebugSupported(OverrideDataType(input, dataType),
223 OverrideDataType(output, dataType),
// DepthToSpace:
229 auto cLayer = PolymorphicDowncast<const DepthToSpaceLayer*>(&layer);
231 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
232 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
234 result = layerSupportObject->IsDepthToSpaceSupported(OverrideDataType(input, dataType),
235 OverrideDataType(output, dataType),
236 cLayer->GetParameters(),
// DepthwiseConvolution2d:
242 auto cLayer = PolymorphicDowncast<const DepthwiseConvolution2dLayer*>(&layer);
243 const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
245 const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
248 const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();
251 Optional<TensorInfo> biases;
252 if (descriptor.m_BiasEnabled)
258 result = layerSupportObject->IsDepthwiseConvolutionSupported(
262 OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
// Dequantize: the input type is intentionally not overridden — presumably it
// must stay quantized for a dequantize op (TODO confirm against backend docs).
269 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
270 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
272 result = layerSupportObject->IsDequantizeSupported(input,
273 OverrideDataType(output, dataType),
// DetectionPostProcess: three inputs (boxes, scores, constant anchors) and
// four outputs (boxes, classes, scores, num detections).
279 auto cLayer = PolymorphicDowncast<const DetectionPostProcessLayer*>(&layer);
280 const TensorInfo&
boxEncodings = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
281 const TensorInfo&
scores = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
282 const TensorInfo&
anchors = cLayer->m_Anchors->GetTensorInfo();
284 const TensorInfo& detectionBoxes = layer.GetOutputSlot(0).GetTensorInfo();
285 const TensorInfo& detectionClasses = layer.GetOutputSlot(1).GetTensorInfo();
286 const TensorInfo& detectionScores = layer.GetOutputSlot(2).GetTensorInfo();
287 const TensorInfo& numDetections = layer.GetOutputSlot(3).GetTensorInfo();
289 const DetectionPostProcessDescriptor& descriptor = cLayer->GetParameters();
290 result = layerSupportObject->IsDetectionPostProcessSupported(boxEncodings,
// ElementwiseUnary:
303 auto cLayer = PolymorphicDowncast<const ElementwiseUnaryLayer*>(&layer);
305 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
306 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
308 result = layerSupportObject->IsElementwiseUnarySupported(OverrideDataType(input, dataType),
309 OverrideDataType(output, dataType),
310 cLayer->GetParameters(),
// Fill:
316 auto cLayer = PolymorphicDowncast<const FillLayer*>(&layer);
317 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
318 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
319 const FillDescriptor& descriptor = cLayer->GetParameters();
321 result = layerSupportObject->IsFillSupported(
322 OverrideDataType(input, dataType),
323 OverrideDataType(output, dataType),
// FakeQuantization: only the input tensor is queried.
330 auto cLayer = PolymorphicDowncast<const FakeQuantizationLayer*>(&layer);
331 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
332 result = layerSupportObject->IsFakeQuantizationSupported(OverrideDataType(input, dataType),
333 cLayer->GetParameters(),
// Floor:
339 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
340 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
341 result = layerSupportObject->IsFloorSupported(OverrideDataType(input, dataType),
342 OverrideDataType(output, dataType),
// FullyConnected: when biases are disabled, a static dummy bias TensorInfo
// matching the input's data type is substituted so the backend query still
// receives a well-formed bias argument.
348 auto cLayer = PolymorphicDowncast<const FullyConnectedLayer*>(&layer);
349 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
350 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
354 const TensorInfo * biasInfoPtr =
nullptr;
355 static const TensorInfo dummyBFloat16Bias(TensorShape({1,1,1,1}),
DataType::BFloat16);
356 static const TensorInfo dummyFloat16Bias(TensorShape({1,1,1,1}),
DataType::Float16);
357 static const TensorInfo dummyFloat32Bias(TensorShape({1,1,1,1}),
DataType::Float32);
360 const FullyConnectedDescriptor& descriptor = cLayer->GetParameters();
361 if (descriptor.m_BiasEnabled)
365 biasInfoPtr = &biasInfo;
// No real bias: pick the dummy bias whose type matches the input type.
370 switch(input.GetDataType())
374 biasInfoPtr = &dummyBFloat16Bias;
379 biasInfoPtr = &dummyFloat16Bias;
384 biasInfoPtr = &dummyFloat32Bias;
392 biasInfoPtr = &dummyQA8Bias;
402 result = layerSupportObject->IsFullyConnectedSupported(
403 OverrideDataType(input, dataType),
404 OverrideDataType(output, dataType),
405 OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
// Gather:
413 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
414 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
415 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
416 auto cLayer = PolymorphicDowncast<const GatherLayer*>(&layer);
417 const GatherDescriptor& descriptor = cLayer->GetParameters();
418 result = layerSupportObject->IsGatherSupported(OverrideDataType(input0, dataType),
420 OverrideDataType(output, dataType),
// Input: an input layer has no connections in, only its output tensor.
427 const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo();
428 result = layerSupportObject->IsInputSupported(OverrideDataType(input, dataType), reason);
// InstanceNormalization:
433 auto cLayer = PolymorphicDowncast<const InstanceNormalizationLayer*>(&layer);
434 const InstanceNormalizationDescriptor& descriptor = cLayer->GetParameters();
436 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
437 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
439 result = layerSupportObject->IsInstanceNormalizationSupported(
440 OverrideDataType(input, dataType),
441 OverrideDataType(output, dataType),
// L2Normalization:
448 auto cLayer = PolymorphicDowncast<const L2NormalizationLayer*>(&layer);
449 const L2NormalizationDescriptor& descriptor = cLayer->GetParameters();
451 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
452 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
454 result = layerSupportObject->IsL2NormalizationSupported(
455 OverrideDataType(input, dataType),
456 OverrideDataType(output, dataType),
// LogicalBinary: boolean inputs are passed through without type override.
463 auto cLayer = PolymorphicDowncast<const LogicalBinaryLayer*>(&layer);
465 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
466 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
467 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
469 result = layerSupportObject->IsLogicalBinarySupported(input0,
472 cLayer->GetParameters(),
// LogSoftmax:
478 auto cLayer = PolymorphicDowncast<const LogSoftmaxLayer*>(&layer);
480 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
481 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
483 result = layerSupportObject->IsLogSoftmaxSupported(OverrideDataType(input, dataType),
484 OverrideDataType(output, dataType),
485 cLayer->GetParameters(),
// Lstm: gathers the mandatory (basic) weights/biases, then conditionally the
// CIFG, projection, peephole and layer-normalization parameter sets according
// to the descriptor flags, into an LstmInputParamsInfo.
491 auto cLayer = PolymorphicDowncast<const LstmLayer*>(&layer);
492 const LstmDescriptor& descriptor = cLayer->GetParameters();
// All inputs.
495 const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
497 const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
499 const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
// All outputs.
502 const TensorInfo& scratchBuffer = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
503 const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
504 const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
505 const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(3).GetTensorInfo(), dataType);
// Mandatory basic parameters.
508 const TensorInfo& inputToForgetWeights
509 = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
510 const TensorInfo& inputToCellWeights
511 = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
512 const TensorInfo& inputToOutputWeights
513 = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
514 const TensorInfo& recurrentToForgetWeights
515 = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
516 const TensorInfo& recurrentToCellWeights
517 = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
518 const TensorInfo& recurrentToOutputWeights
519 = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
520 const TensorInfo& forgetGateBias
521 = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
522 const TensorInfo& cellBias
523 = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
524 const TensorInfo& outputGateBias
525 = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);
527 LstmInputParamsInfo paramsInfo;
529 paramsInfo.m_InputToForgetWeights = &inputToForgetWeights;
530 paramsInfo.m_InputToCellWeights = &inputToCellWeights;
531 paramsInfo.m_InputToOutputWeights = &inputToOutputWeights;
532 paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
533 paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights;
534 paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
535 paramsInfo.m_ForgetGateBias = &forgetGateBias;
536 paramsInfo.m_CellBias = &cellBias;
537 paramsInfo.m_OutputGateBias = &outputGateBias;
// Storage for optional parameter sets; only filled/registered when enabled.
541 TensorInfo optInputToInputWeights;
542 TensorInfo optRecurrentToInputWeights;
543 TensorInfo optCellToInputWeights;
544 TensorInfo optInputGateBias;
545 TensorInfo optProjectionWeights;
546 TensorInfo optProjectionBias;
547 TensorInfo optCellToForgetWeights;
548 TensorInfo optCellToOutputWeights;
549 TensorInfo optInputLayerNormWeights;
550 TensorInfo optForgetLayerNormWeights;
551 TensorInfo optCellLayerNormWeights;
552 TensorInfo optOutputLayerNormWeights;
// CIFG disabled => the input gate parameters are present.
554 if(!descriptor.m_CifgEnabled)
556 optInputToInputWeights =
557 OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
558 paramsInfo.m_InputToInputWeights = &optInputToInputWeights;
560 optRecurrentToInputWeights =
561 OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
562 paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
564 OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
565 paramsInfo.m_InputGateBias = &optInputGateBias;
// Projection parameters; the projection bias is itself optional.
568 if(descriptor.m_ProjectionEnabled)
570 optProjectionWeights =
571 OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
572 paramsInfo.m_ProjectionWeights = &optProjectionWeights;
573 if (cLayer->m_ProjectionParameters.m_ProjectionBias !=
nullptr)
576 OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
577 paramsInfo.m_ProjectionBias = &optProjectionBias;
// Peephole connections; the input-gate peephole also requires CIFG disabled.
581 if(descriptor.m_PeepholeEnabled)
583 if(!descriptor.m_CifgEnabled)
585 optCellToInputWeights =
586 OverrideDataType(cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
588 paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
590 optCellToForgetWeights =
591 OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
592 paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
593 optCellToOutputWeights =
594 OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
595 paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
// Layer normalization weights; input-gate norm again needs CIFG disabled.
598 if(descriptor.m_LayerNormEnabled)
600 if (!descriptor.m_CifgEnabled)
602 optInputLayerNormWeights = OverrideDataType(
603 cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
604 paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
607 optForgetLayerNormWeights = OverrideDataType(
608 cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
609 paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;
611 optCellLayerNormWeights = OverrideDataType(
612 cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
613 paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;
615 optOutputLayerNormWeights = OverrideDataType(
616 cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
617 paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
620 result = layerSupportObject->IsLstmSupported(
// Maximum:
635 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
636 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
637 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
639 result = layerSupportObject->IsMaximumSupported(OverrideDataType(input0, dataType),
640 OverrideDataType(input1, dataType),
641 OverrideDataType(output, dataType),
// MemCopy:
647 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
648 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
650 result = layerSupportObject->IsMemCopySupported(OverrideDataType(input, dataType),
651 OverrideDataType(output, dataType),
// MemImport:
657 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
658 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
660 result = layerSupportObject->IsMemImportSupported(OverrideDataType(input, dataType),
661 OverrideDataType(output, dataType),
// Merge:
667 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
668 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
669 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
671 result = layerSupportObject->IsMergeSupported(OverrideDataType(input0, dataType),
672 OverrideDataType(input1, dataType),
673 OverrideDataType(output, dataType),
// Concat: builds a vector of (re-typed) input infos, then a parallel vector of
// pointers, because the support query takes const TensorInfo*.
679 auto cLayer = PolymorphicDowncast<const ConcatLayer*>(&layer);
682 auto getTensorInfo = [&dataType](
const InputSlot& slot)
684 return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
689 std::vector<TensorInfo> inputs(beginI, endI);
691 auto getTensorInfoPtr = [](
const TensorInfo&
info)
698 std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
700 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
702 result = layerSupportObject->IsConcatSupported(inputPtrs, output, cLayer->GetParameters(), reason);
// Multiplication:
709 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
710 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
711 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
712 result = layerSupportObject->IsMultiplicationSupported(
713 OverrideDataType(input0, dataType),
714 OverrideDataType(input1, dataType),
715 OverrideDataType(output, dataType),
// Normalization:
721 auto cLayer = PolymorphicDowncast<const NormalizationLayer*>(&layer);
722 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
723 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
724 result = layerSupportObject->IsNormalizationSupported(OverrideDataType(input, dataType),
725 OverrideDataType(output, dataType),
726 cLayer->GetParameters(),
// Output: an output layer is queried on its incoming tensor.
732 const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
733 result = layerSupportObject->IsOutputSupported(OverrideDataType(output, dataType), reason);
// Permute:
738 auto cLayer = PolymorphicDowncast<const PermuteLayer*>(&layer);
739 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
740 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
741 result = layerSupportObject->IsPermuteSupported(OverrideDataType(input, dataType),
742 OverrideDataType(output, dataType),
743 cLayer->GetParameters(),
// Pad:
749 auto cLayer = PolymorphicDowncast<const PadLayer*>(&layer);
750 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
751 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
752 result = layerSupportObject->IsPadSupported(
753 OverrideDataType(input, dataType),
754 OverrideDataType(output, dataType),
755 cLayer->GetParameters(),
// Pooling2d:
761 auto cLayer = PolymorphicDowncast<const Pooling2dLayer*>(&layer);
762 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
763 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
764 result = layerSupportObject->IsPooling2dSupported(OverrideDataType(input, dataType),
765 OverrideDataType(output, dataType),
766 cLayer->GetParameters(),
// PreCompiled: only the input tensor and the descriptor are queried.
772 auto cLayer = PolymorphicDowncast<const PreCompiledLayer*>(&layer);
773 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
774 result = layerSupportObject->IsPreCompiledSupported(OverrideDataType(input, dataType),
775 cLayer->GetParameters(),
// Quantize: like Dequantize, the tensor types are not overridden.
781 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
782 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
783 result = layerSupportObject->IsQuantizeSupported(input, output, reason);
// QLstm: same parameter-collection pattern as Lstm, but the infos are taken
// as-is (no data type override) since the layer is inherently quantized.
788 auto cLayer = PolymorphicDowncast<const QLstmLayer*>(&layer);
789 const QLstmDescriptor& descriptor = cLayer->GetParameters();
// Inputs
792 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
793 const TensorInfo& previousOutputIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
794 const TensorInfo& previousCellStateIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();
// Outputs
797 const TensorInfo& outputStateOut = layer.GetOutputSlot(0).GetTensorInfo();
798 const TensorInfo& cellStateOut = layer.GetOutputSlot(1).GetTensorInfo();
799 const TensorInfo& output = layer.GetOutputSlot(2).GetTensorInfo();
802 LstmInputParamsInfo paramsInfo;
// Mandatory basic parameters.
805 paramsInfo.m_InputToForgetWeights = &cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo();
806 paramsInfo.m_InputToCellWeights = &cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo();
807 paramsInfo.m_InputToOutputWeights = &cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo();
809 paramsInfo.m_RecurrentToForgetWeights =
810 &cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo();
811 paramsInfo.m_RecurrentToCellWeights =
812 &cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo();
813 paramsInfo.m_RecurrentToOutputWeights =
814 &cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo();
816 paramsInfo.m_ForgetGateBias = &cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo();
817 paramsInfo.m_CellBias = &cLayer->m_BasicParameters.m_CellBias->GetTensorInfo();
818 paramsInfo.m_OutputGateBias = &cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo();
// Optional CIFG (input-gate) parameters.
820 if(!descriptor.m_CifgEnabled)
822 paramsInfo.m_InputToInputWeights = &cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo();
823 paramsInfo.m_RecurrentToInputWeights =
824 &cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo();
825 paramsInfo.m_InputGateBias = &cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo();
// Optional projection parameters (bias itself optional).
828 if(descriptor.m_ProjectionEnabled)
830 paramsInfo.m_ProjectionWeights = &cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo();
833 if (cLayer->m_ProjectionParameters.m_ProjectionBias !=
nullptr)
835 paramsInfo.m_ProjectionBias = &cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo();
// Optional peephole parameters.
839 if(descriptor.m_PeepholeEnabled)
841 if (!descriptor.m_CifgEnabled)
843 paramsInfo.m_CellToInputWeights =
844 &cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo();
847 paramsInfo.m_CellToForgetWeights =
848 &cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo();
849 paramsInfo.m_CellToOutputWeights = &cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo();
// Optional layer-normalization parameters.
852 if(descriptor.m_LayerNormEnabled)
854 if (!descriptor.m_CifgEnabled)
856 paramsInfo.m_InputLayerNormWeights =
857 &cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo();
860 paramsInfo.m_ForgetLayerNormWeights =
861 &cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo();
862 paramsInfo.m_CellLayerNormWeights =
863 &cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo();
864 paramsInfo.m_OutputLayerNormWeights =
865 &cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo();
868 result = layerSupportObject->IsQLstmSupported(input,
// QuantizedLstm: all parameters are mandatory for this layer type.
881 auto cLayer = PolymorphicDowncast<const QuantizedLstmLayer*>(&layer);
// Inputs
884 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
885 const TensorInfo& previousCellStateIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
886 const TensorInfo& previousOutputIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();
// Outputs
889 const TensorInfo& cellStateOut = layer.GetOutputSlot(0).GetTensorInfo();
890 const TensorInfo& output = layer.GetOutputSlot(1).GetTensorInfo();
893 QuantizedLstmInputParamsInfo paramsInfo;
895 paramsInfo.m_InputToInputWeights =
896 &cLayer->m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo();
897 paramsInfo.m_InputToForgetWeights =
898 &cLayer->m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo();
899 paramsInfo.m_InputToCellWeights =
900 &cLayer->m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo();
901 paramsInfo.m_InputToOutputWeights =
902 &cLayer->m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo();
904 paramsInfo.m_RecurrentToInputWeights =
905 &cLayer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo();
906 paramsInfo.m_RecurrentToForgetWeights =
907 &cLayer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo();
908 paramsInfo.m_RecurrentToCellWeights =
909 &cLayer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo();
910 paramsInfo.m_RecurrentToOutputWeights =
911 &cLayer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo();
913 paramsInfo.m_InputGateBias =
914 &cLayer->m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo();
915 paramsInfo.m_ForgetGateBias =
916 &cLayer->m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo();
917 paramsInfo.m_CellBias =
918 &cLayer->m_QuantizedLstmParameters.m_CellBias->GetTensorInfo();
919 paramsInfo.m_OutputGateBias =
// NOTE(review): stray extra ';' on the next line — harmless, but should be removed.
920 &cLayer->m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo();;
922 result = layerSupportObject->IsQuantizedLstmSupported(input,
// Division:
933 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
934 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
935 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
936 result = layerSupportObject->IsDivisionSupported(
937 OverrideDataType(input0, dataType),
938 OverrideDataType(input1, dataType),
939 OverrideDataType(output, dataType),
// Rank:
945 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
946 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
947 result = layerSupportObject->IsRankSupported(OverrideDataType(input, dataType),
948 OverrideDataType(output, dataType),
// Reshape:
954 auto cLayer = PolymorphicDowncast<const ReshapeLayer*>(&layer);
955 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
956 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
957 result = layerSupportObject->IsReshapeSupported(OverrideDataType(input, dataType),
958 OverrideDataType(output, dataType),
959 cLayer->GetParameters(),
// Resize:
965 auto cLayer = PolymorphicDowncast<const ResizeLayer*>(&layer);
966 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
967 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
968 result = layerSupportObject->IsResizeSupported(OverrideDataType(input, dataType),
969 OverrideDataType(output, dataType),
970 cLayer->GetParameters(),
// Slice:
976 auto cLayer = PolymorphicDowncast<const SliceLayer*>(&layer);
978 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
979 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
981 result = layerSupportObject->IsSliceSupported(OverrideDataType(input, dataType),
982 OverrideDataType(output, dataType),
983 cLayer->GetParameters(),
// Softmax:
989 auto cLayer = PolymorphicDowncast<const SoftmaxLayer*>(&layer);
990 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
991 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
992 result = layerSupportObject->IsSoftmaxSupported(OverrideDataType(input, dataType),
993 OverrideDataType(output, dataType),
994 cLayer->GetParameters(),
// SpaceToBatchNd:
1000 auto cLayer = PolymorphicDowncast<const SpaceToBatchNdLayer*>(&layer);
1001 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1002 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1003 result = layerSupportObject->IsSpaceToBatchNdSupported(OverrideDataType(input, dataType),
1004 OverrideDataType(output, dataType),
1005 cLayer->GetParameters(),
// SpaceToDepth:
1011 auto cLayer = PolymorphicDowncast<const SpaceToDepthLayer*>(&layer);
1013 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1014 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1016 result = layerSupportObject->IsSpaceToDepthSupported(OverrideDataType(input, dataType),
1017 OverrideDataType(output, dataType),
1018 cLayer->GetParameters(),
// Splitter: one input, multiple outputs gathered via a re-typing lambda.
1024 auto cLayer = PolymorphicDowncast<const SplitterLayer*>(&layer);
1025 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
// Get vector of all outputs.
1028 auto getTensorInfo = [&dataType](
const OutputSlot& slot)
1030 return OverrideDataType(slot.GetTensorInfo(), dataType);
1034 std::vector<TensorInfo> outputs(beginI, endI);
1036 const std::vector<std::reference_wrapper<TensorInfo>> outputPtrs(outputs.begin(), outputs.end());
1038 result = layerSupportObject->IsSplitterSupported(OverrideDataType(input, dataType),
1040 cLayer->GetParameters(),
// Stack: same infos-then-pointers pattern as Concat.
1046 auto cLayer = PolymorphicDowncast<const StackLayer*>(&layer);
1049 auto getTensorInfo = [&dataType](
const InputSlot& slot)
1051 return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
1055 std::vector<TensorInfo> inputs(beginI, endI);
1057 auto getTensorInfoPtr = [](
const TensorInfo&
info)
1063 std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
1065 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1067 result = layerSupportObject->IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason);
// StandIn: arbitrary numbers of inputs and outputs, both collected via lambdas.
1073 auto cLayer = PolymorphicDowncast<const StandInLayer*>(&layer);
1076 auto getTensorInfoIn = [&dataType](
const InputSlot& slot)
1078 return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
1080 auto getTensorInfoOut = [&dataType](
const OutputSlot& slot)
1082 return OverrideDataType(slot.GetTensorInfo(), dataType);
1086 std::vector<TensorInfo> inputs(beginI, endI);
1090 std::vector<TensorInfo> outputs(beginO, endO);
1093 auto getTensorInfoPtr = [](
const TensorInfo&
info)
1099 std::vector<const TensorInfo*> inputPtrs(beginPtrI, endPtrI);
1103 std::vector<const TensorInfo*> outputPtrs(beginPtrO, endPtrO);
1106 result = layerSupportObject->IsStandInSupported(inputPtrs,
1108 cLayer->GetParameters(),
// StridedSlice:
1114 auto cLayer = PolymorphicDowncast<const StridedSliceLayer*>(&layer);
1115 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1116 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1117 result = layerSupportObject->IsStridedSliceSupported(OverrideDataType(input, dataType),
1118 OverrideDataType(output, dataType),
1119 cLayer->GetParameters(),
// Subtraction:
1125 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1126 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1127 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1128 result = layerSupportObject->IsSubtractionSupported(
1129 OverrideDataType(input0, dataType),
1130 OverrideDataType(input1, dataType),
1131 OverrideDataType(output, dataType),
1137 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1138 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1139 const TensorInfo& output0 = layer.GetOutputSlot(0).GetTensorInfo();
1140 const TensorInfo& output1 = layer.GetOutputSlot(1).GetTensorInfo();
1141 result = layerSupportObject->IsSwitchSupported(OverrideDataType(input0, dataType),
1142 OverrideDataType(input1, dataType),
1143 OverrideDataType(output0, dataType),
1144 OverrideDataType(output1, dataType),
1150 auto cLayer = PolymorphicDowncast<const MeanLayer*>(&layer);
1151 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1152 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1153 result = layerSupportObject->IsMeanSupported(
1154 OverrideDataType(input, dataType),
1155 OverrideDataType(output, dataType),
1156 cLayer->GetParameters(),
1162 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1163 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1164 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1165 result = layerSupportObject->IsMinimumSupported(OverrideDataType(input0, dataType),
1166 OverrideDataType(input1, dataType),
1167 OverrideDataType(output, dataType),
1173 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1174 const TensorInfo& alpha = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1175 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1176 result = layerSupportObject->IsPreluSupported(OverrideDataType(input, dataType),
1177 OverrideDataType(alpha, dataType),
1178 OverrideDataType(output, dataType),
1184 auto cLayer = PolymorphicDowncast<const TransposeLayer*>(&layer);
1185 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1186 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1187 result = layerSupportObject->IsTransposeSupported(OverrideDataType(input, dataType),
1188 OverrideDataType(output, dataType),
1189 cLayer->GetParameters(),
1195 auto cLayer = PolymorphicDowncast<const TransposeConvolution2dLayer*>(&layer);
1197 const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
1199 const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
1201 const TransposeConvolution2dDescriptor& descriptor = cLayer->GetParameters();
1203 Optional<TensorInfo> biases;
1204 if (descriptor.m_BiasEnabled)
1207 biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
1212 const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);
1214 result = layerSupportObject->IsTransposeConvolution2dSupported(input,
1225 auto cLayer = PolymorphicDowncast<const ReduceLayer*>(&layer);
1226 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1227 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1229 result = layerSupportObject->IsReduceSupported(OverrideDataType(input, dataType),
1230 OverrideDataType(output, dataType),
1231 cLayer->GetParameters(),
1237 ARMNN_ASSERT_MSG(
false,
"WorkloadFactory did not recognise type of layer.");
1238 reason.value() =
"Unrecognised layer type";
1249 std::string& outReasonIfUnsupported)
1251 return IsLayerConfigurationSupported(backendId, connectableLayer, dataType, outReasonIfUnsupported);
1256 std::string& outReasonIfUnsupported)
1258 auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
1259 return IsLayerConfigurationSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
1265 std::string& outReasonIfUnsupported,
1268 auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
1269 return IsLayerConfigurationSupported(layer->GetBackendId(),
1272 outReasonIfUnsupported,
1279 std::string& outReasonIfUnsupported,
1282 return IsLayerConfigurationSupported(backendId,
1285 outReasonIfUnsupported,
1293 return std::unique_ptr<IWorkload>();
1299 return std::unique_ptr<IWorkload>();
1305 return std::unique_ptr<IWorkload>();
1311 return std::unique_ptr<IWorkload>();
1317 return std::unique_ptr<IWorkload>();
1323 return std::unique_ptr<IWorkload>();
1329 return std::unique_ptr<IWorkload>();
1335 return std::unique_ptr<IWorkload>();
1341 return std::unique_ptr<IWorkload>();
1347 return std::unique_ptr<IWorkload>();
1353 return std::unique_ptr<IWorkload>();
1359 return std::unique_ptr<IWorkload>();
1365 return std::unique_ptr<IWorkload>();
1371 return std::unique_ptr<IWorkload>();
1377 return std::unique_ptr<IWorkload>();
1383 return std::unique_ptr<IWorkload>();
1389 return std::unique_ptr<IWorkload>();
1395 return std::unique_ptr<IWorkload>();
1401 return std::unique_ptr<IWorkload>();
1407 return std::unique_ptr<IWorkload>();
1413 return std::unique_ptr<IWorkload>();
1419 return std::unique_ptr<IWorkload>();
1425 return std::unique_ptr<IWorkload>();
1431 return std::unique_ptr<IWorkload>();
1437 return std::unique_ptr<IWorkload>();
1443 return std::unique_ptr<IWorkload>();
1449 return std::unique_ptr<IWorkload>();
1455 return std::unique_ptr<IWorkload>();
1462 return std::unique_ptr<IWorkload>();
1468 return std::unique_ptr<IWorkload>();
1474 return std::unique_ptr<IWorkload>();
1480 return std::unique_ptr<IWorkload>();
1486 return std::unique_ptr<IWorkload>();
1492 return std::unique_ptr<IWorkload>();
1498 return std::unique_ptr<IWorkload>();
1504 return std::unique_ptr<IWorkload>();
1510 return std::unique_ptr<IWorkload>();
1516 return std::unique_ptr<IWorkload>();
1522 return std::unique_ptr<IWorkload>();
1528 return std::unique_ptr<IWorkload>();
1534 return std::unique_ptr<IWorkload>();
1540 return std::unique_ptr<IWorkload>();
1546 return std::unique_ptr<IWorkload>();
1552 return std::unique_ptr<IWorkload>();
1558 return std::unique_ptr<IWorkload>();
1564 return std::unique_ptr<IWorkload>();
1570 return std::unique_ptr<IWorkload>();
1576 return std::unique_ptr<IWorkload>();
1582 return std::unique_ptr<IWorkload>();
1588 return std::unique_ptr<IWorkload>();
1594 return std::unique_ptr<IWorkload>();
1600 return std::unique_ptr<IWorkload>();
1605 return std::unique_ptr<IWorkload>();
1611 return std::unique_ptr<IWorkload>();
1617 return std::unique_ptr<IWorkload>();
1623 return std::unique_ptr<IWorkload>();
1629 return std::unique_ptr<IWorkload>();
1635 return std::unique_ptr<IWorkload>();
1641 return std::unique_ptr<IWorkload>();
1647 return std::unique_ptr<IWorkload>();
1653 return std::unique_ptr<IWorkload>();
1659 return std::unique_ptr<IWorkload>();
1665 return std::unique_ptr<IWorkload>();
1671 return std::unique_ptr<IWorkload>();
1677 return std::unique_ptr<IWorkload>();
1683 return std::unique_ptr<IWorkload>();
1689 return std::unique_ptr<IWorkload>();
1695 return std::unique_ptr<IWorkload>();
1702 return std::unique_ptr<IWorkload>();
virtual std::unique_ptr< IWorkload > CreateSplitter(const SplitterQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateBatchNormalization(const BatchNormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateDebug(const DebugQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMemCopy(const MemCopyQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateL2Normalization(const L2NormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
virtual std::unique_ptr< IWorkload > CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateMultiplication(const MultiplicationQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateInstanceNormalization(const InstanceNormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateGreater(const GreaterQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateArgMinMax(const ArgMinMaxQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMerger(const MergerQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateLogicalUnary(const ElementwiseUnaryQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateLogSoftmax(const LogSoftmaxQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateResizeBilinear(const ResizeBilinearQueueDescriptor &descriptor, const WorkloadInfo &info) const
std::vector< BackendOptions > ModelOptions
virtual std::unique_ptr< IWorkload > CreateStridedSlice(const StridedSliceQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateStack(const StackQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateLstm(const LstmQueueDescriptor &descriptor, const WorkloadInfo &info) const
constexpr TransformIterator< Function, Iterator > MakeTransformIterator(Iterator i, Function f)
virtual std::unique_ptr< IWorkload > CreateFakeQuantization(const FakeQuantizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateQuantizedLstm(const QuantizedLstmQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateQLstm(const QLstmQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateConstant(const ConstantQueueDescriptor &descriptor, const WorkloadInfo &info) const
BackendRegistry & BackendRegistryInstance()
virtual std::unique_ptr< IWorkload > CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor &descriptor, const WorkloadInfo &Info) const
std::vector< float > boxEncodings({ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f })
virtual std::unique_ptr< IWorkload > CreateAbs(const AbsQueueDescriptor &descriptor, const WorkloadInfo &info) const
Copyright (c) 2021 ARM Limited and Contributors.
virtual std::unique_ptr< IWorkload > CreateActivation(const ActivationQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateRsqrt(const RsqrtQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateTranspose(const TransposeQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateDivision(const DivisionQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateConvertFp32ToBf16(const ConvertFp32ToBf16QueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMaximum(const MaximumQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMerge(const MergeQueueDescriptor &descriptor, const WorkloadInfo &info) const
armnn::Optional< armnn::DataType > GetBiasTypeFromWeightsType(armnn::Optional< armnn::DataType > weightsType)
virtual std::unique_ptr< IWorkload > CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateEqual(const EqualQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateRank(const RankQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateDetectionPostProcess(const DetectionPostProcessQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateResize(const ResizeQueueDescriptor &descriptor, const WorkloadInfo &info) const
#define ARMNN_ASSERT_MSG(COND, MSG)
virtual std::unique_ptr< IWorkload > CreateQuantize(const QuantizeQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateReduce(const ReduceQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateSwitch(const SwitchQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreatePad(const PadQueueDescriptor &descriptor, const WorkloadInfo &Info) const
#define ARMNN_ASSERT(COND)
static bool IsLayerSupported(const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
virtual std::unique_ptr< IWorkload > CreateNormalization(const NormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateLogicalBinary(const LogicalBinaryQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateReshape(const ReshapeQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreatePermute(const PermuteQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateFill(const FillQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateComparison(const ComparisonQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreatePooling2d(const Pooling2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateSpaceToDepth(const SpaceToDepthQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateGather(const GatherQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMinimum(const MinimumQueueDescriptor &descriptor, const WorkloadInfo &info) const
std::vector< float > scores({ 0.0f, 0.9f, 0.8f, 0.0f, 0.75f, 0.72f, 0.0f, 0.6f, 0.5f, 0.0f, 0.93f, 0.95f, 0.0f, 0.5f, 0.4f, 0.0f, 0.3f, 0.2f })
virtual std::unique_ptr< IWorkload > CreateDepthToSpace(const DepthToSpaceQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateSlice(const SliceQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateAddition(const AdditionQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMean(const MeanQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateOutput(const OutputQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateSoftmax(const SoftmaxQueueDescriptor &descriptor, const WorkloadInfo &info) const
Contains information about inputs and outputs to a layer.
virtual std::unique_ptr< IWorkload > CreateFullyConnected(const FullyConnectedQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateFloor(const FloorQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMemImport(const MemImportQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateSubtraction(const SubtractionQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreatePreCompiled(const PreCompiledQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateConvolution2d(const Convolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreatePrelu(const PreluQueueDescriptor &descriptor, const WorkloadInfo &info) const
std::vector< float > anchors({ 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 100.5f, 1.0f, 1.0f })
virtual std::unique_ptr< IWorkload > CreateDequantize(const DequantizeQueueDescriptor &descriptor, const WorkloadInfo &info) const