// // Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #include #include #include #include #include #include #include #include #include #include #include #include namespace armnn { namespace { using LayerList = std::list; using Iterator = LayerList::const_iterator; // Const so pointers in the list can't be modified externally. const TensorInfo OverrideDataType(const TensorInfo& info, Optional type) { if (!type) { return info; } return TensorInfo(info.GetShape(), type.value(), info.GetQuantizationScale(), info.GetQuantizationOffset(), info.IsConstant()); } } // anonymous namespace inline armnn::Optional GetBiasTypeFromWeightsType(armnn::Optional weightsType) { if (!weightsType) { return weightsType; } switch(weightsType.value()) { case armnn::DataType::BFloat16: case armnn::DataType::Float16: case armnn::DataType::Float32: return weightsType; case armnn::DataType::QAsymmS8: case armnn::DataType::QAsymmU8: case armnn::DataType::QSymmS8: case armnn::DataType::QSymmS16: return armnn::DataType::Signed32; default: ARMNN_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type."); } return armnn::EmptyOptional(); } bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const IConnectableLayer& connectableLayer, Optional dataType, std::string& outReasonIfUnsupported, const ModelOptions& modelOptions) { Optional reason = outReasonIfUnsupported; bool result; const Layer& layer = *(PolymorphicDowncast(&connectableLayer)); auto const& backendRegistry = BackendRegistryInstance(); if (!backendRegistry.IsBackendRegistered(backendId)) { std::stringstream ss; ss << connectableLayer.GetName() << " is not supported on " << backendId << " because this backend is not registered."; outReasonIfUnsupported = ss.str(); return false; } auto backendFactory = backendRegistry.GetFactory(backendId); auto backendObject = backendFactory(); auto layerSupportObject = 
LayerSupportHandle(backendObject->GetLayerSupport(modelOptions), backendId); switch(layer.GetType()) { case LayerType::Activation: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsActivationSupported( OverrideDataType(input, dataType), OverrideDataType(output, dataType), cLayer->GetParameters(), reason); break; } case LayerType::Addition: { const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsAdditionSupported( OverrideDataType(input0, dataType), OverrideDataType(input1, dataType), OverrideDataType(output, dataType), reason); break; } case LayerType::ArgMinMax: { auto cLayer = PolymorphicDowncast(&layer); const ArgMinMaxDescriptor& descriptor = cLayer->GetParameters(); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsArgMinMaxSupported( OverrideDataType(input, dataType), OverrideDataType(output, DataType::Signed32), descriptor, reason); break; } case LayerType::BatchNormalization: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); const TensorInfo& mean = cLayer->m_Mean->GetTensorInfo(); const TensorInfo& var = cLayer->m_Variance->GetTensorInfo(); const TensorInfo& beta = cLayer->m_Beta->GetTensorInfo(); const TensorInfo& gamma = cLayer->m_Gamma->GetTensorInfo(); result = layerSupportObject.IsBatchNormalizationSupported( OverrideDataType(input, dataType), OverrideDataType(output, dataType), OverrideDataType(mean, 
dataType), OverrideDataType(var, dataType), OverrideDataType(beta, dataType), OverrideDataType(gamma, dataType), cLayer->GetParameters(), reason); break; } case LayerType::BatchToSpaceNd: { const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); auto cLayer = PolymorphicDowncast(&layer); result = layerSupportObject.IsBatchToSpaceNdSupported(OverrideDataType(input, dataType), OverrideDataType(output, dataType), cLayer->GetParameters(), reason); break; } case LayerType::Cast: { const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsCastSupported(OverrideDataType(input, dataType), OverrideDataType(output, dataType), reason); break; } case LayerType::ChannelShuffle: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const ChannelShuffleDescriptor descriptor = cLayer->GetParameters(); result = layerSupportObject.IsChannelShuffleSupported(OverrideDataType(input, dataType), OverrideDataType(output, dataType), descriptor, reason); break; } case LayerType::Comparison: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsComparisonSupported(OverrideDataType(input0, dataType), OverrideDataType(input1, dataType), OverrideDataType(output, DataType::Boolean), cLayer->GetParameters(), reason); break; } case LayerType::Constant: { const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = 
layerSupportObject.IsConstantSupported(OverrideDataType(output, dataType), reason); break; } case LayerType::ConvertBf16ToFp32: { const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsConvertBf16ToFp32Supported(input, output, reason); break; } case LayerType::ConvertFp16ToFp32: { const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsConvertFp16ToFp32Supported(input, output, reason); break; } case LayerType::ConvertFp32ToBf16: { const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsConvertFp32ToBf16Supported(input, output, reason); break; } case LayerType::ConvertFp32ToFp16: { const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsConvertFp32ToFp16Supported(input, output, reason); break; } case LayerType::Convolution2d: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(), dataType); const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType); ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr); const Convolution2dDescriptor& descriptor = cLayer->GetParameters(); // Construct optional biases object based on the value of m_BiasEnabled Optional biases; if (descriptor.m_BiasEnabled) { biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType)); } result = layerSupportObject.IsConvolution2dSupported( input, output, descriptor, OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType), biases, reason); break; } case 
LayerType::Convolution3d: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(), dataType); const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType); ARMNN_ASSERT_MSG(layer.GetInputSlot(1).GetConnection(), "Convolution3dLayer: Weights should be connected as a Constant Layer."); const TensorInfo weights = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(), dataType); const Convolution3dDescriptor& descriptor = cLayer->GetParameters(); // Construct optional biases object based on the value of m_BiasEnabled Optional biases; if (descriptor.m_BiasEnabled) { biases = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType)); } result = layerSupportObject.IsConvolution3dSupported( input, output, descriptor, weights, biases, reason); break; } case LayerType::Debug: { const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsDebugSupported(OverrideDataType(input, dataType), OverrideDataType(output, dataType), reason); break; } case LayerType::DepthToSpace: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsDepthToSpaceSupported(OverrideDataType(input, dataType), OverrideDataType(output, dataType), cLayer->GetParameters(), reason); break; } case LayerType::DepthwiseConvolution2d: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(), dataType); const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType); ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr); const 
DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters(); // Construct optional biases object based on the value of m_BiasEnabled Optional biases; if (descriptor.m_BiasEnabled) { biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType)); } result = layerSupportObject.IsDepthwiseConvolutionSupported( input, output, descriptor, OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType), biases, reason); break; } case LayerType::Dequantize: { const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsDequantizeSupported(input, OverrideDataType(output, dataType), reason); break; } case LayerType::DetectionPostProcess: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& boxEncodings = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& scores = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); const TensorInfo& anchors = cLayer->m_Anchors->GetTensorInfo(); const TensorInfo& detectionBoxes = layer.GetOutputSlot(0).GetTensorInfo(); const TensorInfo& detectionClasses = layer.GetOutputSlot(1).GetTensorInfo(); const TensorInfo& detectionScores = layer.GetOutputSlot(2).GetTensorInfo(); const TensorInfo& numDetections = layer.GetOutputSlot(3).GetTensorInfo(); const DetectionPostProcessDescriptor& descriptor = cLayer->GetParameters(); result = layerSupportObject.IsDetectionPostProcessSupported(boxEncodings, scores, anchors, detectionBoxes, detectionClasses, detectionScores, numDetections, descriptor, reason); break; } case LayerType::ElementwiseUnary: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsElementwiseUnarySupported(OverrideDataType(input, dataType), OverrideDataType(output, dataType), 
cLayer->GetParameters(), reason); break; } case LayerType::Fill: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); const FillDescriptor& descriptor = cLayer->GetParameters(); result = layerSupportObject.IsFillSupported( OverrideDataType(input, dataType), OverrideDataType(output, dataType), descriptor, reason); break; } case LayerType::FakeQuantization: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); result = layerSupportObject.IsFakeQuantizationSupported(OverrideDataType(input, dataType), cLayer->GetParameters(), reason); break; } case LayerType::Floor: { const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsFloorSupported(OverrideDataType(input, dataType), OverrideDataType(output, dataType), reason); break; } case LayerType::FullyConnected: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); const FullyConnectedDescriptor& descriptor = cLayer->GetParameters(); TensorInfo weightsInfo; const TensorInfo* weightsInfoPtr = nullptr; weightsInfo = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(), dataType); weightsInfoPtr = &weightsInfo; TensorInfo biasInfo; const TensorInfo* biasInfoPtr = nullptr; static const TensorInfo dummyBFloat16Bias(TensorShape({1,1,1,1}), DataType::BFloat16); static const TensorInfo dummyFloat16Bias(TensorShape({1,1,1,1}), DataType::Float16); static const TensorInfo dummyFloat32Bias(TensorShape({1,1,1,1}), DataType::Float32); static const TensorInfo dummyQA8Bias(TensorShape({1,1,1,1}), DataType::Signed32); if (descriptor.m_BiasEnabled) { 
biasInfo = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(), dataType); biasInfoPtr = &biasInfo; } else { // If biases are not enabled pass a dummy tensorinfo for the validation switch(input.GetDataType()) { case DataType::BFloat16: { biasInfoPtr = &dummyBFloat16Bias; break; } case DataType::Float16: { biasInfoPtr = &dummyFloat16Bias; break; } case DataType::Float32: { biasInfoPtr = &dummyFloat32Bias; break; } case DataType::QAsymmU8: case DataType::QAsymmS8: case DataType::QSymmS8: case DataType::QSymmS16: { biasInfoPtr = &dummyQA8Bias; break; } default: { ARMNN_ASSERT_MSG(false, "Unexpected bias type"); } } } result = layerSupportObject.IsFullyConnectedSupported( OverrideDataType(input, dataType), OverrideDataType(output, dataType), *weightsInfoPtr, *biasInfoPtr, descriptor, reason); break; } case LayerType::Gather: { const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); auto cLayer = PolymorphicDowncast(&layer); const GatherDescriptor& descriptor = cLayer->GetParameters(); result = layerSupportObject.IsGatherSupported(OverrideDataType(input0, dataType), input1, OverrideDataType(output, dataType), descriptor, reason); break; } case LayerType::Input: { const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsInputSupported(OverrideDataType(input, dataType), reason); break; } case LayerType::InstanceNormalization: { auto cLayer = PolymorphicDowncast(&layer); const InstanceNormalizationDescriptor& descriptor = cLayer->GetParameters(); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsInstanceNormalizationSupported( OverrideDataType(input, dataType), OverrideDataType(output, dataType), descriptor, 
reason); break; } case LayerType::L2Normalization: { auto cLayer = PolymorphicDowncast(&layer); const L2NormalizationDescriptor& descriptor = cLayer->GetParameters(); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsL2NormalizationSupported( OverrideDataType(input, dataType), OverrideDataType(output, dataType), descriptor, reason); break; } case LayerType::LogicalBinary: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsLogicalBinarySupported(input0, input1, output, cLayer->GetParameters(), reason); break; } case LayerType::LogSoftmax: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsLogSoftmaxSupported(OverrideDataType(input, dataType), OverrideDataType(output, dataType), cLayer->GetParameters(), reason); break; } case LayerType::Lstm: { auto cLayer = PolymorphicDowncast(&layer); const LstmDescriptor& descriptor = cLayer->GetParameters(); // All inputs. 
const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(), dataType); const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(), dataType); const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(), dataType); // All outputs const TensorInfo& scratchBuffer = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType); const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType); const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType); const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(3).GetTensorInfo(), dataType); // Basic parameters const TensorInfo& inputToForgetWeights = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType); const TensorInfo& inputToCellWeights = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType); const TensorInfo& inputToOutputWeights = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType); const TensorInfo& recurrentToForgetWeights = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType); const TensorInfo& recurrentToCellWeights = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType); const TensorInfo& recurrentToOutputWeights = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType); const TensorInfo& forgetGateBias = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType); const TensorInfo& cellBias = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType); const TensorInfo& outputGateBias = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType); LstmInputParamsInfo 
paramsInfo; paramsInfo.m_InputToForgetWeights = &inputToForgetWeights; paramsInfo.m_InputToCellWeights = &inputToCellWeights; paramsInfo.m_InputToOutputWeights = &inputToOutputWeights; paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights; paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights; paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights; paramsInfo.m_ForgetGateBias = &forgetGateBias; paramsInfo.m_CellBias = &cellBias; paramsInfo.m_OutputGateBias = &outputGateBias; // Optional parameters TensorInfo optInputToInputWeights; TensorInfo optRecurrentToInputWeights; TensorInfo optCellToInputWeights; TensorInfo optInputGateBias; TensorInfo optProjectionWeights; TensorInfo optProjectionBias; TensorInfo optCellToForgetWeights; TensorInfo optCellToOutputWeights; TensorInfo optInputLayerNormWeights; TensorInfo optForgetLayerNormWeights; TensorInfo optCellLayerNormWeights; TensorInfo optOutputLayerNormWeights; if(!descriptor.m_CifgEnabled) { optInputToInputWeights = OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType); paramsInfo.m_InputToInputWeights = &optInputToInputWeights; optRecurrentToInputWeights = OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType); paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights; optInputGateBias = OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType); paramsInfo.m_InputGateBias = &optInputGateBias; } if(descriptor.m_ProjectionEnabled) { optProjectionWeights = OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType); paramsInfo.m_ProjectionWeights = &optProjectionWeights; if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr) { optProjectionBias = OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType); paramsInfo.m_ProjectionBias = &optProjectionBias; } } if(descriptor.m_PeepholeEnabled) { 
if(!descriptor.m_CifgEnabled) { optCellToInputWeights = OverrideDataType(cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(), dataType); paramsInfo.m_CellToInputWeights = &optCellToInputWeights; } optCellToForgetWeights = OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType); paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights; optCellToOutputWeights = OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType); paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights; } if(descriptor.m_LayerNormEnabled) { if (!descriptor.m_CifgEnabled) { optInputLayerNormWeights = OverrideDataType( cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType); paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights; } optForgetLayerNormWeights = OverrideDataType( cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType); paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights; optCellLayerNormWeights = OverrideDataType( cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType); paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights; optOutputLayerNormWeights = OverrideDataType( cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType); paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights; } result = layerSupportObject.IsLstmSupported( input, outputStateIn, cellStateIn, scratchBuffer, outputStateOut, cellStateOut, output, descriptor, paramsInfo, reason); break; } case LayerType::Maximum: { const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsMaximumSupported(OverrideDataType(input0, dataType), OverrideDataType(input1, dataType), 
OverrideDataType(output, dataType), reason); break; } case LayerType::MemCopy: { const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsMemCopySupported(OverrideDataType(input, dataType), OverrideDataType(output, dataType), reason); break; } case LayerType::MemImport: { const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsMemImportSupported(OverrideDataType(input, dataType), OverrideDataType(output, dataType), reason); break; } case LayerType::Merge: { const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsMergeSupported(OverrideDataType(input0, dataType), OverrideDataType(input1, dataType), OverrideDataType(output, dataType), reason); break; } case LayerType::Concat: { auto cLayer = PolymorphicDowncast(&layer); // Get vector of all inputs. 
auto getTensorInfo = [&dataType](const InputSlot& slot) { return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType); }; auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo); auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo); std::vector inputs(beginI, endI); auto getTensorInfoPtr = [](const TensorInfo& info) { return &info; }; auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr); auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr); std::vector inputPtrs(beginPtr, endPtr); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsConcatSupported(inputPtrs, output, cLayer->GetParameters(), reason); break; } case LayerType::Multiplication: { const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsMultiplicationSupported( OverrideDataType(input0, dataType), OverrideDataType(input1, dataType), OverrideDataType(output, dataType), reason); break; } case LayerType::Normalization: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsNormalizationSupported(OverrideDataType(input, dataType), OverrideDataType(output, dataType), cLayer->GetParameters(), reason); break; } case LayerType::Output: { const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); result = layerSupportObject.IsOutputSupported(OverrideDataType(output, dataType), reason); break; } case LayerType::Permute: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = 
layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsPermuteSupported(OverrideDataType(input, dataType), OverrideDataType(output, dataType), cLayer->GetParameters(), reason); break; } case LayerType::Pad: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsPadSupported( OverrideDataType(input, dataType), OverrideDataType(output, dataType), cLayer->GetParameters(), reason); break; } case LayerType::Pooling2d: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsPooling2dSupported(OverrideDataType(input, dataType), OverrideDataType(output, dataType), cLayer->GetParameters(), reason); break; } case LayerType::Pooling3d: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsPooling3dSupported(OverrideDataType(input, dataType), OverrideDataType(output, dataType), cLayer->GetParameters(), reason); break; } case LayerType::PreCompiled: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); result = layerSupportObject.IsPreCompiledSupported(OverrideDataType(input, dataType), cLayer->GetParameters(), reason); break; } case LayerType::Quantize: { const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsQuantizeSupported(input, output, reason); break; } case LayerType::QLstm: { auto cLayer = PolymorphicDowncast(&layer); const QLstmDescriptor& descriptor = 
cLayer->GetParameters(); // Inputs const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& previousOutputIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); const TensorInfo& previousCellStateIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo(); // Outputs const TensorInfo& outputStateOut = layer.GetOutputSlot(0).GetTensorInfo(); const TensorInfo& cellStateOut = layer.GetOutputSlot(1).GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(2).GetTensorInfo(); // Lstm parameters LstmInputParamsInfo paramsInfo; // Basic parameters ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToForgetWeights.get() != nullptr); ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToCellWeights.get() != nullptr); ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToOutputWeights.get() != nullptr); paramsInfo.m_InputToForgetWeights = &cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(); paramsInfo.m_InputToCellWeights = &cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(); paramsInfo.m_InputToOutputWeights = &cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(); paramsInfo.m_RecurrentToForgetWeights = &cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(); paramsInfo.m_RecurrentToCellWeights = &cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(); paramsInfo.m_RecurrentToOutputWeights = &cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(); paramsInfo.m_ForgetGateBias = &cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(); paramsInfo.m_CellBias = &cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(); paramsInfo.m_OutputGateBias = &cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(); if(!descriptor.m_CifgEnabled) { paramsInfo.m_InputToInputWeights = &cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(); paramsInfo.m_RecurrentToInputWeights = &cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(); 
paramsInfo.m_InputGateBias = &cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(); } if(descriptor.m_ProjectionEnabled) { paramsInfo.m_ProjectionWeights = &cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(); // Projection bias is optional even if projection is enabled if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr) { paramsInfo.m_ProjectionBias = &cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(); } } if(descriptor.m_PeepholeEnabled) { if (!descriptor.m_CifgEnabled) { paramsInfo.m_CellToInputWeights = &cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(); } paramsInfo.m_CellToForgetWeights = &cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(); paramsInfo.m_CellToOutputWeights = &cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(); } if(descriptor.m_LayerNormEnabled) { if (!descriptor.m_CifgEnabled) { paramsInfo.m_InputLayerNormWeights = &cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(); } paramsInfo.m_ForgetLayerNormWeights = &cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(); paramsInfo.m_CellLayerNormWeights = &cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(); paramsInfo.m_OutputLayerNormWeights = &cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(); } result = layerSupportObject.IsQLstmSupported(input, previousOutputIn, previousCellStateIn, outputStateOut, cellStateOut, output, descriptor, paramsInfo, reason); break; } case LayerType::QuantizedLstm: { auto cLayer = PolymorphicDowncast(&layer); // Inputs const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& previousCellStateIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); const TensorInfo& previousOutputIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo(); // Outputs const TensorInfo& cellStateOut = layer.GetOutputSlot(0).GetTensorInfo(); const TensorInfo& output = 
layer.GetOutputSlot(1).GetTensorInfo(); // QuantizedLstm parameters QuantizedLstmInputParamsInfo paramsInfo; paramsInfo.m_InputToInputWeights = &cLayer->m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo(); paramsInfo.m_InputToForgetWeights = &cLayer->m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo(); paramsInfo.m_InputToCellWeights = &cLayer->m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo(); paramsInfo.m_InputToOutputWeights = &cLayer->m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo(); paramsInfo.m_RecurrentToInputWeights = &cLayer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo(); paramsInfo.m_RecurrentToForgetWeights = &cLayer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo(); paramsInfo.m_RecurrentToCellWeights = &cLayer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo(); paramsInfo.m_RecurrentToOutputWeights = &cLayer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo(); paramsInfo.m_InputGateBias = &cLayer->m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo(); paramsInfo.m_ForgetGateBias = &cLayer->m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo(); paramsInfo.m_CellBias = &cLayer->m_QuantizedLstmParameters.m_CellBias->GetTensorInfo(); paramsInfo.m_OutputGateBias = &cLayer->m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo();; result = layerSupportObject.IsQuantizedLstmSupported(input, previousCellStateIn, previousOutputIn, cellStateOut, output, paramsInfo, reason); break; } case LayerType::Division: { const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsDivisionSupported( OverrideDataType(input0, dataType), OverrideDataType(input1, dataType), OverrideDataType(output, dataType), reason); break; } 
case LayerType::Rank: { const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsRankSupported(OverrideDataType(input, dataType), OverrideDataType(output, dataType), reason); break; } case LayerType::Reshape: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsReshapeSupported(OverrideDataType(input, dataType), OverrideDataType(output, dataType), cLayer->GetParameters(), reason); break; } case LayerType::Resize: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsResizeSupported(OverrideDataType(input, dataType), OverrideDataType(output, dataType), cLayer->GetParameters(), reason); break; } case LayerType::Shape: { const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsShapeSupported(OverrideDataType(input, dataType), OverrideDataType(output, dataType), reason); break; } case LayerType::Slice: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsSliceSupported(OverrideDataType(input, dataType), OverrideDataType(output, dataType), cLayer->GetParameters(), reason); break; } case LayerType::Softmax: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = 
layerSupportObject.IsSoftmaxSupported(OverrideDataType(input, dataType), OverrideDataType(output, dataType), cLayer->GetParameters(), reason); break; } case LayerType::SpaceToBatchNd: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsSpaceToBatchNdSupported(OverrideDataType(input, dataType), OverrideDataType(output, dataType), cLayer->GetParameters(), reason); break; } case LayerType::SpaceToDepth: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsSpaceToDepthSupported(OverrideDataType(input, dataType), OverrideDataType(output, dataType), cLayer->GetParameters(), reason); break; } case LayerType::Splitter: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); // Get vector of all outputs. auto getTensorInfo = [&dataType](const OutputSlot& slot) { return OverrideDataType(slot.GetTensorInfo(), dataType); }; auto beginI = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfo); auto endI = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfo); std::vector outputs(beginI, endI); const std::vector> outputPtrs(outputs.begin(), outputs.end()); result = layerSupportObject.IsSplitterSupported(OverrideDataType(input, dataType), outputPtrs, cLayer->GetParameters(), reason); break; } case LayerType::Stack: { auto cLayer = PolymorphicDowncast(&layer); // Get vector of all inputs. 
auto getTensorInfo = [&dataType](const InputSlot& slot) { return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType); }; auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo); auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo); std::vector inputs(beginI, endI); auto getTensorInfoPtr = [](const TensorInfo& info) { return &info; }; auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr); auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr); std::vector inputPtrs(beginPtr, endPtr); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason); break; } case LayerType::StandIn: { auto cLayer = PolymorphicDowncast(&layer); // Get vector of all inputs. auto getTensorInfoIn = [&dataType](const InputSlot& slot) { return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType); }; auto getTensorInfoOut = [&dataType](const OutputSlot& slot) { return OverrideDataType(slot.GetTensorInfo(), dataType); }; auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfoIn); auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfoIn); std::vector inputs(beginI, endI); auto beginO = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfoOut); auto endO = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfoOut); std::vector outputs(beginO, endO); auto getTensorInfoPtr = [](const TensorInfo& info) { return &info; }; auto beginPtrI = MakeTransformIterator(inputs.begin(), getTensorInfoPtr); auto endPtrI = MakeTransformIterator(inputs.end(), getTensorInfoPtr); std::vector inputPtrs(beginPtrI, endPtrI); auto beginPtrO = MakeTransformIterator(outputs.begin(), getTensorInfoPtr); auto endPtrO = MakeTransformIterator(outputs.end(), getTensorInfoPtr); std::vector outputPtrs(beginPtrO, endPtrO); result = 
layerSupportObject.IsStandInSupported(inputPtrs, outputPtrs, cLayer->GetParameters(), reason); break; } case LayerType::StridedSlice: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsStridedSliceSupported(OverrideDataType(input, dataType), OverrideDataType(output, dataType), cLayer->GetParameters(), reason); break; } case LayerType::Subtraction: { const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsSubtractionSupported( OverrideDataType(input0, dataType), OverrideDataType(input1, dataType), OverrideDataType(output, dataType), reason); break; } case LayerType::Switch: { const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); const TensorInfo& output0 = layer.GetOutputSlot(0).GetTensorInfo(); const TensorInfo& output1 = layer.GetOutputSlot(1).GetTensorInfo(); result = layerSupportObject.IsSwitchSupported(OverrideDataType(input0, dataType), OverrideDataType(input1, dataType), OverrideDataType(output0, dataType), OverrideDataType(output1, dataType), reason); break; } case LayerType::Mean: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsMeanSupported( OverrideDataType(input, dataType), OverrideDataType(output, dataType), cLayer->GetParameters(), reason); break; } case LayerType::Minimum: { const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& input1 = 
layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsMinimumSupported(OverrideDataType(input0, dataType), OverrideDataType(input1, dataType), OverrideDataType(output, dataType), reason); break; } case LayerType::Prelu: { const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& alpha = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsPreluSupported(OverrideDataType(input, dataType), OverrideDataType(alpha, dataType), OverrideDataType(output, dataType), reason); break; } case LayerType::Transpose: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsTransposeSupported(OverrideDataType(input, dataType), OverrideDataType(output, dataType), cLayer->GetParameters(), reason); break; } case LayerType::TransposeConvolution2d: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(), dataType); const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType); const TransposeConvolution2dDescriptor& descriptor = cLayer->GetParameters(); Optional biases; if (descriptor.m_BiasEnabled) { ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr); biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType)); } ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr); const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType); result = layerSupportObject.IsTransposeConvolution2dSupported(input, output, descriptor, weights, biases, reason); break; } case LayerType::Reduce: { auto cLayer = PolymorphicDowncast(&layer); 
const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); result = layerSupportObject.IsReduceSupported(OverrideDataType(input, dataType), OverrideDataType(output, dataType), cLayer->GetParameters(), reason); break; } case LayerType::UnidirectionalSequenceLstm: { auto cLayer = PolymorphicDowncast(&layer); const UnidirectionalSequenceLstmDescriptor& descriptor = cLayer->GetParameters(); // All inputs. const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(), dataType); const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(), dataType); const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(), dataType); // Outputs const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType); // Basic parameters const TensorInfo& inputToForgetWeights = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType); const TensorInfo& inputToCellWeights = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType); const TensorInfo& inputToOutputWeights = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType); const TensorInfo& recurrentToForgetWeights = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType); const TensorInfo& recurrentToCellWeights = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType); const TensorInfo& recurrentToOutputWeights = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType); const TensorInfo& forgetGateBias = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType); const TensorInfo& cellBias = 
OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType); const TensorInfo& outputGateBias = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType); LstmInputParamsInfo paramsInfo; paramsInfo.m_InputToForgetWeights = &inputToForgetWeights; paramsInfo.m_InputToCellWeights = &inputToCellWeights; paramsInfo.m_InputToOutputWeights = &inputToOutputWeights; paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights; paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights; paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights; paramsInfo.m_ForgetGateBias = &forgetGateBias; paramsInfo.m_CellBias = &cellBias; paramsInfo.m_OutputGateBias = &outputGateBias; // Optional parameters TensorInfo optInputToInputWeights; TensorInfo optRecurrentToInputWeights; TensorInfo optCellToInputWeights; TensorInfo optInputGateBias; TensorInfo optProjectionWeights; TensorInfo optProjectionBias; TensorInfo optCellToForgetWeights; TensorInfo optCellToOutputWeights; TensorInfo optInputLayerNormWeights; TensorInfo optForgetLayerNormWeights; TensorInfo optCellLayerNormWeights; TensorInfo optOutputLayerNormWeights; if(!descriptor.m_CifgEnabled) { optInputToInputWeights = OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType); paramsInfo.m_InputToInputWeights = &optInputToInputWeights; optRecurrentToInputWeights = OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType); paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights; optInputGateBias = OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType); paramsInfo.m_InputGateBias = &optInputGateBias; } if(descriptor.m_ProjectionEnabled) { optProjectionWeights = OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType); paramsInfo.m_ProjectionWeights = &optProjectionWeights; if 
(cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr) { optProjectionBias = OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType); paramsInfo.m_ProjectionBias = &optProjectionBias; } } if(descriptor.m_PeepholeEnabled) { if(!descriptor.m_CifgEnabled) { optCellToInputWeights = OverrideDataType(cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(), dataType); paramsInfo.m_CellToInputWeights = &optCellToInputWeights; } optCellToForgetWeights = OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType); paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights; optCellToOutputWeights = OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType); paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights; } if(descriptor.m_LayerNormEnabled) { if (!descriptor.m_CifgEnabled) { optInputLayerNormWeights = OverrideDataType( cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType); paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights; } optForgetLayerNormWeights = OverrideDataType( cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType); paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights; optCellLayerNormWeights = OverrideDataType( cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType); paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights; optOutputLayerNormWeights = OverrideDataType( cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType); paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights; } Optional hiddenStateOut; Optional cellStateOut; result = layerSupportObject.IsUnidirectionalSequenceLstmSupported(input, outputStateIn, cellStateIn, output, hiddenStateOut, cellStateOut, descriptor, paramsInfo, reason); break; } default: { ARMNN_ASSERT_MSG(false, "WorkloadFactory did not recognise type of 
layer.");
            // Tail of IWorkloadFactory::IsLayerConfigurationSupported: the switch's default
            // branch reports an unrecognised layer type through the Optional wrapper around
            // outReasonIfUnsupported and fails the support query.
            // NOTE(review): angle-bracket template argument lists (on Optional,
            // PolymorphicDowncast, std::unique_ptr, ...) appear to have been stripped from
            // this copy of the file — restore them from the upstream source before compiling.
            reason.value() = "Unrecognised layer type";
            result = false;
            break;
        }
    }
    return result;
}

// Support query for a layer on an explicitly named backend; forwards with default ModelOptions.
bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
                                        const IConnectableLayer& connectableLayer,
                                        Optional dataType,
                                        std::string& outReasonIfUnsupported)
{
    return IsLayerConfigurationSupported(backendId, connectableLayer, dataType, outReasonIfUnsupported);
}

// Support query on the backend the layer has already been assigned to (layer->GetBackendId()).
bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
                                        Optional dataType,
                                        std::string& outReasonIfUnsupported)
{
    auto layer = PolymorphicDowncast(&connectableLayer);
    return IsLayerConfigurationSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
}

// TODO merge with defaulted modelOptions above
// As above, but with caller-supplied ModelOptions.
bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
                                        Optional dataType,
                                        std::string& outReasonIfUnsupported,
                                        const ModelOptions& modelOptions)
{
    auto layer = PolymorphicDowncast(&connectableLayer);
    return IsLayerConfigurationSupported(layer->GetBackendId(), connectableLayer, dataType,
                                         outReasonIfUnsupported, modelOptions);
}

// Explicit backend and explicit ModelOptions variant.
bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
                                        const IConnectableLayer& connectableLayer,
                                        Optional dataType,
                                        std::string& outReasonIfUnsupported,
                                        const ModelOptions& modelOptions)
{
    return IsLayerConfigurationSupported(backendId, connectableLayer, dataType,
                                         outReasonIfUnsupported, modelOptions);
}

ARMNN_NO_DEPRECATE_WARN_BEGIN
// Workload factory dispatcher: downcasts the generic QueueDescriptor to the concrete
// descriptor type matching `type` and forwards to the corresponding virtual Create*
// method. Returns nullptr for layer types not handled by the switch.
std::unique_ptr IWorkloadFactory::CreateWorkload(LayerType type,
                                                 const QueueDescriptor& descriptor,
                                                 const WorkloadInfo& info) const
{
    switch(type)
    {
        // Each case: downcast the descriptor, then delegate to the per-layer factory method.
        case LayerType::Activation : { auto activationQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateActivation(*activationQueueDescriptor, info); }
        case LayerType::Addition : { auto additionQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateAddition(*additionQueueDescriptor, info); }
        case LayerType::ArgMinMax : { auto argMinMaxQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateArgMinMax(*argMinMaxQueueDescriptor, info); }
        case LayerType::BatchNormalization : { auto batchNormQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateBatchNormalization(*batchNormQueueDescriptor, info); }
        case LayerType::BatchToSpaceNd : { auto batchToSpaceNdQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateBatchToSpaceNd(*batchToSpaceNdQueueDescriptor, info); }
        case LayerType::Cast : { auto castQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateCast(*castQueueDescriptor, info); }
        case LayerType::ChannelShuffle : { auto channelShuffleQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateChannelShuffle(*channelShuffleQueueDescriptor, info); }
        case LayerType::Comparison : { auto comparisonQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateComparison(*comparisonQueueDescriptor, info); }
        case LayerType::Concat : { auto concatQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateConcat(*concatQueueDescriptor, info); }
        case LayerType::Constant : { auto constantQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateConstant(*constantQueueDescriptor, info); }
        case LayerType::ConvertBf16ToFp32 : { auto convertBf16ToFp32QueueDescriptor = PolymorphicDowncast(&descriptor); return CreateConvertBf16ToFp32(*convertBf16ToFp32QueueDescriptor, info); }
        case LayerType::ConvertFp16ToFp32: { auto convertFp16ToFp32QueueDescriptor = PolymorphicDowncast(&descriptor); return CreateConvertFp16ToFp32(*convertFp16ToFp32QueueDescriptor, info); }
        case LayerType::ConvertFp32ToBf16: { auto convertFp32ToBf16QueueDescriptor = PolymorphicDowncast(&descriptor); return CreateConvertFp32ToBf16(*convertFp32ToBf16QueueDescriptor, info); }
        case LayerType::ConvertFp32ToFp16: { auto convertFp32ToFp16QueueDescriptor = PolymorphicDowncast(&descriptor); return CreateConvertFp32ToFp16(*convertFp32ToFp16QueueDescriptor, info); }
        case LayerType::Convolution2d: { auto convolution2dQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateConvolution2d(*convolution2dQueueDescriptor, info); }
        case LayerType::Convolution3d: { auto convolution3dQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateConvolution3d(*convolution3dQueueDescriptor, info); }
        case LayerType::Debug: { auto debugQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateDebug(*debugQueueDescriptor, info); }
        case LayerType::DepthToSpace: { auto depthToSpaceQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateDepthToSpace(*depthToSpaceQueueDescriptor, info); }
        case LayerType::DepthwiseConvolution2d: { auto depthwiseConvolution2DQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateDepthwiseConvolution2d(*depthwiseConvolution2DQueueDescriptor, info); }
        case LayerType::Dequantize: { auto dequantizeQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateDequantize(*dequantizeQueueDescriptor, info); }
        case LayerType::DetectionPostProcess: { auto detectionPostProcessQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateDetectionPostProcess(*detectionPostProcessQueueDescriptor, info); }
        case LayerType::Division: { auto divisionQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateDivision(*divisionQueueDescriptor, info); }
        case LayerType::ElementwiseUnary: { auto elementwiseUnaryQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateElementwiseUnary(*elementwiseUnaryQueueDescriptor, info); }
        case LayerType::FakeQuantization: { auto fakeQuantizationQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateFakeQuantization(*fakeQuantizationQueueDescriptor, info); }
        case LayerType::Fill: { auto fillQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateFill(*fillQueueDescriptor, info); }
        case LayerType::Floor: { auto floorQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateFloor(*floorQueueDescriptor, info); }
        case LayerType::FullyConnected: { auto fullyConnectedQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateFullyConnected(*fullyConnectedQueueDescriptor, info); }
        case LayerType::Gather: { auto gatherQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateGather(*gatherQueueDescriptor, info); }
        case LayerType::Input: { auto inputQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateInput(*inputQueueDescriptor, info); }
        case LayerType::InstanceNormalization: { auto instanceNormalizationQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateInstanceNormalization(*instanceNormalizationQueueDescriptor, info); }
        case LayerType::L2Normalization: { auto l2NormalizationQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateL2Normalization(*l2NormalizationQueueDescriptor, info); }
        case LayerType::LogicalBinary: { auto logicalBinaryQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateLogicalBinary(*logicalBinaryQueueDescriptor, info); }
        case LayerType::LogSoftmax: { auto logSoftmaxQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateLogSoftmax(*logSoftmaxQueueDescriptor, info); }
        case LayerType::Lstm: { auto lstmQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateLstm(*lstmQueueDescriptor, info); }
        case LayerType::Maximum: { auto maximumQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateMaximum(*maximumQueueDescriptor, info); }
        case LayerType::Mean: { auto meanQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateMean(*meanQueueDescriptor, info); }
        case LayerType::MemCopy: { auto memCopyQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateMemCopy(*memCopyQueueDescriptor, info); }
        case LayerType::MemImport: { auto memImportQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateMemImport(*memImportQueueDescriptor, info); }
        case LayerType::Minimum: { auto minimumQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateMinimum(*minimumQueueDescriptor, info); }
        case LayerType::Multiplication: { auto multiplicationQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateMultiplication(*multiplicationQueueDescriptor, info); }
        case LayerType::Normalization: { auto normalizationQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateNormalization(*normalizationQueueDescriptor, info); }
        case LayerType::Output: { auto outputQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateOutput(*outputQueueDescriptor, info); }
        case LayerType::Pad: { auto padQueueDescriptor = PolymorphicDowncast(&descriptor); return CreatePad(*padQueueDescriptor, info); }
        case LayerType::Permute: { auto permuteQueueDescriptor = PolymorphicDowncast(&descriptor); return CreatePermute(*permuteQueueDescriptor, info); }
        case LayerType::Pooling2d: { auto pooling2dQueueDescriptor = PolymorphicDowncast(&descriptor); return CreatePooling2d(*pooling2dQueueDescriptor, info); }
        case LayerType::Pooling3d: { auto pooling3dQueueDescriptor = PolymorphicDowncast(&descriptor); return CreatePooling3d(*pooling3dQueueDescriptor, info); }
        case LayerType::PreCompiled: { auto preCompiledQueueDescriptor = PolymorphicDowncast(&descriptor); return CreatePreCompiled(*preCompiledQueueDescriptor, info); }
        case LayerType::Prelu: { auto preluQueueDescriptor = PolymorphicDowncast(&descriptor); return CreatePrelu(*preluQueueDescriptor, info); }
        case LayerType::QLstm: { auto qlstmQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateQLstm(*qlstmQueueDescriptor, info); }
        case LayerType::Quantize: { auto quantizeQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateQuantize(*quantizeQueueDescriptor, info); }
        case LayerType::Rank: { auto rankQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateRank(*rankQueueDescriptor, info); }
        case LayerType::Reduce: { auto reduceQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateReduce(*reduceQueueDescriptor, info); }
        case LayerType::Reshape: { auto reshapeQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateReshape(*reshapeQueueDescriptor, info); }
        case LayerType::Resize: { auto resizeQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateResize(*resizeQueueDescriptor, info); }
        case LayerType::Shape: { auto shapeQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateShape(*shapeQueueDescriptor, info); }
        case LayerType::Slice: { auto sliceQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateSlice(*sliceQueueDescriptor, info); }
        case LayerType::Softmax: { auto softmaxQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateSoftmax(*softmaxQueueDescriptor, info); }
        case LayerType::SpaceToBatchNd: { auto spaceToBatchNdQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateSpaceToBatchNd(*spaceToBatchNdQueueDescriptor, info); }
        case LayerType::SpaceToDepth: { auto spaceToDepthQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateSpaceToDepth(*spaceToDepthQueueDescriptor, info); }
        case LayerType::Splitter: { auto splitterQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateSplitter(*splitterQueueDescriptor, info); }
        case LayerType::Stack: { auto stackQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateStack(*stackQueueDescriptor, info); }
        case LayerType::StridedSlice: { auto stridedSliceQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateStridedSlice(*stridedSliceQueueDescriptor, info); }
        case LayerType::Subtraction: { auto subtractionQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateSubtraction(*subtractionQueueDescriptor, info); }
        case LayerType::Transpose: { auto transposeQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateTranspose(*transposeQueueDescriptor, info); }
        case LayerType::TransposeConvolution2d: { auto transposeConvolution2dQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateTransposeConvolution2d(*transposeConvolution2dQueueDescriptor, info); }
        case LayerType::UnidirectionalSequenceLstm: { auto unidirectionalSequenceLstmQueueDescriptor = PolymorphicDowncast(&descriptor); return CreateUnidirectionalSequenceLstm(*unidirectionalSequenceLstmQueueDescriptor, info); }
        default:
            // Unknown layer type: no workload can be built.
            return nullptr;
    }
}
ARMNN_NO_DEPRECATE_WARN_END

// Default per-layer factory implementations. Each returns a default-constructed (null)
// unique_ptr; presumably concrete backend factories override the ones they support —
// TODO confirm against the IWorkloadFactory declaration.
std::unique_ptr IWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateBatchNormalization(const BatchNormalizationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& /*desc*/, const WorkloadInfo& /*Info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateCast(const CastQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateChannelShuffle(const ChannelShuffleQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor& /*desc*/, const
// Continuation of the default CreateConvertBf16ToFp32 stub (signature begins on the
// previous line). All stubs below return a default-constructed (null) unique_ptr;
// presumably concrete backend factories override the ones they support — TODO confirm
// against the IWorkloadFactory declaration.
// NOTE(review): angle-bracket template arguments (e.g. std::unique_ptr<IWorkload>)
// appear to have been stripped from this copy of the file — restore from upstream.
WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& /*desc*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateConvertFp32ToBf16(const ConvertFp32ToBf16QueueDescriptor& /*desc*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& /*desc*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateConvolution3d(const Convolution3dQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateDebug(const DebugQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateDequantize(const DequantizeQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateDetectionPostProcess(const DetectionPostProcessQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& /*desc*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& /*desc*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateFill(const FillQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateFloor(const FloorQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateGather(const GatherQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateInstanceNormalization(const InstanceNormalizationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& /*desc*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateLogicalBinary(const LogicalBinaryQueueDescriptor& /*desc*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateLogicalUnary(const ElementwiseUnaryQueueDescriptor& /*desc*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
std::unique_ptr IWorkloadFactory::CreateLstm(const LstmQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); }
// Start of the next default stub; its signature continues past the end of this chunk.
std::unique_ptr
IWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreateMean(const MeanQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*Info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreateMerge(const MergeQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreateOutput(const OutputQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreatePad(const PadQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*Info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreatePooling3d(const Pooling3dQueueDescriptor& 
/*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreatePrelu(const PreluQueueDescriptor &/*descriptor*/, const WorkloadInfo &/*info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*Info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreateQLstm(const QLstmQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreateRank(const RankQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreateReduce(const ReduceQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreateResize(const ResizeQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreateShape(const ShapeQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreateSlice(const SliceQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } 
std::unique_ptr IWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreateStack(const StackQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreateSwitch(const SwitchQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreateTransposeConvolution2d( const TransposeConvolution2dQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } std::unique_ptr IWorkloadFactory::CreateUnidirectionalSequenceLstm( const UnidirectionalSequenceLstmQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr(); } } // namepsace armnn