From f0a6dec75832604d5ab18242dc216852821a8279 Mon Sep 17 00:00:00 2001
From: Sadik Armagan
Date: Thu, 25 Mar 2021 07:46:55 +0000
Subject: IVGCVSW-5736 and IVGCVSW-5743 'NonConstWeights: Update front-end and
 TfLiteDelegate support for FullyConnected Operator'

* Added front-end support for non-const weights for FULLY_CONNECTED operator
* Added FULLY_CONNECTED end-to-end test
* Updated FULLY_CONNECTED operator support in TfLite Arm NN Delegate for non-const weights
* Updated the version numbers

Signed-off-by: Sadik Armagan
Change-Id: Iffa5b9aa9297aca4c02d923cce4636c88ac21faa
---
 src/backends/backendsCommon/WorkloadData.cpp    |  38 +-
 src/backends/backendsCommon/WorkloadFactory.cpp | 408 +++++++++++----------
 src/backends/backendsCommon/test/CMakeLists.txt |   1 +
 .../backendsCommon/test/CompatibilityTests.cpp  |  16 +
 .../test/FullyConnectedEndToEndTestImpl.hpp     |  97 +++++
 .../test/layerTests/FullyConnectedTestImpl.cpp  | 105 +++++-
 .../test/layerTests/FullyConnectedTestImpl.hpp  |   3 +-
 7 files changed, 454 insertions(+), 214 deletions(-)
 create mode 100644 src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp

diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 90db57f953..2c5303c019 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -1022,7 +1022,16 @@ void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
 {
     const std::string descriptorName{"FullyConnectedQueueDescriptor"};

-    ValidateNumInputs(workloadInfo, descriptorName, 1);
+    uint32_t numInputs = 1;
+    if (!m_Parameters.m_ConstantWeights)
+    {
+        numInputs = 2;
+        if (m_Parameters.m_BiasEnabled)
+        {
+            numInputs = 3;
+        }
+    }
+    ValidateNumInputs(workloadInfo, descriptorName, numInputs);
     ValidateNumOutputs(workloadInfo, descriptorName, 1);

     const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
@@ -1035,19 +1044,32 @@ void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
         throw InvalidArgumentException(descriptorName + ": Input tensor must have 2 or 4 dimensions.");
     }

-    ValidatePointer(m_Weight, descriptorName, "weight");
-
-    const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
+    TensorInfo weightTensorInfo;
+    if (m_Parameters.m_ConstantWeights)
+    {
+        ValidatePointer(m_Weight, descriptorName, "weight");
+        weightTensorInfo = m_Weight->GetTensorInfo();
+    }
+    else
+    {
+        weightTensorInfo = workloadInfo.m_InputTensorInfos[1];
+    }
     ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 2, "weight");

     if (m_Parameters.m_BiasEnabled)
     {
-        ValidatePointer(m_Bias, descriptorName, "bias");
-
+        TensorInfo biasTensorInfo;
+        if (m_Parameters.m_ConstantWeights)
+        {
+            ValidatePointer(m_Bias, descriptorName, "bias");
+            biasTensorInfo = m_Bias->GetTensorInfo();
+        }
+        else
+        {
+            biasTensorInfo = workloadInfo.m_InputTensorInfos[2];
+        }
         // Validates type and quantization values.
- const TensorInfo& biasTensorInfo = m_Bias->GetTensorInfo(); ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName); - ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias"); ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias"); } diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp index 19281a82e9..20d7134c3a 100644 --- a/src/backends/backendsCommon/WorkloadFactory.cpp +++ b/src/backends/backendsCommon/WorkloadFactory.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -63,7 +64,7 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, auto backendFactory = backendRegistry.GetFactory(backendId); auto backendObject = backendFactory(); - auto layerSupportObject = backendObject->GetLayerSupport(modelOptions); + auto layerSupportObject = LayerSupportHandle(backendObject->GetLayerSupport(modelOptions), backendId); switch(layer.GetType()) { @@ -72,7 +73,7 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsActivationSupported( + result = layerSupportObject.IsActivationSupported( OverrideDataType(input, dataType), OverrideDataType(output, dataType), cLayer->GetParameters(), @@ -84,7 +85,7 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsAdditionSupported( + result = layerSupportObject.IsAdditionSupported( OverrideDataType(input0, dataType), OverrideDataType(input1, dataType), OverrideDataType(output, dataType), @@ -98,7 +99,7 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsArgMinMaxSupported( + result = layerSupportObject.IsArgMinMaxSupported( OverrideDataType(input, dataType), OverrideDataType(output, DataType::Signed32), descriptor, @@ -114,7 +115,7 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const TensorInfo& var = cLayer->m_Variance->GetTensorInfo(); const TensorInfo& beta = cLayer->m_Beta->GetTensorInfo(); const TensorInfo& gamma = cLayer->m_Gamma->GetTensorInfo(); - result = layerSupportObject->IsBatchNormalizationSupported( + result = layerSupportObject.IsBatchNormalizationSupported( OverrideDataType(input, dataType), OverrideDataType(output, dataType), OverrideDataType(mean, dataType), @@ -131,10 +132,10 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); auto cLayer = PolymorphicDowncast(&layer); - result = layerSupportObject->IsBatchToSpaceNdSupported(OverrideDataType(input, dataType), - OverrideDataType(output, dataType), - cLayer->GetParameters(), - reason); + result = layerSupportObject.IsBatchToSpaceNdSupported(OverrideDataType(input, dataType), + 
OverrideDataType(output, dataType), + cLayer->GetParameters(), + reason); break; } case LayerType::Comparison: @@ -145,45 +146,45 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsComparisonSupported(OverrideDataType(input0, dataType), - OverrideDataType(input1, dataType), - OverrideDataType(output, DataType::Boolean), - cLayer->GetParameters(), - reason); + result = layerSupportObject.IsComparisonSupported(OverrideDataType(input0, dataType), + OverrideDataType(input1, dataType), + OverrideDataType(output, DataType::Boolean), + cLayer->GetParameters(), + reason); break; } case LayerType::Constant: { const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsConstantSupported(OverrideDataType(output, dataType), reason); + result = layerSupportObject.IsConstantSupported(OverrideDataType(output, dataType), reason); break; } case LayerType::ConvertBf16ToFp32: { const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsConvertBf16ToFp32Supported(input, output, reason); + result = layerSupportObject.IsConvertBf16ToFp32Supported(input, output, reason); break; } case LayerType::ConvertFp16ToFp32: { const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsConvertFp16ToFp32Supported(input, output, reason); + result = layerSupportObject.IsConvertFp16ToFp32Supported(input, output, reason); break; } case LayerType::ConvertFp32ToBf16: { const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsConvertFp32ToBf16Supported(input, output, reason); + result = layerSupportObject.IsConvertFp32ToBf16Supported(input, output, reason); break; } case LayerType::ConvertFp32ToFp16: { const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsConvertFp32ToFp16Supported(input, output, reason); + result = layerSupportObject.IsConvertFp32ToFp16Supported(input, output, reason); break; } case LayerType::Convolution2d: @@ -205,7 +206,7 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType)); } - result = layerSupportObject->IsConvolution2dSupported( + result = layerSupportObject.IsConvolution2dSupported( input, output, descriptor, @@ -219,7 +220,7 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsDebugSupported(OverrideDataType(input, dataType), + result = layerSupportObject.IsDebugSupported(OverrideDataType(input, dataType), OverrideDataType(output, dataType), reason); break; @@ -231,7 +232,7 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); 
const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsDepthToSpaceSupported(OverrideDataType(input, dataType), + result = layerSupportObject.IsDepthToSpaceSupported(OverrideDataType(input, dataType), OverrideDataType(output, dataType), cLayer->GetParameters(), reason); @@ -255,7 +256,7 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType)); } - result = layerSupportObject->IsDepthwiseConvolutionSupported( + result = layerSupportObject.IsDepthwiseConvolutionSupported( input, output, descriptor, @@ -269,9 +270,9 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsDequantizeSupported(input, - OverrideDataType(output, dataType), - reason); + result = layerSupportObject.IsDequantizeSupported(input, + OverrideDataType(output, dataType), + reason); break; } case LayerType::DetectionPostProcess: @@ -287,15 +288,15 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const TensorInfo& numDetections = layer.GetOutputSlot(3).GetTensorInfo(); const DetectionPostProcessDescriptor& descriptor = cLayer->GetParameters(); - result = layerSupportObject->IsDetectionPostProcessSupported(boxEncodings, - scores, - anchors, - detectionBoxes, - detectionClasses, - detectionScores, - numDetections, - descriptor, - reason); + result = layerSupportObject.IsDetectionPostProcessSupported(boxEncodings, + scores, + anchors, + detectionBoxes, + detectionClasses, + detectionScores, + numDetections, + descriptor, + reason); break; } case LayerType::ElementwiseUnary: @@ -305,10 +306,10 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsElementwiseUnarySupported(OverrideDataType(input, dataType), - OverrideDataType(output, dataType), - cLayer->GetParameters(), - reason); + result = layerSupportObject.IsElementwiseUnarySupported(OverrideDataType(input, dataType), + OverrideDataType(output, dataType), + cLayer->GetParameters(), + reason); break; } case LayerType::Fill: @@ -318,7 +319,7 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); const FillDescriptor& descriptor = cLayer->GetParameters(); - result = layerSupportObject->IsFillSupported( + result = layerSupportObject.IsFillSupported( OverrideDataType(input, dataType), OverrideDataType(output, dataType), descriptor, @@ -329,18 +330,18 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); - result = layerSupportObject->IsFakeQuantizationSupported(OverrideDataType(input, dataType), - cLayer->GetParameters(), - reason); + result = layerSupportObject.IsFakeQuantizationSupported(OverrideDataType(input, dataType), + cLayer->GetParameters(), + reason); break; } case LayerType::Floor: { const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = 
layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsFloorSupported(OverrideDataType(input, dataType), - OverrideDataType(output, dataType), - reason); + result = layerSupportObject.IsFloorSupported(OverrideDataType(input, dataType), + OverrideDataType(output, dataType), + reason); break; } case LayerType::FullyConnected: @@ -348,21 +349,43 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr); + + const FullyConnectedDescriptor& descriptor = cLayer->GetParameters(); + TensorInfo weightsInfo; + const TensorInfo* weightsInfoPtr = nullptr; + + if (descriptor.m_ConstantWeights) + { + ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr); + weightsInfo = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType); + } + else + { + weightsInfo = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(), dataType); + + } + weightsInfoPtr = &weightsInfo; TensorInfo biasInfo; - const TensorInfo * biasInfoPtr = nullptr; + const TensorInfo* biasInfoPtr = nullptr; static const TensorInfo dummyBFloat16Bias(TensorShape({1,1,1,1}), DataType::BFloat16); static const TensorInfo dummyFloat16Bias(TensorShape({1,1,1,1}), DataType::Float16); static const TensorInfo dummyFloat32Bias(TensorShape({1,1,1,1}), DataType::Float32); static const TensorInfo dummyQA8Bias(TensorShape({1,1,1,1}), DataType::Signed32); - const FullyConnectedDescriptor& descriptor = cLayer->GetParameters(); if (descriptor.m_BiasEnabled) { - ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr); - biasInfo = OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType)); - biasInfoPtr = &biasInfo; + if(descriptor.m_ConstantWeights) + { + ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr); + biasInfo = OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType)); + biasInfoPtr = &biasInfo; + } + else + { + biasInfo = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(), dataType); + biasInfoPtr = &biasInfo; + } } else { @@ -398,11 +421,10 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, } } } - - result = layerSupportObject->IsFullyConnectedSupported( + result = layerSupportObject.IsFullyConnectedSupported( OverrideDataType(input, dataType), OverrideDataType(output, dataType), - OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType), + *weightsInfoPtr, *biasInfoPtr, descriptor, reason); @@ -415,17 +437,17 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); auto cLayer = PolymorphicDowncast(&layer); const GatherDescriptor& descriptor = cLayer->GetParameters(); - result = layerSupportObject->IsGatherSupported(OverrideDataType(input0, dataType), - input1, - OverrideDataType(output, dataType), - descriptor, - reason); + result = layerSupportObject.IsGatherSupported(OverrideDataType(input0, dataType), + input1, + OverrideDataType(output, dataType), + descriptor, + reason); break; } case LayerType::Input: { const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsInputSupported(OverrideDataType(input, dataType), reason); + result = layerSupportObject.IsInputSupported(OverrideDataType(input, dataType), reason); 
break; } case LayerType::InstanceNormalization: @@ -436,7 +458,7 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsInstanceNormalizationSupported( + result = layerSupportObject.IsInstanceNormalizationSupported( OverrideDataType(input, dataType), OverrideDataType(output, dataType), descriptor, @@ -451,7 +473,7 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsL2NormalizationSupported( + result = layerSupportObject.IsL2NormalizationSupported( OverrideDataType(input, dataType), OverrideDataType(output, dataType), descriptor, @@ -466,11 +488,11 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsLogicalBinarySupported(input0, - input1, - output, - cLayer->GetParameters(), - reason); + result = layerSupportObject.IsLogicalBinarySupported(input0, + input1, + output, + cLayer->GetParameters(), + reason); break; } case LayerType::LogSoftmax: @@ -480,10 +502,10 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsLogSoftmaxSupported(OverrideDataType(input, dataType), - OverrideDataType(output, dataType), - cLayer->GetParameters(), - reason); + result = layerSupportObject.IsLogSoftmaxSupported(OverrideDataType(input, dataType), + OverrideDataType(output, dataType), + cLayer->GetParameters(), + reason); break; } case LayerType::Lstm: @@ -617,7 +639,7 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights; } - result = layerSupportObject->IsLstmSupported( + result = layerSupportObject.IsLstmSupported( input, outputStateIn, cellStateIn, @@ -636,10 +658,10 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsMaximumSupported(OverrideDataType(input0, dataType), - OverrideDataType(input1, dataType), - OverrideDataType(output, dataType), - reason); + result = layerSupportObject.IsMaximumSupported(OverrideDataType(input0, dataType), + OverrideDataType(input1, dataType), + OverrideDataType(output, dataType), + reason); break; } case LayerType::MemCopy: @@ -647,9 +669,9 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsMemCopySupported(OverrideDataType(input, dataType), - OverrideDataType(output, dataType), - reason); + result = layerSupportObject.IsMemCopySupported(OverrideDataType(input, dataType), + OverrideDataType(output, dataType), + reason); 
break; } case LayerType::MemImport: @@ -657,9 +679,9 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsMemImportSupported(OverrideDataType(input, dataType), - OverrideDataType(output, dataType), - reason); + result = layerSupportObject.IsMemImportSupported(OverrideDataType(input, dataType), + OverrideDataType(output, dataType), + reason); break; } case LayerType::Merge: @@ -668,10 +690,10 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsMergeSupported(OverrideDataType(input0, dataType), - OverrideDataType(input1, dataType), - OverrideDataType(output, dataType), - reason); + result = layerSupportObject.IsMergeSupported(OverrideDataType(input0, dataType), + OverrideDataType(input1, dataType), + OverrideDataType(output, dataType), + reason); break; } case LayerType::Concat: @@ -699,7 +721,7 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsConcatSupported(inputPtrs, output, cLayer->GetParameters(), reason); + result = layerSupportObject.IsConcatSupported(inputPtrs, output, cLayer->GetParameters(), reason); break; @@ -709,7 +731,7 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsMultiplicationSupported( + result = layerSupportObject.IsMultiplicationSupported( OverrideDataType(input0, dataType), OverrideDataType(input1, dataType), OverrideDataType(output, dataType), @@ -721,16 +743,16 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsNormalizationSupported(OverrideDataType(input, dataType), - OverrideDataType(output, dataType), - cLayer->GetParameters(), - reason); + result = layerSupportObject.IsNormalizationSupported(OverrideDataType(input, dataType), + OverrideDataType(output, dataType), + cLayer->GetParameters(), + reason); break; } case LayerType::Output: { const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); - result = layerSupportObject->IsOutputSupported(OverrideDataType(output, dataType), reason); + result = layerSupportObject.IsOutputSupported(OverrideDataType(output, dataType), reason); break; } case LayerType::Permute: @@ -738,10 +760,10 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsPermuteSupported(OverrideDataType(input, dataType), - OverrideDataType(output, dataType), - 
cLayer->GetParameters(), - reason); + result = layerSupportObject.IsPermuteSupported(OverrideDataType(input, dataType), + OverrideDataType(output, dataType), + cLayer->GetParameters(), + reason); break; } case LayerType::Pad: @@ -749,7 +771,7 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsPadSupported( + result = layerSupportObject.IsPadSupported( OverrideDataType(input, dataType), OverrideDataType(output, dataType), cLayer->GetParameters(), @@ -761,26 +783,26 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsPooling2dSupported(OverrideDataType(input, dataType), - OverrideDataType(output, dataType), - cLayer->GetParameters(), - reason); + result = layerSupportObject.IsPooling2dSupported(OverrideDataType(input, dataType), + OverrideDataType(output, dataType), + cLayer->GetParameters(), + reason); break; } case LayerType::PreCompiled: { auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); - result = layerSupportObject->IsPreCompiledSupported(OverrideDataType(input, dataType), - cLayer->GetParameters(), - reason); + result = layerSupportObject.IsPreCompiledSupported(OverrideDataType(input, dataType), + cLayer->GetParameters(), + reason); break; } case LayerType::Quantize: { const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsQuantizeSupported(input, output, reason); + result = layerSupportObject.IsQuantizeSupported(input, output, reason); break; } case LayerType::QLstm: @@ -865,15 +887,15 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, &cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(); } - result = layerSupportObject->IsQLstmSupported(input, - previousOutputIn, - previousCellStateIn, - outputStateOut, - cellStateOut, - output, - descriptor, - paramsInfo, - reason); + result = layerSupportObject.IsQLstmSupported(input, + previousOutputIn, + previousCellStateIn, + outputStateOut, + cellStateOut, + output, + descriptor, + paramsInfo, + reason); break; } case LayerType::QuantizedLstm: @@ -919,13 +941,13 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, paramsInfo.m_OutputGateBias = &cLayer->m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo();; - result = layerSupportObject->IsQuantizedLstmSupported(input, - previousCellStateIn, - previousOutputIn, - cellStateOut, - output, - paramsInfo, - reason); + result = layerSupportObject.IsQuantizedLstmSupported(input, + previousCellStateIn, + previousOutputIn, + cellStateOut, + output, + paramsInfo, + reason); break; } case LayerType::Division: @@ -933,7 +955,7 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); const TensorInfo& output = 
layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsDivisionSupported( + result = layerSupportObject.IsDivisionSupported( OverrideDataType(input0, dataType), OverrideDataType(input1, dataType), OverrideDataType(output, dataType), @@ -944,9 +966,9 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, { const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsRankSupported(OverrideDataType(input, dataType), - OverrideDataType(output, dataType), - reason); + result = layerSupportObject.IsRankSupported(OverrideDataType(input, dataType), + OverrideDataType(output, dataType), + reason); break; } case LayerType::Reshape: @@ -954,10 +976,10 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsReshapeSupported(OverrideDataType(input, dataType), - OverrideDataType(output, dataType), - cLayer->GetParameters(), - reason); + result = layerSupportObject.IsReshapeSupported(OverrideDataType(input, dataType), + OverrideDataType(output, dataType), + cLayer->GetParameters(), + reason); break; } case LayerType::Resize: @@ -965,10 +987,10 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsResizeSupported(OverrideDataType(input, dataType), - OverrideDataType(output, dataType), - cLayer->GetParameters(), - reason); + result = layerSupportObject.IsResizeSupported(OverrideDataType(input, dataType), + OverrideDataType(output, dataType), + cLayer->GetParameters(), + reason); break; } case LayerType::Slice: @@ -978,10 +1000,10 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsSliceSupported(OverrideDataType(input, dataType), - OverrideDataType(output, dataType), - cLayer->GetParameters(), - reason); + result = layerSupportObject.IsSliceSupported(OverrideDataType(input, dataType), + OverrideDataType(output, dataType), + cLayer->GetParameters(), + reason); break; } case LayerType::Softmax: @@ -989,10 +1011,10 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsSoftmaxSupported(OverrideDataType(input, dataType), - OverrideDataType(output, dataType), - cLayer->GetParameters(), - reason); + result = layerSupportObject.IsSoftmaxSupported(OverrideDataType(input, dataType), + OverrideDataType(output, dataType), + cLayer->GetParameters(), + reason); break; } case LayerType::SpaceToBatchNd: @@ -1000,10 +1022,10 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = 
layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsSpaceToBatchNdSupported(OverrideDataType(input, dataType), - OverrideDataType(output, dataType), - cLayer->GetParameters(), - reason); + result = layerSupportObject.IsSpaceToBatchNdSupported(OverrideDataType(input, dataType), + OverrideDataType(output, dataType), + cLayer->GetParameters(), + reason); break; } case LayerType::SpaceToDepth: @@ -1013,10 +1035,10 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsSpaceToDepthSupported(OverrideDataType(input, dataType), - OverrideDataType(output, dataType), - cLayer->GetParameters(), - reason); + result = layerSupportObject.IsSpaceToDepthSupported(OverrideDataType(input, dataType), + OverrideDataType(output, dataType), + cLayer->GetParameters(), + reason); break; } case LayerType::Splitter: @@ -1035,10 +1057,10 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const std::vector> outputPtrs(outputs.begin(), outputs.end()); - result = layerSupportObject->IsSplitterSupported(OverrideDataType(input, dataType), - outputPtrs, - cLayer->GetParameters(), - reason); + result = layerSupportObject.IsSplitterSupported(OverrideDataType(input, dataType), + outputPtrs, + cLayer->GetParameters(), + reason); break; } case LayerType::Stack: @@ -1064,7 +1086,7 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason); + result = layerSupportObject.IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason); break; } @@ -1103,10 +1125,10 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, std::vector outputPtrs(beginPtrO, endPtrO); - result = layerSupportObject->IsStandInSupported(inputPtrs, - outputPtrs, - cLayer->GetParameters(), - reason); + result = layerSupportObject.IsStandInSupported(inputPtrs, + outputPtrs, + cLayer->GetParameters(), + reason); break; } case LayerType::StridedSlice: @@ -1114,10 +1136,10 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsStridedSliceSupported(OverrideDataType(input, dataType), - OverrideDataType(output, dataType), - cLayer->GetParameters(), - reason); + result = layerSupportObject.IsStridedSliceSupported(OverrideDataType(input, dataType), + OverrideDataType(output, dataType), + cLayer->GetParameters(), + reason); break; } case LayerType::Subtraction: @@ -1125,7 +1147,7 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsSubtractionSupported( + result = layerSupportObject.IsSubtractionSupported( OverrideDataType(input0, dataType), 
OverrideDataType(input1, dataType), OverrideDataType(output, dataType), @@ -1138,11 +1160,11 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); const TensorInfo& output0 = layer.GetOutputSlot(0).GetTensorInfo(); const TensorInfo& output1 = layer.GetOutputSlot(1).GetTensorInfo(); - result = layerSupportObject->IsSwitchSupported(OverrideDataType(input0, dataType), - OverrideDataType(input1, dataType), - OverrideDataType(output0, dataType), - OverrideDataType(output1, dataType), - reason); + result = layerSupportObject.IsSwitchSupported(OverrideDataType(input0, dataType), + OverrideDataType(input1, dataType), + OverrideDataType(output0, dataType), + OverrideDataType(output1, dataType), + reason); break; } case LayerType::Mean: @@ -1150,7 +1172,7 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsMeanSupported( + result = layerSupportObject.IsMeanSupported( OverrideDataType(input, dataType), OverrideDataType(output, dataType), cLayer->GetParameters(), @@ -1162,10 +1184,10 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsMinimumSupported(OverrideDataType(input0, dataType), - OverrideDataType(input1, dataType), - OverrideDataType(output, dataType), - reason); + result = layerSupportObject.IsMinimumSupported(OverrideDataType(input0, dataType), + OverrideDataType(input1, dataType), + OverrideDataType(output, dataType), + reason); break; } case LayerType::Prelu: @@ -1173,10 +1195,10 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& alpha = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsPreluSupported(OverrideDataType(input, dataType), - OverrideDataType(alpha, dataType), - OverrideDataType(output, dataType), - reason); + result = layerSupportObject.IsPreluSupported(OverrideDataType(input, dataType), + OverrideDataType(alpha, dataType), + OverrideDataType(output, dataType), + reason); break; } case LayerType::Transpose: @@ -1184,10 +1206,10 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, auto cLayer = PolymorphicDowncast(&layer); const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsTransposeSupported(OverrideDataType(input, dataType), - OverrideDataType(output, dataType), - cLayer->GetParameters(), - reason); + result = layerSupportObject.IsTransposeSupported(OverrideDataType(input, dataType), + OverrideDataType(output, dataType), + cLayer->GetParameters(), + reason); break; } case LayerType::TransposeConvolution2d: @@ -1211,12 +1233,12 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, 
            ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
            const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);

-            result = layerSupportObject->IsTransposeConvolution2dSupported(input,
-                                                                           output,
-                                                                           descriptor,
-                                                                           weights,
-                                                                           biases,
-                                                                           reason);
+            result = layerSupportObject.IsTransposeConvolution2dSupported(input,
+                                                                          output,
+                                                                          descriptor,
+                                                                          weights,
+                                                                          biases,
+                                                                          reason);

             break;
         }
@@ -1226,10 +1248,10 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

-            result = layerSupportObject->IsReduceSupported(OverrideDataType(input, dataType),
-                                                           OverrideDataType(output, dataType),
-                                                           cLayer->GetParameters(),
-                                                           reason);
+            result = layerSupportObject.IsReduceSupported(OverrideDataType(input, dataType),
+                                                          OverrideDataType(output, dataType),
+                                                          cLayer->GetParameters(),
+                                                          reason);
             break;
         }
         default:
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index f92e0745d3..d3857b8357 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -23,6 +23,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources
     ElementwiseUnaryEndToEndTestImpl.hpp
     EndToEndTestImpl.hpp
     FillEndToEndTestImpl.hpp
+    FullyConnectedEndToEndTestImpl.hpp
     GatherEndToEndTestImpl.hpp
     InstanceNormalizationEndToEndTestImpl.cpp
     InstanceNormalizationEndToEndTestImpl.hpp
diff --git a/src/backends/backendsCommon/test/CompatibilityTests.cpp b/src/backends/backendsCommon/test/CompatibilityTests.cpp
index b69e11253d..1c4ff709fa 100644
--- a/src/backends/backendsCommon/test/CompatibilityTests.cpp
+++ b/src/backends/backendsCommon/test/CompatibilityTests.cpp
@@ -7,6 +7,7 @@
 #include
 #include
+#include <reference/RefBackend.hpp>
 #include

@@ -115,3 +116,18 @@ BOOST_AUTO_TEST_CASE(Neon_Cl_DirectCompatibility_Test)
 }

 BOOST_AUTO_TEST_SUITE_END()
+
+BOOST_AUTO_TEST_SUITE(BackendCapability)
+
+BOOST_AUTO_TEST_CASE(Backends_Capability_Test)
+{
+    auto neonBackend = std::make_unique<armnn::NeonBackend>();
+    auto clBackend = std::make_unique<armnn::ClBackend>();
+    auto refBackend = std::make_unique<armnn::RefBackend>();
+
+    BOOST_CHECK(!neonBackend->HasCapability(armnn::BackendCapability::NonConstWeights));
+    BOOST_CHECK(!clBackend->HasCapability(armnn::BackendCapability::NonConstWeights));
+    BOOST_CHECK(refBackend->HasCapability(armnn::BackendCapability::NonConstWeights));
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
new file mode 100644
index 0000000000..5a618c32e1
--- /dev/null
+++ b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
@@ -0,0 +1,97 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT +// +#pragma once + +#include "CommonTestUtils.hpp" + +#include + +#include + +#include + +#include + +#include + +namespace +{ + +armnn::INetworkPtr CreateFullyConnectedNetworkNonConstWeights(const armnn::TensorInfo& inputTensorInfo, + const armnn::TensorInfo& outputTensorInfo, + const armnn::TensorInfo& weightsTensorInfo, + armnn::FullyConnectedDescriptor descriptor) +{ + armnn::INetworkPtr network(armnn::INetwork::Create()); + + armnn::IConnectableLayer* inputLayer = network->AddInputLayer(0, "Input"); + armnn::IConnectableLayer* weightsInputLayer = network->AddInputLayer(1, "Weights_Input"); + armnn::IConnectableLayer* fullyConnectedLayer = network->AddFullyConnectedLayer(descriptor, + armnn::EmptyOptional(), + armnn::EmptyOptional(), + "Fully_Connected"); + armnn::IConnectableLayer* outputLayer = network->AddOutputLayer(0, "Output"); + + Connect(inputLayer, fullyConnectedLayer, inputTensorInfo, 0, 0); + Connect(weightsInputLayer, fullyConnectedLayer, weightsTensorInfo, 0, 1); + Connect(fullyConnectedLayer, outputLayer, outputTensorInfo, 0, 0); + + return network; +} + +template> +void FullyConnectedWithDynamicWeightsEndToEnd(const std::vector& backends) +{ + using namespace armnn; + + armnn::TensorInfo inputTensorInfo({ 1, 1, 2, 3 }, ArmnnType); + inputTensorInfo.SetQuantizationScale(0.1f); + inputTensorInfo.SetQuantizationOffset(63); + + armnn::TensorInfo outputTensorInfo({ 1, 2 }, ArmnnType); + outputTensorInfo.SetQuantizationScale(5.f); + outputTensorInfo.SetQuantizationOffset(10); + + armnn::TensorInfo weightsTensorInfo({ 2, 6 }, ArmnnType); + weightsTensorInfo.SetQuantizationScale(0.2f); + weightsTensorInfo.SetQuantizationOffset(93); + + FullyConnectedDescriptor descriptor; + descriptor.m_ConstantWeights = false; + descriptor.m_BiasEnabled = false; + descriptor.m_TransposeWeightMatrix = true; + + std::vector inputData { + -1.2f, 6.1f, -3.5f, + 18.8f, -5.5f, 2.9f + }; + + std::vector weightsData { + -8.4f, 20.0f, -10.4f, -8, 16.4f, -11.8f, + 23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f + }; + + std::vector floatExpectedOutputData { + -107.04f, 110.f + }; + std::vector expectedOutputData = armnnUtils::QuantizedVector(floatExpectedOutputData); + + armnn::INetworkPtr network = CreateFullyConnectedNetworkNonConstWeights(inputTensorInfo, + outputTensorInfo, + weightsTensorInfo, + descriptor); + + BOOST_TEST_CHECKPOINT("create a network"); + + std::map> inputTensorData = {{ 0, inputData }, {1, weightsData}}; + std::map> expectedOutputTensorData = {{ 0, expectedOutputData }}; + + EndToEndLayerTestImpl(move(network), + inputTensorData, + expectedOutputTensorData, + backends, + 1.0f); +} +} // anonymous namespace diff --git a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp index c9e2e1602d..9176094eb2 100644 --- a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp @@ -67,12 +67,70 @@ LayerTestResult SimpleFullyConnectedTestImpl( return result; } +template +LayerTestResult SimpleFullyConnectedTestWeightsAsInputsImpl( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory, + armnn::TensorInfo inputTensorInfo, + armnn::TensorInfo outputTensorInfo, + armnn::TensorInfo weightsTensorInfo, + armnn::TensorInfo biasesTensorInfo, + boost::multi_array& weights, + 
boost::multi_array& bias, + boost::multi_array& input, + bool biasEnabled, + bool transposeWeights) +{ + std::unique_ptr input0Handle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr input1Handle = tensorHandleFactory.CreateTensorHandle(weightsTensorInfo); + std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); + + armnn::FullyConnectedQueueDescriptor data; + armnn::WorkloadInfo info; + + AddInputToWorkload(data, info, inputTensorInfo, input0Handle.get()); + AddInputToWorkload(data, info, weightsTensorInfo, input1Handle.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + data.m_Parameters.m_BiasEnabled = biasEnabled; + data.m_Parameters.m_TransposeWeightMatrix = transposeWeights; + data.m_Parameters.m_ConstantWeights = false; + + std::unique_ptr input2Handle = nullptr; + if (biasEnabled) + { + input2Handle = tensorHandleFactory.CreateTensorHandle(biasesTensorInfo); + AddInputToWorkload(data, info, biasesTensorInfo, input2Handle.get()); + } + + std::unique_ptr workload = workloadFactory.CreateFullyConnected(data, info); + LayerTestResult result(outputTensorInfo); + + input0Handle->Allocate(); + input1Handle->Allocate(); + outputHandle->Allocate(); + CopyDataToITensorHandle(input0Handle.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(input1Handle.get(), &weights[0][0]); + if (biasEnabled) + { + input2Handle->Allocate(); + CopyDataToITensorHandle(input2Handle.get(), &bias[0]); + } + + ExecuteWorkload(*workload, memoryManager); + + CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get()); + + return result; +} + template LayerTestResult FullyConnectedTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory, - bool biasEnabled) + bool biasEnabled, + bool constantWeights) { constexpr static unsigned int inputWidth = 3u; constexpr static unsigned int inputHeight = 2u; @@ -116,15 +174,36 @@ LayerTestResult FullyConnectedTest( auto bias = MakeTensor(biasesDesc, std::vector{9250, 67500}); - result = SimpleFullyConnectedTestImpl( - workloadFactory, - memoryManager, - tensorHandleFactory, - inputTensorInfo, outputTensorInfo, - weightsDesc, biasesDesc, - weights, bias, input, - biasEnabled, true - ); + if (constantWeights) + { + result = SimpleFullyConnectedTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + inputTensorInfo, + outputTensorInfo, + weightsDesc, + biasesDesc, + weights, + bias, + input, + biasEnabled, + true); + } + else + { + result = SimpleFullyConnectedTestWeightsAsInputsImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + inputTensorInfo, + outputTensorInfo, + weightsDesc, + biasesDesc, + weights, + bias, + input, + biasEnabled, + true); + } if (biasEnabled) { @@ -237,14 +316,16 @@ FullyConnectedTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory, - bool biasEnabled); + bool biasEnabled, + bool constWeights); template LayerTestResult, 2> FullyConnectedTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory, - bool biasEnabled); + bool biasEnabled, + bool constWeights); // // Implementation functions diff --git a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.hpp 
b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.hpp
index c2d53a5178..ec921f7dd5 100644
--- a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.hpp
@@ -17,7 +17,8 @@ LayerTestResult<T, 2> FullyConnectedTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::ITensorHandleFactory& tensorHandleFactory,
-    bool biasEnabled);
+    bool biasEnabled,
+    bool constantWeights);

 LayerTestResult<float, 2> FullyConnectedFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
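
A note on the workload validation in the first hunk: FullyConnectedQueueDescriptor::Validate() now derives the expected number of input slots from the descriptor instead of hard-coding 1. Restated as a stand-alone helper (hypothetical, for illustration only; the patch computes numInputs inline):

#include <armnn/Descriptors.hpp>

// Input-slot layout implied by FullyConnectedQueueDescriptor::Validate():
//   m_ConstantWeights == true                  -> 1 input  (data; weights/bias come from the m_Weight/m_Bias members)
//   m_ConstantWeights == false, bias disabled  -> 2 inputs (data, weights)
//   m_ConstantWeights == false, bias enabled   -> 3 inputs (data, weights, bias)
unsigned int ExpectedFullyConnectedInputs(const armnn::FullyConnectedDescriptor& desc)
{
    if (desc.m_ConstantWeights)
    {
        return 1u; // weights and bias are validated via the ValidatePointer checks
    }
    return desc.m_BiasEnabled ? 3u : 2u; // weights (and bias) are validated as input tensors
}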
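
From the caller's side, the new front-end path is the one exercised by CreateFullyConnectedNetworkNonConstWeights in the end-to-end test above: the weights become a second input layer, and AddFullyConnectedLayer receives EmptyOptional() in place of ConstTensors. A minimal sketch using the public INetwork slot API (the function and layer names are illustrative, not part of the patch):

#include <armnn/Descriptors.hpp>
#include <armnn/INetwork.hpp>

armnn::INetworkPtr BuildFullyConnectedWithRuntimeWeights(const armnn::TensorInfo& inputInfo,
                                                         const armnn::TensorInfo& weightsInfo,
                                                         const armnn::TensorInfo& outputInfo)
{
    using namespace armnn;

    FullyConnectedDescriptor descriptor;
    descriptor.m_ConstantWeights       = false; // weights are fed at execution time
    descriptor.m_TransposeWeightMatrix = true;
    descriptor.m_BiasEnabled           = false; // with a bias, connect a third input to slot 2

    INetworkPtr network = INetwork::Create();
    IConnectableLayer* input   = network->AddInputLayer(0, "input");
    IConnectableLayer* weights = network->AddInputLayer(1, "weights");
    IConnectableLayer* fc      = network->AddFullyConnectedLayer(descriptor,
                                                                 EmptyOptional(), // no constant weights
                                                                 EmptyOptional(), // no constant bias
                                                                 "fully_connected");
    IConnectableLayer* output  = network->AddOutputLayer(0, "output");

    input->GetOutputSlot(0).Connect(fc->GetInputSlot(0));
    weights->GetOutputSlot(0).Connect(fc->GetInputSlot(1));
    fc->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(inputInfo);
    weights->GetOutputSlot(0).SetTensorInfo(weightsInfo);
    fc->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return network;
}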
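
Whether a given backend can consume such a network is a queryable capability: per the Backends_Capability_Test added above, only the reference backend (CpuRef) reports BackendCapability::NonConstWeights at this point, while the Neon and CL backends do not yet. A sketch of the probe, assuming the same backend header the test uses:

#include <reference/RefBackend.hpp>

#include <memory>

bool RefBackendSupportsNonConstWeights()
{
    // Mirrors the BOOST_CHECKs in Backends_Capability_Test.
    auto refBackend = std::make_unique<armnn::RefBackend>();
    return refBackend->HasCapability(armnn::BackendCapability::NonConstWeights);
}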