From 4452baf3d295164877c5810a3867b1d2d79b04f3 Mon Sep 17 00:00:00 2001
From: Cathal Corbett
Date: Fri, 13 May 2022 09:55:59 +0100
Subject: IVGCVSW-6260 ConstTensorsAsInput: Fully Connected Cl and Neon support.

 * IVGCVSW-6940 ConstTensorsAsInput: DepthwiseConvolution2d - Complete Neon and Cl Bug Fix
 * Bug fix to enable Cl and Neon Backend Compatibility ConstantTensorsAsInputs
 * Updated Cl and Neon FullyConnected workloads to handle constant weights and bias as inputs rather than reading from member variables.
 * Prevent non const weights and biases passing CL and NEON validate for Depthwise Convolution.

Signed-off-by: Cathal Corbett
Change-Id: I0f505ff5998a183152f843d0f6cc74327ba920e7
---
 .../neon/workloads/NeonFullyConnectedWorkload.cpp | 62 +++++++++-------------
 1 file changed, 26 insertions(+), 36 deletions(-)

(limited to 'src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp')

diff --git a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
index 26c68b7d1d..d3716806b3 100644
--- a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
@@ -28,22 +28,37 @@ arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo& input,
                                                        const FullyConnectedDescriptor& descriptor,
                                                        const ActivationDescriptor* activationDescriptor)
 {
+    // The NEON implemented workload does support both const and non const
+    // weights. However, in the case of non const weights we'd have to call
+    // prepare or configure for each inference which we're not setup to do just yet.
+    if (!weights.IsConstant())
+    {
+        return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
+                                   "Arm NN NeonFullyConnectedWorkload does not support non constant weights."};
+    }
     const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input);
     const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output);
-    const arm_compute::TensorInfo aclWeights = BuildArmComputeTensorInfo(weights);
+    arm_compute::TensorInfo aclWeights = BuildArmComputeTensorInfo(weights);
+    aclWeights.set_are_values_constant(weights.IsConstant());
 
     arm_compute::TensorInfo aclBiases;
     arm_compute::TensorInfo* optionalAclBiases = nullptr;
     if (descriptor.m_BiasEnabled)
     {
         ARMNN_ASSERT(biases.has_value());
+        // Same for bias as weights. We don't currently support non const.
+        if (!biases.value().IsConstant())
+        {
+            return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
+                                       "Arm NN NeonFullyConnectedWorkload does not support non constant bias."};
+        }
         aclBiases = BuildArmComputeTensorInfo(biases.value());
+        aclBiases.set_are_values_constant(biases.value().IsConstant());
         optionalAclBiases = &aclBiases;
     }
 
     const arm_compute::FullyConnectedLayerInfo fullyConnectedLayerInfo =
         ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(descriptor, activationDescriptor);
-
     return arm_compute::NEFullyConnectedLayer::validate(&aclInput,
                                                         &aclWeights,
                                                         optionalAclBiases,
@@ -61,17 +76,20 @@ NeonFullyConnectedWorkload::NeonFullyConnectedWorkload(const FullyConnectedQueue
     arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
+    // Copy the weights' tensor into arm_compute tensor.
     m_WeightsTensor = std::make_unique<arm_compute::Tensor>();
     BuildArmComputeTensor(*m_WeightsTensor, m_Data.m_Weight->GetTensorInfo());
-
+    InitializeArmComputeTensorData(*m_WeightsTensor, m_Data.m_Weight);
+
     if (m_Data.m_Parameters.m_BiasEnabled)
     {
+        // Copy the biases tensor into arm_compute tensor.
         m_BiasesTensor = std::make_unique<arm_compute::Tensor>();
-        BuildArmComputeTensor(*m_BiasesTensor, m_Data.m_Bias->GetTensorInfo());
+        BuildArmComputeTensor(*m_BiasesTensor, m_Data.m_Bias->GetTensorInfo());
+        InitializeArmComputeTensorData(*m_BiasesTensor, m_Data.m_Bias);
     }
 
     const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
-
     arm_compute::FullyConnectedLayerInfo fc_info =
         ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(descriptor.m_Parameters, activationInfo);
 
@@ -79,28 +97,6 @@ NeonFullyConnectedWorkload::NeonFullyConnectedWorkload(const FullyConnectedQueue
     layer->configure(&input, m_WeightsTensor.get(), m_BiasesTensor.get(), &output, fc_info);
     m_FullyConnectedLayer.reset(layer.release());
 
-    // Allocate
-    if (m_Data.m_Weight->GetTensorInfo().GetDataType() == DataType::QAsymmU8)
-    {
-        InitializeArmComputeTensorData(*m_WeightsTensor, m_Data.m_Weight);
-    }
-    else
-    {
-        InitializeArmComputeTensorData(*m_WeightsTensor, m_Data.m_Weight);
-    }
-
-    if (m_BiasesTensor)
-    {
-        if (m_Data.m_Bias->GetTensorInfo().GetDataType() == DataType::Signed32)
-        {
-            InitializeArmComputeTensorData(*m_BiasesTensor, m_Data.m_Bias);
-        }
-        else
-        {
-            InitializeArmComputeTensorData(*m_BiasesTensor, m_Data.m_Bias);
-        }
-    }
-
     // Add details for profiling output
     WorkloadInfo detailsInfo;
 
@@ -118,10 +114,10 @@ NeonFullyConnectedWorkload::NeonFullyConnectedWorkload(const FullyConnectedQueue
                                          detailsInfo,
                                          this->GetGuid());
 
-    // Force Compute Library to perform the necessary copying and reshaping, after which
-    // delete all the input tensors that will no longer be needed
+    // Force Compute Library to perform the necessary copying and reshaping.
     m_FullyConnectedLayer->prepare();
-    FreeUnusedTensors();
+    FreeTensorIfUnused(m_WeightsTensor);
+    FreeTensorIfUnused(m_BiasesTensor);
 }
 
 void NeonFullyConnectedWorkload::Execute() const
@@ -130,10 +126,4 @@ void NeonFullyConnectedWorkload::Execute() const
     m_FullyConnectedLayer->run();
 }
 
-void NeonFullyConnectedWorkload::FreeUnusedTensors()
-{
-    FreeTensorIfUnused(m_WeightsTensor);
-    FreeTensorIfUnused(m_BiasesTensor);
-}
-
 } //namespace armnn
-- 
cgit v1.2.1
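
Note (not part of the patch): after this change, NeonFullyConnectedWorkloadValidate only accepts weights and bias whose TensorInfo is flagged constant. Below is a minimal caller-side sketch of what that implies, assuming the tensors are marked with armnn::TensorInfo::SetConstant(); the helper name, shape and data type are illustrative only, not taken from this commit.

    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>

    // Hypothetical helper: build a weights TensorInfo that the updated Neon/Cl
    // FullyConnected validate functions will accept.
    armnn::TensorInfo MakeConstFullyConnectedWeightsInfo()
    {
        // Illustrative 8x16 Float32 weights shape.
        armnn::TensorInfo weightsInfo(armnn::TensorShape({ 8u, 16u }), armnn::DataType::Float32);

        // Mark the values as constant; without this, weightsInfo.IsConstant() is false
        // and the new check in NeonFullyConnectedWorkloadValidate returns a status with
        // arm_compute::ErrorCode::RUNTIME_ERROR.
        weightsInfo.SetConstant(true);
        return weightsInfo;
    }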