Diffstat (limited to 'src/backends/neon/workloads'):
 -rw-r--r--  src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp | 19
 -rw-r--r--  src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp       | 62
 -rw-r--r--  src/backends/neon/workloads/NeonFullyConnectedWorkload.hpp       |  2
 3 files changed, 44 insertions(+), 39 deletions(-)
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
index b122be62ce..9eeac6e2a3 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
@@ -33,6 +33,15 @@ arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo& i
const Optional<TensorInfo>& biases,
const ActivationDescriptor* activationDescriptor)
{
+ // The Neon-implemented workload does support both const and non-const
+ // weights. However, with non-const weights we would have to call prepare
+ // or configure for each inference, which we are not set up to do yet.
+ if (!weights.IsConstant())
+ {
+ return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
+ "ArmNN NeonDepthwiseConv2dWorkload does not support non constant weights."};
+ }
+
const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
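
The guard added above is a plain fail-fast check at the top of validation. A minimal, self-contained sketch of the pattern, using simplified stand-in types (Status and TensorInfo here are illustrative, not the real arm_compute/armnn classes):

    #include <string>

    // Simplified stand-ins for arm_compute::Status and armnn::TensorInfo.
    struct Status
    {
        bool ok;
        std::string error;
    };

    struct TensorInfo
    {
        bool m_IsConstant = true;
        bool IsConstant() const { return m_IsConstant; }
    };

    Status ValidateWeights(const TensorInfo& weights)
    {
        // Fail fast: supporting non-constant weights would require calling
        // prepare()/configure() again on every inference.
        if (!weights.IsConstant())
        {
            return {false, "non-constant weights are not supported"};
        }
        return {true, ""};
    }
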
@@ -50,14 +59,22 @@ arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo& i
std::tie(weightsPermuted, aclDepthMultiplier) = Convert1HWOTensorInfoToAcl(weights, input, descriptor.m_DataLayout);
// Convert the weights into the compute library format
- const arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weightsPermuted, descriptor.m_DataLayout);
+ arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weightsPermuted, descriptor.m_DataLayout);
+ aclWeightsInfo.set_are_values_constant(weights.IsConstant());
arm_compute::TensorInfo aclBiasesInfo;
arm_compute::TensorInfo* optionalAclBiasesInfo = nullptr;
if (descriptor.m_BiasEnabled)
{
ARMNN_ASSERT(biases.has_value());
+ // As with the weights, non-const bias values are not currently supported.
+ if (!biases.value().IsConstant())
+ {
+ return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
+ "ArmNN NeonDepthwiseConv2dWorkload does not support non constant bias."};
+ }
aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
+ aclBiasesInfo.set_are_values_constant(biases.value().IsConstant());
optionalAclBiasesInfo = &aclBiasesInfo;
}
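
The hunk above also changes aclWeightsInfo from a const local to a mutable one so the constancy flag can be set on it. A minimal sketch of that flag in isolation, assuming Arm Compute Library headers are available (the shape and data type are illustrative only):

    #include <arm_compute/core/TensorInfo.h>
    #include <arm_compute/core/Types.h>

    arm_compute::TensorInfo MakeConstWeightsInfo()
    {
        // 3x3 depthwise weights with 16 channels, FP32; values chosen only
        // for illustration.
        arm_compute::TensorInfo info(arm_compute::TensorShape(3U, 3U, 16U),
                                     1, arm_compute::DataType::F32);

        // Tell ACL the payload never changes between runs, so it may fold,
        // reshape, or cache the weights during prepare().
        info.set_are_values_constant(true);
        return info;
    }
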
diff --git a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
index 26c68b7d1d..d3716806b3 100644
--- a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
@@ -28,22 +28,37 @@ arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo& input,
const FullyConnectedDescriptor& descriptor,
const ActivationDescriptor* activationDescriptor)
{
+ // The NEON-implemented workload does support both const and non-const
+ // weights. However, with non-const weights we would have to call prepare
+ // or configure for each inference, which we are not set up to do yet.
+ if (!weights.IsConstant())
+ {
+ return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
+ "Arm NN NeonFullyConnectedWorkload does not support non constant weights."};
+ }
const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input);
const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output);
- const arm_compute::TensorInfo aclWeights = BuildArmComputeTensorInfo(weights);
+ arm_compute::TensorInfo aclWeights = BuildArmComputeTensorInfo(weights);
+ aclWeights.set_are_values_constant(weights.IsConstant());
arm_compute::TensorInfo aclBiases;
arm_compute::TensorInfo* optionalAclBiases = nullptr;
if (descriptor.m_BiasEnabled)
{
ARMNN_ASSERT(biases.has_value());
+ // As with the weights, non-const bias values are not currently supported.
+ if (!biases.value().IsConstant())
+ {
+ return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
+ "Arm NN NeonFullyConnectedWorkload does not support non constant bias."};
+ }
aclBiases = BuildArmComputeTensorInfo(biases.value());
+ aclBiases.set_are_values_constant(biases.value().IsConstant());
optionalAclBiases = &aclBiases;
}
const arm_compute::FullyConnectedLayerInfo fullyConnectedLayerInfo =
ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(descriptor, activationDescriptor);
-
return arm_compute::NEFullyConnectedLayer::validate(&aclInput,
&aclWeights,
optionalAclBiases,
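
The early returns feed into ACL's usual validate-before-configure idiom: every check runs against TensorInfo metadata only, so nothing is allocated. A sketch of a caller, assuming ACL is available (the tensor infos are taken as inputs):

    #include <arm_compute/core/Error.h>
    #include <arm_compute/core/TensorInfo.h>
    #include <arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h>

    bool FullyConnectedIsSupported(const arm_compute::TensorInfo& input,
                                   const arm_compute::TensorInfo& weights,
                                   const arm_compute::TensorInfo* optionalBias,
                                   const arm_compute::TensorInfo& output)
    {
        // validate() inspects metadata only; no tensors are allocated and
        // no kernels are configured.
        const arm_compute::Status status =
            arm_compute::NEFullyConnectedLayer::validate(&input, &weights,
                                                         optionalBias, &output);
        return status.error_code() == arm_compute::ErrorCode::OK;
    }
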
@@ -61,17 +76,20 @@ NeonFullyConnectedWorkload::NeonFullyConnectedWorkload(const FullyConnectedQueue
arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ // Copy the weights tensor into an arm_compute tensor.
m_WeightsTensor = std::make_unique<arm_compute::Tensor>();
BuildArmComputeTensor(*m_WeightsTensor, m_Data.m_Weight->GetTensorInfo());
-
+ InitializeArmComputeTensorData(*m_WeightsTensor, m_Data.m_Weight);
+
if (m_Data.m_Parameters.m_BiasEnabled)
{
+ // Copy the biases tensor into an arm_compute tensor.
m_BiasesTensor = std::make_unique<arm_compute::Tensor>();
- BuildArmComputeTensor(*m_BiasesTensor, m_Data.m_Bias->GetTensorInfo());
+ BuildArmComputeTensor(*m_BiasesTensor, m_Data.m_Bias->GetTensorInfo());
+ InitializeArmComputeTensorData(*m_BiasesTensor, m_Data.m_Bias);
}
const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
-
arm_compute::FullyConnectedLayerInfo fc_info =
ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(descriptor.m_Parameters, activationInfo);
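
The constructor now copies the constant weights (and bias) into the ACL tensors immediately after building them, rather than deferring to a type-dispatched block later. Sketched with ACL available, what that copy amounts to; FillAclTensor is a hypothetical stand-in, and the real InitializeArmComputeTensorData also dispatches on data type and respects padding, which this deliberately ignores:

    #include <cstring>
    #include <arm_compute/runtime/Tensor.h>

    // Hypothetical helper: back an ACL tensor with memory and copy a
    // constant payload in. Ignores padding/strides for brevity; ArmNN's
    // real helper copies with the correct element type and layout.
    void FillAclTensor(arm_compute::Tensor& dst, const void* src, size_t bytes)
    {
        dst.allocator()->allocate();
        std::memcpy(dst.buffer(), src, bytes);
    }
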
@@ -79,28 +97,6 @@ NeonFullyConnectedWorkload::NeonFullyConnectedWorkload(const FullyConnectedQueue
layer->configure(&input, m_WeightsTensor.get(), m_BiasesTensor.get(), &output, fc_info);
m_FullyConnectedLayer.reset(layer.release());
- // Allocate
- if (m_Data.m_Weight->GetTensorInfo().GetDataType() == DataType::QAsymmU8)
- {
- InitializeArmComputeTensorData(*m_WeightsTensor, m_Data.m_Weight);
- }
- else
- {
- InitializeArmComputeTensorData(*m_WeightsTensor, m_Data.m_Weight);
- }
-
- if (m_BiasesTensor)
- {
- if (m_Data.m_Bias->GetTensorInfo().GetDataType() == DataType::Signed32)
- {
- InitializeArmComputeTensorData(*m_BiasesTensor, m_Data.m_Bias);
- }
- else
- {
- InitializeArmComputeTensorData(*m_BiasesTensor, m_Data.m_Bias);
- }
- }
-
// Add details for profiling output
WorkloadInfo detailsInfo;
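
The deleted if/else blocks in this hunk were redundant: both branches made the identical call, because InitializeArmComputeTensorData already dispatches on data type internally. A self-contained sketch of that shape of API (stand-in types, not ArmNN's actual helper):

    #include <cstddef>
    #include <cstdint>
    #include <stdexcept>

    enum class DataType { Float32, QAsymmU8, Signed32 };

    template <typename T>
    void CopyElements(void* dst, const void* src, std::size_t count)
    {
        const T* s = static_cast<const T*>(src);
        T* d = static_cast<T*>(dst);
        for (std::size_t i = 0; i < count; ++i) { d[i] = s[i]; }
    }

    // Single entry point: the data-type dispatch lives here, so call sites
    // never need a per-type if/else.
    void InitializeTensorData(void* dst, const void* src,
                              std::size_t count, DataType type)
    {
        switch (type)
        {
            case DataType::Float32:  CopyElements<float>(dst, src, count);        break;
            case DataType::QAsymmU8: CopyElements<std::uint8_t>(dst, src, count); break;
            case DataType::Signed32: CopyElements<std::int32_t>(dst, src, count); break;
            default: throw std::runtime_error("unsupported data type");
        }
    }
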
@@ -118,10 +114,10 @@ NeonFullyConnectedWorkload::NeonFullyConnectedWorkload(const FullyConnectedQueue
detailsInfo,
this->GetGuid());
- // Force Compute Library to perform the necessary copying and reshaping, after which
- // delete all the input tensors that will no longer be needed
+ // Force Compute Library to perform the necessary copying and reshaping.
m_FullyConnectedLayer->prepare();
- FreeUnusedTensors();
+ FreeTensorIfUnused(m_WeightsTensor);
+ FreeTensorIfUnused(m_BiasesTensor);
}
void NeonFullyConnectedWorkload::Execute() const
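
prepare() is what makes the early copy safe to follow with a free: it triggers ACL's one-off weight reshaping and copying into the function's internal storage, after which run() can be invoked any number of times. The overall lifecycle, sketched with ACL available (tensors assumed already built and allocated):

    #include <arm_compute/runtime/Tensor.h>
    #include <arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h>

    void RunFullyConnected(arm_compute::Tensor& input,
                           arm_compute::Tensor& weights,
                           arm_compute::Tensor& biases,
                           arm_compute::Tensor& output)
    {
        arm_compute::NEFullyConnectedLayer fc;
        fc.configure(&input, &weights, &biases, &output); // wire up tensors
        fc.prepare();  // one-off weight reshape/copy
        fc.run();      // per-inference execution; repeatable after prepare()
    }
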
@@ -130,10 +126,4 @@ void NeonFullyConnectedWorkload::Execute() const
m_FullyConnectedLayer->run();
}
-void NeonFullyConnectedWorkload::FreeUnusedTensors()
-{
- FreeTensorIfUnused(m_WeightsTensor);
- FreeTensorIfUnused(m_BiasesTensor);
-}
-
} //namespace armnn
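
The removed FreeUnusedTensors() wrapper added nothing over calling the shared helper at the two sites directly. For reference, what FreeTensorIfUnused plausibly does: ACL marks staging tensors as unused once prepare() has consumed them, and the helper releases them only then (a sketch based on that behaviour; the actual helper lives in ArmNN's workload utilities):

    #include <memory>

    template <typename TensorType>
    void FreeTensorIfUnused(std::unique_ptr<TensorType>& tensor)
    {
        // After prepare(), ACL marks tensors it no longer reads as unused;
        // only then is it safe to drop the staging copy.
        if (tensor && !tensor->is_used())
        {
            tensor.reset();
        }
    }
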
diff --git a/src/backends/neon/workloads/NeonFullyConnectedWorkload.hpp b/src/backends/neon/workloads/NeonFullyConnectedWorkload.hpp
index 419a3299f2..944731d7bd 100644
--- a/src/backends/neon/workloads/NeonFullyConnectedWorkload.hpp
+++ b/src/backends/neon/workloads/NeonFullyConnectedWorkload.hpp
@@ -33,11 +33,9 @@ public:
private:
std::unique_ptr<arm_compute::IFunction> m_FullyConnectedLayer;
-
std::unique_ptr<arm_compute::Tensor> m_WeightsTensor;
std::unique_ptr<arm_compute::Tensor> m_BiasesTensor;
- void FreeUnusedTensors();
};
} //namespace armnn