author    Francis Murtagh <francis.murtagh@arm.com>    2022-05-20 13:26:37 +0100
committer Teresa Charlin <teresa.charlinreyes@arm.com> 2022-05-23 18:40:50 +0100
commit    311cfa7f5fbaad0f98757465390fc9ad20a2d9a8 (patch)
tree      d103bfbb739f582d3602c4000568c1c7c2c44735
parent    721e629fa07e65d6a53c093518021e71e48eeac2 (diff)
download  armnn-311cfa7f5fbaad0f98757465390fc9ad20a2d9a8.tar.gz
MLCE-825: Give reason when workload unsupported for Non Constant Weights/Bias
* BackendHelper.cpp IsXXXLayerSupported doesn't get as far as Neon/Cl Validate
  functions where arm_compute::Status is returned.
* Conv2d, Depthwise, DilatedDepthwise and FullyConnected
* Tidy up if() -> if ()
* Clean up logic in FullyConnected so that isLayerSupported gets called

Signed-off-by: Francis Murtagh <francis.murtagh@arm.com>
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I5da1a882f4a2f55e90aa984b2b9548a847cb3a2d
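For context, a minimal caller-side sketch of how the new reason strings surface through the BackendHelper API touched by this change. The helper function, its parameters, and the error reporting are illustrative assumptions, not part of this commit; only LayerSupportHandle, GetILayerSupportByBackendId and IsConvolution2dSupported come from the code below.

// Illustrative sketch (not part of this commit): query Convolution2d support
// and print the reason string that this patch now fills in when weights or
// bias are dynamic (non constant) and the backend lacks NonConstWeights.
#include <armnn/BackendHelper.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <iostream>
#include <string>

bool CheckConv2dSupport(const armnn::BackendId& backendId,
                        const armnn::TensorInfo& input,
                        const armnn::TensorInfo& output,
                        const armnn::Convolution2dDescriptor& descriptor,
                        const armnn::TensorInfo& weights,
                        const armnn::Optional<armnn::TensorInfo>& biases)
{
    armnn::LayerSupportHandle handle = armnn::GetILayerSupportByBackendId(backendId);

    std::string reason;
    bool supported = handle.IsConvolution2dSupported(input, output, descriptor,
                                                     weights, biases,
                                                     armnn::Optional<std::string&>(reason));
    if (!supported)
    {
        // Before this change the call could return false with an empty reason;
        // now it reports the NonConstWeights limitation explicitly.
        std::cerr << "Convolution2d not supported on " << backendId.Get()
                  << ": " << reason << std::endl;
    }
    return supported;
}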
-rw-r--r--  src/armnn/BackendHelper.cpp                                        | 129
-rw-r--r--  src/backends/cl/workloads/ClConvolution2dWorkload.cpp              |   9
-rw-r--r--  src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp       |   9
-rw-r--r--  src/backends/cl/workloads/ClFullyConnectedWorkload.cpp             |   8
-rw-r--r--  src/backends/neon/workloads/NeonConvolution2dWorkload.cpp          |   9
-rw-r--r--  src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp   |   9
-rw-r--r--  src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp         |   8
7 files changed, 78 insertions, 103 deletions
diff --git a/src/armnn/BackendHelper.cpp b/src/armnn/BackendHelper.cpp
index 2d70d7add0..a5278eb21c 100644
--- a/src/armnn/BackendHelper.cpp
+++ b/src/armnn/BackendHelper.cpp
@@ -76,19 +76,19 @@ bool HasCapability(const BackendOptions::BackendOption& capability, const Backen
{
return capability.GetValue().AsBool() == backendCapability.GetValue().AsBool();
}
- else if(capability.GetValue().IsFloat() && backendCapability.GetValue().IsFloat())
+ else if (capability.GetValue().IsFloat() && backendCapability.GetValue().IsFloat())
{
return capability.GetValue().AsFloat() == backendCapability.GetValue().AsFloat();
}
- else if(capability.GetValue().IsInt() && backendCapability.GetValue().IsInt())
+ else if (capability.GetValue().IsInt() && backendCapability.GetValue().IsInt())
{
return capability.GetValue().AsInt() == backendCapability.GetValue().AsInt();
}
- else if(capability.GetValue().IsString() && backendCapability.GetValue().IsString())
+ else if (capability.GetValue().IsString() && backendCapability.GetValue().IsString())
{
return capability.GetValue().AsString() == backendCapability.GetValue().AsString();
}
- else if(capability.GetValue().IsUnsignedInt() && backendCapability.GetValue().IsUnsignedInt())
+ else if (capability.GetValue().IsUnsignedInt() && backendCapability.GetValue().IsUnsignedInt())
{
return capability.GetValue().AsUnsignedInt() == backendCapability.GetValue().AsUnsignedInt();
}
@@ -374,21 +374,32 @@ bool LayerSupportHandle::IsConvolution2dSupported(const TensorInfo& input,
TensorInfos infos{input, output, weights, biasesVal};
Optional<const BackendOptions::BackendOption> capability ;
- if(!m_BackendId.IsUndefined())
+ if (!m_BackendId.IsUndefined())
{
- capability = GetCapability("ConstantTensorsAsInputs", m_BackendId);
- if(!capability.has_value() || capability.value().GetValue().AsBool() == false)
+ capability = GetCapability("NonConstWeights", m_BackendId);
+ if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
{
- if(!weights.IsConstant())
+ if (!weights.IsConstant())
{
+ if (reasonIfUnsupported.has_value())
+ {
+ reasonIfUnsupported.value() =
+ "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
+ "Convolution2d weights are set as dynamic (non constant). ";
+ }
return false;
}
- if (descriptor.m_BiasEnabled && !biases.has_value())
+ if (descriptor.m_BiasEnabled && !biasesVal.IsConstant())
{
+ if (reasonIfUnsupported.has_value())
+ {
+ reasonIfUnsupported.value() =
+ "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
+ "Convolution2d biases are set as dynamic (non constant). ";
+ }
return false;
}
-
// At the first stage we will only print a warning. this is to give
// backend developers a chance to adopt and read weights from input slots.
ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
@@ -465,21 +476,30 @@ bool LayerSupportHandle::IsDepthwiseConvolutionSupported(
TensorInfos infos{input, output, weights, biasesVal};
Optional<const BackendOptions::BackendOption> capability ;
- if(!m_BackendId.IsUndefined())
+ if (!m_BackendId.IsUndefined())
{
- capability = GetCapability("ConstantTensorsAsInputs", m_BackendId);
- if(!capability.has_value() || capability.value().GetValue().AsBool() == false)
+ capability = GetCapability("NonConstWeights", m_BackendId);
+ if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
{
- if(!weights.IsConstant())
+ if (!weights.IsConstant())
{
+ if (reasonIfUnsupported.has_value())
+ {
+ reasonIfUnsupported.value() =
+ "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
+ "DepthwiseConvolution2d weights are set as dynamic (non constant). ";
+ }
return false;
}
- if(descriptor.m_BiasEnabled)
+ if (descriptor.m_BiasEnabled && !biasesVal.IsConstant())
{
- if(!biases.value().IsConstant())
+ if (reasonIfUnsupported.has_value())
{
- return false;
+ reasonIfUnsupported.value() =
+ "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
+ "DepthwiseConvolution2d biases are set as dynamic (non constant). ";
}
+ return false;
}
// At the first stage we will only print a warning. this is to give
// backend developers a chance to adopt and read weights from input slots.
@@ -544,21 +564,30 @@ bool LayerSupportHandle::IsDilatedDepthwiseConvolutionSupported(
TensorInfos infos{input, output, weights, biasesVal};
Optional<const BackendOptions::BackendOption> capability ;
- if(!m_BackendId.IsUndefined())
+ if (!m_BackendId.IsUndefined())
{
- capability = GetCapability("ConstantTensorsAsInputs", m_BackendId);
- if(!capability.has_value() || capability.value().GetValue().AsBool() == false)
+ capability = GetCapability("NonConstWeights", m_BackendId);
+ if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
{
- if(!weights.IsConstant())
+ if (!weights.IsConstant())
{
+ if (reasonIfUnsupported.has_value())
+ {
+ reasonIfUnsupported.value() =
+ "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
+ "DilatedDepthwiseConvolution2d weights are set as dynamic (non constant). ";
+ }
return false;
}
- if(descriptor.m_BiasEnabled)
+ if (descriptor.m_BiasEnabled && !biasesVal.IsConstant())
{
- if(!biases.value().IsConstant())
+ if (reasonIfUnsupported.has_value())
{
- return false;
+ reasonIfUnsupported.value() =
+ "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
+ "DilatedDepthwiseConvolution2d biases are set as dynamic (non constant). ";
}
+ return false;
}
// At the first stage we will only print a warning. this is to give
// backend developers a chance to adopt and read weights from input slots.
@@ -657,34 +686,44 @@ bool LayerSupportHandle::IsFullyConnectedSupported(const TensorInfo& input,
const FullyConnectedDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- if(!m_BackendId.IsUndefined())
+ TensorInfos infos{input, output, weights, biases};
+
+ Optional<const BackendOptions::BackendOption> capability;
+ if (!m_BackendId.IsUndefined())
{
- auto capability = GetCapability("ConstantTensorsAsInputs", m_BackendId);
- if(!capability.has_value() || capability.value().GetValue().AsBool() == false)
+ capability = GetCapability("NonConstWeights", m_BackendId);
+ if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
{
- if(!weights.IsConstant())
+ if (!descriptor.m_ConstantWeights)
+ {
+ if (reasonIfUnsupported.has_value())
+ {
+ reasonIfUnsupported.value() =
+ "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
+ "FullyConnected descriptor indicates that weights are dynamic (non constant). ";
+ }
+ return false;
+ }
+ if (!weights.IsConstant())
{
if (reasonIfUnsupported.has_value())
{
reasonIfUnsupported.value() =
- "This backend might not support non constant weights. "
- "If weights are constant make sure to set IsConstant when creating TensorInfo";
+ "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
+ "FullyConnected weights are set as dynamic (non constant). ";
}
return false;
}
- if(descriptor.m_BiasEnabled)
+ if (descriptor.m_BiasEnabled && !biases.IsConstant())
{
- if(!biases.IsConstant())
+ if (reasonIfUnsupported.has_value())
{
- if (reasonIfUnsupported.has_value())
- {
- reasonIfUnsupported.value() =
- "This backend might not support non constant bias. "
- "If bias are constant make sure to set IsConstant when creating TensorInfo";
- }
- return false;
+ reasonIfUnsupported.value() =
+ "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
+ "FullyConnected biases are set as dynamic (non constant). ";
}
+ return false;
}
// At the first stage we will only print a warning. this is to give
@@ -694,20 +733,8 @@ bool LayerSupportHandle::IsFullyConnectedSupported(const TensorInfo& input,
"doxygen documentation on github https://github.com/ARM-software/armnn "
"under the keyword 'ConstTensorsAsInputs'.";
}
-
- if(!descriptor.m_ConstantWeights)
- {
- capability = GetCapability("NonConstWeights", m_BackendId);
- if (capability.has_value() && capability.value().GetValue().AsBool() == true)
- {
- return true;
- }
- return false;
- }
}
- TensorInfos infos{input, output, weights, biases};
-
return m_LayerSupport->IsLayerSupported(LayerType::FullyConnected,
infos,
descriptor,
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
index 762645bfba..6b0a3b8352 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -28,15 +28,6 @@ arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo& input,
bool isFastMathEnabled,
const ActivationDescriptor* activationDescriptor)
{
- // The arm_compute::CLConvolutionLayer supports both const and non const
- // weights. However, in the case of non const weights we'd have to call
- // prepare or configure for each inference which we're not setup to do just yet.
- if (!weights.IsConstant())
- {
- return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
- "ArmNN ClConvolution2dWorkload does not support non constant weights."};
- }
-
const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weights, descriptor.m_DataLayout);
diff --git a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
index 3a972d3f39..42fe400041 100644
--- a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
@@ -30,15 +30,6 @@ arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo& inp
const Optional<TensorInfo>& biases,
const ActivationDescriptor* activationDescriptor)
{
- // The CL implemented workload does support both const and non const
- // weights. However, in the case of non const weights we'd have to call
- // prepare or configure for each inference which we're not setup to do just yet.
- if (!weights.IsConstant())
- {
- return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
- "ArmNN ClDepthwiseConv2dWorkload does not support non constant weights."};
- }
-
const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
diff --git a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
index c2da5f297a..0e1efe0239 100644
--- a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
+++ b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
@@ -23,14 +23,6 @@ arm_compute::Status ClFullyConnectedWorkloadValidate(const TensorInfo& input,
const FullyConnectedDescriptor& descriptor,
const ActivationDescriptor* activationDescriptor)
{
- // The CL implemented workload does support both const and non const
- // weights. However, in the case of non const weights we'd have to call
- // prepare or configure for each inference which we're not setup to do just yet.
- if (!weights.IsConstant())
- {
- return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
- "Arm NN ClFullyConnectedWorkload does not support non constant weights."};
- }
const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input);
const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output);
arm_compute::TensorInfo aclWeights = BuildArmComputeTensorInfo(weights);
diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
index 12d8c460f9..586b9c9849 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
@@ -29,15 +29,6 @@ arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo& input,
bool isFastMathEnabled,
const ActivationDescriptor* activationDescriptor)
{
- // arm_compute::NEConvolutionLayer supports both const and non const
- // weights. However, in the case of non const weights we'd have to call
- // prepare or configure for each inference which we're not setup to do just yet.
- if (!weights.IsConstant())
- {
- return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
- "ArmNN NeonConvolution2dWorkload does not support non constant weights."};
- }
-
const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weights, descriptor.m_DataLayout);
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
index 9eeac6e2a3..e2d0a8200f 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
@@ -33,15 +33,6 @@ arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo& i
const Optional<TensorInfo>& biases,
const ActivationDescriptor* activationDescriptor)
{
- // The Neon implemented workload does support both const and non const
- // weights. However, in the case of non const weights we'd have to call
- // prepare or configure for each inference which we're not setup to do just yet.
- if (!weights.IsConstant())
- {
- return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
- "ArmNN NeonDepthwiseConv2dWorkload does not support non constant weights."};
- }
-
const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
diff --git a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
index d3716806b3..0b91eb37c2 100644
--- a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
@@ -28,14 +28,6 @@ arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo& input,
const FullyConnectedDescriptor& descriptor,
const ActivationDescriptor* activationDescriptor)
{
- // The NEON implemented workload does support both const and non const
- // weights. However, in the case of non const weights we'd have to call
- // prepare or configure for each inference which we're not setup to do just yet.
- if (!weights.IsConstant())
- {
- return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
- "Arm NN NeonFullyConnectedWorkload does not support non constant weights."};
- }
const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input);
const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output);
arm_compute::TensorInfo aclWeights = BuildArmComputeTensorInfo(weights);