author     Francis Murtagh <francis.murtagh@arm.com>    2022-05-20 13:26:37 +0100
committer  Teresa Charlin <teresa.charlinreyes@arm.com> 2022-05-23 18:40:50 +0100
commit     311cfa7f5fbaad0f98757465390fc9ad20a2d9a8 (patch)
tree       d103bfbb739f582d3602c4000568c1c7c2c44735 /src/backends/cl
parent     721e629fa07e65d6a53c093518021e71e48eeac2 (diff)
MLCE-825: Give reason when workload unsupported for Non Constant Weights/Bias
* BackendHelper.cpp IsXXXLayerSupported doesn't get as far as Neon/Cl Validate functions where arm_compute::Status is returned.
* Conv2d, Depthwise, DilatedDepthwise and FullyConnected
* Tidy up if() -> if ()
* Clean up logic in FullyConnected so that isLayerSupported gets called

Signed-off-by: Francis Murtagh <francis.murtagh@arm.com>
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I5da1a882f4a2f55e90aa984b2b9548a847cb3a2d
Diffstat (limited to 'src/backends/cl')
-rw-r--r--  src/backends/cl/workloads/ClConvolution2dWorkload.cpp         9
-rw-r--r--  src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp  9
-rw-r--r--  src/backends/cl/workloads/ClFullyConnectedWorkload.cpp        8
3 files changed, 0 insertions, 26 deletions
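The three hunks below all delete the same kind of early return: a non-constant-weights check whose arm_compute::Status message never reached the caller, because BackendHelper.cpp's IsXXXLayerSupported rejects the layer before the Cl Validate functions are invoked. The following sketch is illustrative only; the helper name, exact signature, and wording are assumptions rather than the committed BackendHelper.cpp code. It shows the general pattern of surfacing the reason through Arm NN's Optional<std::string&> out-parameter instead of burying it inside the workload Validate:

// Minimal sketch of the intent, NOT the committed BackendHelper.cpp code:
// report the non-constant-weights reason to the caller via the usual
// Optional<std::string&> out-parameter. The function name and message
// wording are illustrative assumptions.
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <string>

bool CheckNonConstWeightsSketch(const armnn::TensorInfo& weights,
                                armnn::Optional<std::string&> reasonIfUnsupported)
{
    if (!weights.IsConstant())
    {
        if (reasonIfUnsupported.has_value())
        {
            reasonIfUnsupported.value() =
                "ArmNN ClConvolution2dWorkload does not support non constant weights.";
        }
        return false; // unsupported, and the caller now knows why
    }
    return true; // constant weights: safe to continue to the backend Validate call
}

With the reason reported at that level, the duplicate checks in the workload Validate functions become dead weight and are removed below.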
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
index 762645bfba..6b0a3b8352 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -28,15 +28,6 @@ arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo& input,
bool isFastMathEnabled,
const ActivationDescriptor* activationDescriptor)
{
- // The arm_compute::CLConvolutionLayer supports both const and non const
- // weights. However, in the case of non const weights we'd have to call
- // prepare or configure for each inference which we're not setup to do just yet.
- if (!weights.IsConstant())
- {
- return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
- "ArmNN ClConvolution2dWorkload does not support non constant weights."};
- }
-
const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weights, descriptor.m_DataLayout);
diff --git a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
index 3a972d3f39..42fe400041 100644
--- a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
@@ -30,15 +30,6 @@ arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo& inp
const Optional<TensorInfo>& biases,
const ActivationDescriptor* activationDescriptor)
{
- // The CL implemented workload does support both const and non const
- // weights. However, in the case of non const weights we'd have to call
- // prepare or configure for each inference which we're not setup to do just yet.
- if (!weights.IsConstant())
- {
- return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
- "ArmNN ClDepthwiseConv2dWorkload does not support non constant weights."};
- }
-
const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
diff --git a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
index c2da5f297a..0e1efe0239 100644
--- a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
+++ b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
@@ -23,14 +23,6 @@ arm_compute::Status ClFullyConnectedWorkloadValidate(const TensorInfo& input,
const FullyConnectedDescriptor& descriptor,
const ActivationDescriptor* activationDescriptor)
{
- // The CL implemented workload does support both const and non const
- // weights. However, in the case of non const weights we'd have to call
- // prepare or configure for each inference which we're not setup to do just yet.
- if (!weights.IsConstant())
- {
- return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
- "Arm NN ClFullyConnectedWorkload does not support non constant weights."};
- }
const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input);
const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output);
arm_compute::TensorInfo aclWeights = BuildArmComputeTensorInfo(weights);