From 311cfa7f5fbaad0f98757465390fc9ad20a2d9a8 Mon Sep 17 00:00:00 2001
From: Francis Murtagh
Date: Fri, 20 May 2022 13:26:37 +0100
Subject: MLCE-825: Give reason when workload unsupported for Non Constant Weights/Bias

 * BackendHelper.cpp IsXXXLayerSupported doesn't get as far as Neon/Cl
   Validate functions where arm_compute::Status is returned.
 * Conv2d, Depthwise, DilatedDepthwise and FullyConnected
 * Tidy up if() -> if ()
 * Clean up logic in FullyConnected so that isLayerSupported gets called

Signed-off-by: Francis Murtagh
Signed-off-by: Teresa Charlin
Change-Id: I5da1a882f4a2f55e90aa984b2b9548a847cb3a2d
---
 src/backends/cl/workloads/ClConvolution2dWorkload.cpp        | 9 ---------
 src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp | 9 ---------
 src/backends/cl/workloads/ClFullyConnectedWorkload.cpp       | 8 --------
 3 files changed, 26 deletions(-)

(limited to 'src/backends/cl')

diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
index 762645bfba..6b0a3b8352 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -28,15 +28,6 @@ arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo& input,
                                                     bool isFastMathEnabled,
                                                     const ActivationDescriptor* activationDescriptor)
 {
-    // The arm_compute::CLConvolutionLayer supports both const and non const
-    // weights. However, in the case of non const weights we'd have to call
-    // prepare or configure for each inference which we're not setup to do just yet.
-    if (!weights.IsConstant())
-    {
-        return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
-                                   "ArmNN ClConvolution2dWorkload does not support non constant weights."};
-    }
-
     const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
     arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weights, descriptor.m_DataLayout);
diff --git a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
index 3a972d3f39..42fe400041 100644
--- a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
@@ -30,15 +30,6 @@ arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo& inp
                                                            const Optional<TensorInfo>& biases,
                                                            const ActivationDescriptor* activationDescriptor)
 {
-    // The CL implemented workload does support both const and non const
-    // weights. However, in the case of non const weights we'd have to call
-    // prepare or configure for each inference which we're not setup to do just yet.
-    if (!weights.IsConstant())
-    {
-        return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
-                                   "ArmNN ClDepthwiseConv2dWorkload does not support non constant weights."};
-    }
-
     const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
 
diff --git a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
index c2da5f297a..0e1efe0239 100644
--- a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
+++ b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
@@ -23,14 +23,6 @@ arm_compute::Status ClFullyConnectedWorkloadValidate(const TensorInfo& input,
                                                      const FullyConnectedDescriptor& descriptor,
                                                      const ActivationDescriptor* activationDescriptor)
 {
-    // The CL implemented workload does support both const and non const
-    // weights. However, in the case of non const weights we'd have to call
-    // prepare or configure for each inference which we're not setup to do just yet.
-    if (!weights.IsConstant())
-    {
-        return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
-                                   "Arm NN ClFullyConnectedWorkload does not support non constant weights."};
-    }
     const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input);
     const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output);
     arm_compute::TensorInfo aclWeights = BuildArmComputeTensorInfo(weights);
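
The first bullet of the commit message carries the reasoning behind these hunks: BackendHelper.cpp's IsXXXLayerSupported checks never get as far as the Cl/Neon Validate functions, so a rejection expressed only as an arm_compute::Status inside ClConvolution2dWorkloadValidate and friends is invisible to a caller asking whether the layer is supported. The non-constant-weights early-out is therefore removed from the Cl Validate functions above so the reason can be reported at the layer-support level instead. The sketch below is a minimal, hypothetical illustration of that pattern, assuming a reasonIfUnsupported out-parameter in the spirit of ArmNN's layer-support API; the stand-in types and the name IsConvolution2dSupportedSketch are invented for the example and are not the actual BackendHelper.cpp change (which lies outside the 'src/backends/cl' view shown here).

    // Hypothetical sketch: an early layer-support check that reports a reason,
    // rather than burying the rejection in an arm_compute::Status that the
    // IsLayerSupported path never sees. All names here are illustrative.
    #include <iostream>
    #include <string>

    // Minimal stand-in for armnn::TensorInfo, reduced to the one query used below.
    struct TensorInfo
    {
        bool m_IsConstant = false;
        bool IsConstant() const { return m_IsConstant; }
    };

    // Stand-in for ArmNN's Optional<std::string&> reasonIfUnsupported out-parameter;
    // a plain pointer keeps the example self-contained.
    using ReasonIfUnsupported = std::string*;

    // Fail fast with a reason before any backend Validate function is consulted.
    bool IsConvolution2dSupportedSketch(const TensorInfo& weights,
                                        ReasonIfUnsupported reasonIfUnsupported)
    {
        if (!weights.IsConstant())
        {
            if (reasonIfUnsupported != nullptr)
            {
                *reasonIfUnsupported = "Convolution2d: non constant weights are not supported; "
                                       "they would require configure/prepare on every inference.";
            }
            return false;
        }
        // Constant weights: defer to the backend Validate function as before.
        return true;
    }

    int main()
    {
        TensorInfo nonConstWeights;   // m_IsConstant defaults to false
        std::string reason;

        if (!IsConvolution2dSupportedSketch(nonConstWeights, &reason))
        {
            // The caller now learns *why* the workload is unsupported.
            std::cout << "Unsupported: " << reason << "\n";
        }
        return 0;
    }

The design point is simply that the support query, not the workload validation, is the place where a human-readable reason can be handed back, so a frontend can surface "non constant weights" directly instead of only learning that validation failed.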