Diffstat (limited to 'src/backends/cl')
-rw-r--r-- src/backends/cl/workloads/ClConvolution2dWorkload.cpp        | 9
-rw-r--r-- src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp | 9
-rw-r--r-- src/backends/cl/workloads/ClFullyConnectedWorkload.cpp       | 8
3 files changed, 0 insertions, 26 deletions
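
All three hunks below delete the same early-return guard on weights.IsConstant(). For context, a minimal stand-alone sketch of how that constness flag is set and queried on armnn::TensorInfo; the shape and data type are illustrative placeholders, not values taken from the patch.

#include <armnn/Tensor.hpp>
#include <iostream>

int main()
{
    // Illustrative weights tensor description; shape/type are placeholders.
    armnn::TensorInfo weightsInfo(armnn::TensorShape({8, 3, 3, 16}),
                                  armnn::DataType::Float32);

    // Weights supplied as constant data are flagged on the TensorInfo;
    // weights fed in as a regular (runtime) input would leave this false.
    weightsInfo.SetConstant(true);

    // This is the flag the removed guards rejected when it was false.
    std::cout << "weights constant: " << weightsInfo.IsConstant() << "\n";
    return 0;
}
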
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
index 762645bfba..6b0a3b8352 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -28,15 +28,6 @@ arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo& input,
bool isFastMathEnabled,
const ActivationDescriptor* activationDescriptor)
{
- // The arm_compute::CLConvolutionLayer supports both const and non const
- // weights. However, in the case of non const weights we'd have to call
- // prepare or configure for each inference which we're not setup to do just yet.
- if (!weights.IsConstant())
- {
- return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
- "ArmNN ClConvolution2dWorkload does not support non constant weights."};
- }
-
const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weights, descriptor.m_DataLayout);
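
With the guard removed, the non-constant-weights question is left to the Arm Compute Library validation further down in this function, whose arm_compute::Status is returned to the caller. A hedged sketch of how such a status is typically reduced to a supported/unsupported answer; IsSupported is a hypothetical helper, not an ArmNN function.

#include <arm_compute/core/Error.h>
#include <iostream>
#include <string>

// Hypothetical helper: reduce a validate status to a bool plus a reason.
bool IsSupported(const arm_compute::Status& status, std::string& reasonOut)
{
    if (status.error_code() != arm_compute::ErrorCode::OK)
    {
        reasonOut = status.error_description();
        return false;
    }
    return true;
}

int main()
{
    // Mirror the status the removed guard used to return, to exercise both paths.
    arm_compute::Status rejected{arm_compute::ErrorCode::RUNTIME_ERROR,
                                 "non constant weights"};
    std::string reason;
    std::cout << IsSupported(rejected, reason) << " " << reason << "\n";
    std::cout << IsSupported(arm_compute::Status{}, reason) << "\n";
    return 0;
}
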
diff --git a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
index 3a972d3f39..42fe400041 100644
--- a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
@@ -30,15 +30,6 @@ arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo& inp
const Optional<TensorInfo>& biases,
const ActivationDescriptor* activationDescriptor)
{
- // The CL implemented workload does support both const and non const
- // weights. However, in the case of non const weights we'd have to call
- // prepare or configure for each inference which we're not setup to do just yet.
- if (!weights.IsConstant())
- {
- return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
- "ArmNN ClDepthwiseConv2dWorkload does not support non constant weights."};
- }
-
const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
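
The biases argument visible in this hunk header is an armnn::Optional<TensorInfo>. A small stand-alone sketch of passing or omitting an optional bias; ValidateWithBias and the shapes are hypothetical, only the Optional/EmptyOptional usage is the point.

#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>

// Hypothetical function standing in for a validate call that takes an
// optional bias, as the depthwise validate above does.
void ValidateWithBias(const armnn::Optional<armnn::TensorInfo>& biases)
{
    if (biases.has_value())
    {
        const armnn::TensorInfo& biasInfo = biases.value();
        (void)biasInfo; // a real validate would build an ACL tensor info from this
    }
}

int main()
{
    armnn::TensorInfo bias(armnn::TensorShape({16}), armnn::DataType::Float32);

    ValidateWithBias(armnn::Optional<armnn::TensorInfo>(bias)); // bias present
    ValidateWithBias(armnn::EmptyOptional());                   // bias omitted
    return 0;
}
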
diff --git a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
index c2da5f297a..0e1efe0239 100644
--- a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
+++ b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
@@ -23,14 +23,6 @@ arm_compute::Status ClFullyConnectedWorkloadValidate(const TensorInfo& input,
const FullyConnectedDescriptor& descriptor,
const ActivationDescriptor* activationDescriptor)
{
- // The CL implemented workload does support both const and non const
- // weights. However, in the case of non const weights we'd have to call
- // prepare or configure for each inference which we're not setup to do just yet.
- if (!weights.IsConstant())
- {
- return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
- "Arm NN ClFullyConnectedWorkload does not support non constant weights."};
- }
const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input);
const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output);
arm_compute::TensorInfo aclWeights = BuildArmComputeTensorInfo(weights);
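
The fully connected hunk drops the same guard. A hedged sketch of the descriptor seen in its signature; m_BiasEnabled and m_TransposeWeightMatrix are standard armnn::FullyConnectedDescriptor members, and the interpretation that constness now travels on the weights TensorInfo rather than being rejected here is an assumption based on this patch.

#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>

int main()
{
    // Descriptor matching the parameter visible in the hunk header above.
    armnn::FullyConnectedDescriptor descriptor;
    descriptor.m_BiasEnabled           = true;
    descriptor.m_TransposeWeightMatrix = false;

    // After this patch, weights described as non-constant are no longer
    // rejected up front by ClFullyConnectedWorkloadValidate; constness is
    // carried on the TensorInfo itself (illustrative shape below).
    armnn::TensorInfo weightsInfo(armnn::TensorShape({128, 64}),
                                  armnn::DataType::Float32);
    weightsInfo.SetConstant(false);

    return weightsInfo.IsConstant() ? 1 : 0;
}
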