author     SiCong Li <sicong.li@arm.com>    2023-01-04 10:04:26 +0000
committer  SiCong Li <sicong.li@arm.com>    2023-01-10 10:15:37 +0000
commit     1b2f868b7b55e3e952520f0380e9174696c3ad1b (patch)
tree       207552d4b1db84389e7a3d6f6b15bc512240d982 /src/gpu/cl/kernels
parent     3cce35dcad8bc8f53a1e6613f719af9ab04feda6 (diff)
download   ComputeLibrary-1b2f868b7b55e3e952520f0380e9174696c3ad1b.tar.gz
Fix CL DirectConvolutionLayer validate tests
* Add missing activation infos
* Remove faulty test "Shrink window"
* Split the tests based on data layout
* Fix ClDirectConv2dKernel::validate logic:
  fused activation in NCHW is not supported at all

Resolves: COMPMID-5801
Change-Id: I64dfbd24b77bb02fb4a88b73d5ef84676d85b4fd
Signed-off-by: SiCong Li <sicong.li@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8899
Reviewed-by: Jakub Sujak <jakub.sujak@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/gpu/cl/kernels')
-rw-r--r--  src/gpu/cl/kernels/ClDirectConv2dKernel.cpp | 3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/src/gpu/cl/kernels/ClDirectConv2dKernel.cpp b/src/gpu/cl/kernels/ClDirectConv2dKernel.cpp
index b66163c805..5f882e3a28 100644
--- a/src/gpu/cl/kernels/ClDirectConv2dKernel.cpp
+++ b/src/gpu/cl/kernels/ClDirectConv2dKernel.cpp
@@ -73,7 +73,7 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *weights, co
         ARM_COMPUTE_RETURN_ERROR_ON_MSG((weights->dimension(width_idx) == 1) && std::get<0>(conv_info.stride()) > 3, "Strides larger than 3 not supported for 1x1 convolution.");
         ARM_COMPUTE_RETURN_ERROR_ON_MSG((weights->dimension(width_idx) == 3 || weights->dimension(width_idx) == 5 || weights->dimension(width_idx) == 9) && std::get<0>(conv_info.stride()) > 2,
                                         "Strides larger than 2 not supported for 3x3, 5x5, 9x9 convolution.");
-        ARM_COMPUTE_RETURN_ERROR_ON_MSG(!is_data_type_float(src->data_type()) && act_info.enabled(), "Activation supported only for floating point and NHWC.");
+        ARM_COMPUTE_RETURN_ERROR_ON_MSG(act_info.enabled(), "Fused activation is not supported for NCHW layout");

         if(is_data_type_quantized(src->data_type()))
         {
@@ -89,6 +89,7 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *weights, co

     if(data_layout == DataLayout::NHWC)
     {
+        ARM_COMPUTE_RETURN_ERROR_ON_MSG(act_info.enabled() && !is_data_type_float(src->data_type()), "Fused activation in NHWC is only supported for floating point.");
         ARM_COMPUTE_RETURN_ERROR_ON_MSG(desc.m0 <= 0 || desc.m0 > 8, "M0 can only be greater than 0 and less than or equal to 8");
         ARM_COMPUTE_RETURN_ERROR_ON_MSG(desc.n0 != 1 && desc.n0 != 2 && desc.n0 != 3 && desc.n0 != 4 && desc.n0 != 8 && desc.n0 != 16,
                                         "N0 can only be: 1, 2, 3, 4, 8, and 16");