From 1b2f868b7b55e3e952520f0380e9174696c3ad1b Mon Sep 17 00:00:00 2001
From: SiCong Li
Date: Wed, 4 Jan 2023 10:04:26 +0000
Subject: Fix CL DirectConvolutionLayer validate tests

* Add missing activation infos
* Remove faulty test "Shrink window"
* Split the tests based on data layout
* Fix ClDirectConv2dKernel::validate logic
  Fused activation in NCHW is not supported at all

Resolves: COMPMID-5801
Change-Id: I64dfbd24b77bb02fb4a88b73d5ef84676d85b4fd
Signed-off-by: SiCong Li
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8899
Reviewed-by: Jakub Sujak
Reviewed-by: Gian Marco Iodice
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
Benchmark: Arm Jenkins
---
 src/gpu/cl/kernels/ClDirectConv2dKernel.cpp      |   3 +-
 tests/validation/CL/DirectConvolutionLayer.cpp   | 121 ++++++++++++++++++-----
 tests/validation/NEON/DirectConvolutionLayer.cpp |  14 +--
 3 files changed, 103 insertions(+), 35 deletions(-)

diff --git a/src/gpu/cl/kernels/ClDirectConv2dKernel.cpp b/src/gpu/cl/kernels/ClDirectConv2dKernel.cpp
index b66163c805..5f882e3a28 100644
--- a/src/gpu/cl/kernels/ClDirectConv2dKernel.cpp
+++ b/src/gpu/cl/kernels/ClDirectConv2dKernel.cpp
@@ -73,7 +73,7 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *weights, co
     ARM_COMPUTE_RETURN_ERROR_ON_MSG((weights->dimension(width_idx) == 1) && std::get<0>(conv_info.stride()) > 3, "Strides larger than 3 not supported for 1x1 convolution.");
     ARM_COMPUTE_RETURN_ERROR_ON_MSG((weights->dimension(width_idx) == 3 || weights->dimension(width_idx) == 5 || weights->dimension(width_idx) == 9) && std::get<0>(conv_info.stride()) > 2,
                                     "Strides larger than 2 not supported for 3x3, 5x5, 9x9 convolution.");
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(!is_data_type_float(src->data_type()) && act_info.enabled(), "Activation supported only for floating point and NHWC.");
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(act_info.enabled(), "Fused activation is not supported for NCHW layout");

     if(is_data_type_quantized(src->data_type()))
     {
@@ -89,6 +89,7 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *weights, co

     if(data_layout == DataLayout::NHWC)
     {
+        ARM_COMPUTE_RETURN_ERROR_ON_MSG(act_info.enabled() && !is_data_type_float(src->data_type()), "Fused activation in NHWC is only supported for floating point.");
         ARM_COMPUTE_RETURN_ERROR_ON_MSG(desc.m0 <= 0 || desc.m0 > 8, "M0 can only be greater than 0 and less than or equal to 8");
         ARM_COMPUTE_RETURN_ERROR_ON_MSG(desc.n0 != 1 && desc.n0 != 2 && desc.n0 != 3 && desc.n0 != 4 && desc.n0 != 8 && desc.n0 != 16,
                                         "N0 can only be: 1, 2, 3, 4, 8, and 16");
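
The kernel change above splits one combined check into two layout-specific ones: validate_arguments() now rejects any fused activation when the layout is NCHW, and the float-only restriction reappears further down inside the NHWC branch. A condensed restatement of the resulting rule as a standalone C++ sketch; the enums and the is_fused_activation_supported helper are illustrative stand-ins, not library code:

#include <cassert>

// Illustrative stand-ins for the library's types; not arm_compute code.
enum class DataLayout { NCHW, NHWC };
enum class DataType   { F16, F32, QASYMM8 };

static bool is_float(DataType dt)
{
    return dt == DataType::F16 || dt == DataType::F32;
}

// Condenses the two ARM_COMPUTE_RETURN_ERROR_ON_MSG checks in the hunks above.
static bool is_fused_activation_supported(DataLayout layout, DataType dt, bool act_enabled)
{
    if(!act_enabled)
    {
        return true; // Nothing fused, nothing to reject.
    }
    if(layout == DataLayout::NCHW)
    {
        return false; // "Fused activation is not supported for NCHW layout"
    }
    return is_float(dt); // NHWC: "Fused activation in NHWC is only supported for floating point."
}

int main()
{
    assert(!is_fused_activation_supported(DataLayout::NCHW, DataType::F32, true));     // NCHW + ReLU: rejected
    assert(is_fused_activation_supported(DataLayout::NHWC, DataType::F16, true));      // NHWC + float: accepted
    assert(!is_fused_activation_supported(DataLayout::NHWC, DataType::QASYMM8, true)); // NHWC + quantized: rejected
    return 0;
}
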
diff --git a/tests/validation/CL/DirectConvolutionLayer.cpp b/tests/validation/CL/DirectConvolutionLayer.cpp
index f026bfe0b0..512c2cab97 100644
--- a/tests/validation/CL/DirectConvolutionLayer.cpp
+++ b/tests/validation/CL/DirectConvolutionLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2022 Arm Limited.
+ * Copyright (c) 2017-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -185,52 +185,36 @@ TEST_CASE(NonSquareKernel, framework::DatasetMode::PRECOMMIT)
 // *INDENT-OFF*
 // clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
-               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type input/weights
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching input feature maps
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Unsupported kernel width
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Non-rectangular weights dimensions
+               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid: Mismatching data type input/weights
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid: Mismatching input feature maps
                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid weights dimensions
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid stride
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid biases size
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid biases dimensions
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Unsupported biases size
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Unsupported biases dimensions
                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid output size
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Window shrink
                                                        TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32),
                                                      }),
                framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F16),
                                                         TensorInfo(TensorShape(3U, 3U, 3U, 4U), 1, DataType::F32),
-                                                        TensorInfo(TensorShape(11U, 11U, 2U, 4U), 1, DataType::F32),
-                                                        TensorInfo(TensorShape(5U, 3U, 2U, 4U), 1, DataType::F32),
                                                         TensorInfo(TensorShape(3U, 3U, 2U, 4U, 3U), 1, DataType::F32),
                                                         TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
                                                         TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
                                                         TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
-                                                        TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
-                                                        TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
                                                         TensorInfo(TensorShape(1U, 1U, 2U, 4U), 1, DataType::F32),
                                                       })),
                framework::dataset::make("BiasesInfo",{ TensorInfo(TensorShape(4U), 1, DataType::F32),
-                                                       TensorInfo(TensorShape(4U), 1, DataType::F32),
-                                                       TensorInfo(TensorShape(4U), 1, DataType::F32),
-                                                       TensorInfo(TensorShape(4U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(4U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(4U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(3U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(4U, 2U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(4U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(4U), 1, DataType::F32),
-                                                       TensorInfo(TensorShape(4U), 1, DataType::F32),
                                                      })),
                framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
-                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
-                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
-                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(26U, 11U, 4U), 1, DataType::F32),
-                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(32U, 16U, 4U), 1, DataType::F32),
                                                      })),
                framework::dataset::make("ConvInfo", { PadStrideInfo(1, 1, 0, 0),
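
The hunk above prunes the zipped dataset columns from eleven rows to seven; the hunk below pads the previously single-entry ActivationInfo column to the same length. The framework's zip pairs the i-th entry of every column into one test case, so each column must supply exactly one entry per row. A rough sketch of that pairing rule, using plain std::vector in place of the framework's dataset types (zip_columns is a hypothetical stand-in, not the framework API):

#include <cassert>
#include <cstddef>
#include <string>
#include <utility>
#include <vector>

// Hypothetical stand-in for framework::dataset::zip: pairs the i-th entry of
// two columns into one test case, so both columns must have the same length.
template <typename A, typename B>
std::vector<std::pair<A, B>> zip_columns(const std::vector<A> &lhs, const std::vector<B> &rhs)
{
    assert(lhs.size() == rhs.size()); // A short column would leave rows without, e.g., an ActivationInfo.
    std::vector<std::pair<A, B>> cases;
    for(std::size_t i = 0; i < lhs.size(); ++i)
    {
        cases.emplace_back(lhs[i], rhs[i]);
    }
    return cases;
}

int main()
{
    // Seven input rows now pair with seven expected flags, as in the hunks here.
    const std::vector<std::string> inputs(7, "TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32)");
    const std::vector<bool>        expected{false, false, false, false, false, false, true};
    assert(zip_columns(inputs, expected).size() == 7);
    return 0;
}
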
@@ -238,23 +222,27 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
                                                       PadStrideInfo(1, 1, 0, 0),
                                                       PadStrideInfo(1, 1, 0, 0),
                                                       PadStrideInfo(1, 1, 0, 0),
-                                                      PadStrideInfo(3, 3, 0, 0),
-                                                      PadStrideInfo(1, 1, 0, 0),
-                                                      PadStrideInfo(1, 1, 0, 0),
-                                                      PadStrideInfo(1, 1, 0, 0),
                                                       PadStrideInfo(1, 1, 0, 0),
                                                       PadStrideInfo(1, 1, 0, 0),
                                                     })),
                framework::dataset::make("ActivationInfo",
 {
+    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
 })),
-               framework::dataset::make("Expected", { false, false, false, false, false, false, false, false, false, false, true })),
+               framework::dataset::make("Expected", { false, false, false, false, false, false, true })),
                input_info, weights_info, biases_info, output_info, conv_info, act_info, expected)
 {
     bool is_valid = bool(CLDirectConvolutionLayer::validate(&input_info.clone()->set_is_resizable(false), &weights_info.clone()->set_is_resizable(false), &biases_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), conv_info, act_info));
     ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
 }
+// clang-format on
+// *INDENT-ON*

 template <typename T>
 using CLDirectConvolutionLayerFixture = DirectConvolutionValidationFixture<CLTensor, CLAccessor, CLDirectConvolutionLayer, T>;
@@ -270,6 +258,46 @@ template <typename T>
 using CLDirectConvolutionValidationWithTensorShapesQuantizedFixture = DirectConvolutionValidationWithTensorShapesQuantizedFixture<CLTensor, CLAccessor, CLDirectConvolutionLayer, T>;

 TEST_SUITE(NHWC)
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
+               framework::dataset::make("InputInfo", {
+                       TensorInfo(TensorShape(2U, 27U, 13U), 1, DataType::F32, DataLayout::NHWC), // Arbitrary weight sizes for NHWC are supported
+                       TensorInfo(TensorShape(2U, 27U, 13U), 1, DataType::F32, DataLayout::NHWC), // Non-rectangular weights dimensions for NHWC are supported
+                       TensorInfo(TensorShape(2U, 27U, 13U), 1, DataType::F32, DataLayout::NHWC), // Strides > 2 for any kernel sizes for NHWC are supported
+                       }),
+               framework::dataset::make("WeightsInfo",{
+                       TensorInfo(TensorShape(2U, 13U, 13U, 4U), 1, DataType::F32, DataLayout::NHWC),
+                       TensorInfo(TensorShape(2U, 5U, 3U, 4U), 1, DataType::F32, DataLayout::NHWC),
+                       TensorInfo(TensorShape(2U, 3U, 3U, 4U), 1, DataType::F32, DataLayout::NHWC),
+                       })),
+               framework::dataset::make("BiasesInfo",{
+                       TensorInfo(TensorShape(4U), 1, DataType::F32, DataLayout::NHWC),
+                       TensorInfo(TensorShape(4U), 1, DataType::F32, DataLayout::NHWC),
+                       TensorInfo(TensorShape(4U), 1, DataType::F32, DataLayout::NHWC),
+                       })),
+               framework::dataset::make("OutputInfo",{
+                       TensorInfo(TensorShape(4U, 15U, 1U), 1, DataType::F32, DataLayout::NHWC),
+                       TensorInfo(TensorShape(4U, 23U, 11U), 1, DataType::F32, DataLayout::NHWC),
+                       TensorInfo(TensorShape(4U, 9U, 4U), 1, DataType::F32, DataLayout::NHWC),
+                       })),
+               framework::dataset::make("ConvInfo", {
+                       PadStrideInfo(1, 1, 0, 0),
+                       PadStrideInfo(1, 1, 0, 0),
+                       PadStrideInfo(3, 3, 0, 0),
+                       })),
+               framework::dataset::make("ActivationInfo",
+{
+    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+})),
+               framework::dataset::make("Expected", { true, true, true })),
+               input_info, weights_info, biases_info, output_info, conv_info, act_info, expected)
+{
+    bool is_valid = bool(CLDirectConvolutionLayer::validate(&input_info.clone()->set_is_resizable(false), &weights_info.clone()->set_is_resizable(false), &biases_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), conv_info, act_info));
+    ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
+}

 TEST_SUITE(FP16)
 FIXTURE_DATA_TEST_CASE(RunSmall, CLDirectConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(zip(zip(zip(zip(zip(zip(
@@ -479,9 +507,48 @@
 TEST_SUITE_END() // QASYMM8_SIGNED
 TEST_SUITE_END() // Quantized
 TEST_SUITE_END() // NHWC
+TEST_SUITE(NCHW)
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
+               framework::dataset::make("InputInfo", {
+                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, DataLayout::NCHW), // Unsupported kernel width
+                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, DataLayout::NCHW), // Non-rectangular weights dimensions are unsupported
+                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, DataLayout::NCHW)  // Unsupported stride
+                       }),
+               framework::dataset::make("WeightsInfo",{
+                       TensorInfo(TensorShape(11U, 11U, 2U, 4U), 1, DataType::F32, DataLayout::NCHW),
+                       TensorInfo(TensorShape(5U, 3U, 2U, 4U), 1, DataType::F32, DataLayout::NCHW),
+                       TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, DataLayout::NCHW)
+                       })),
+               framework::dataset::make("BiasesInfo",{
+                       TensorInfo(TensorShape(4U), 1, DataType::F32, DataLayout::NCHW),
+                       TensorInfo(TensorShape(4U), 1, DataType::F32, DataLayout::NCHW),
+                       TensorInfo(TensorShape(4U), 1, DataType::F32, DataLayout::NCHW)
+                       })),
+               framework::dataset::make("OutputInfo",{
+                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, DataLayout::NCHW),
+                       TensorInfo(TensorShape(23U, 11U, 4U), 1, DataType::F32, DataLayout::NCHW),
+                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, DataLayout::NCHW)
+                       })),
+               framework::dataset::make("ConvInfo", {
+                       PadStrideInfo(1, 1, 0, 0),
+                       PadStrideInfo(1, 1, 0, 0),
+                       PadStrideInfo(3, 3, 0, 0)
+                       })),
+               framework::dataset::make("ActivationInfo",
+{
+    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
+})),
+               framework::dataset::make("Expected", { false, false, false})),
+               input_info, weights_info, biases_info, output_info, conv_info, act_info, expected)
+{
+    bool is_valid = bool(CLDirectConvolutionLayer::validate(&input_info.clone()->set_is_resizable(false), &weights_info.clone()->set_is_resizable(false), &biases_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), conv_info, act_info));
+    ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
+}
 // clang-format on
 // *INDENT-ON*
-TEST_SUITE(NCHW)
+
 TEST_SUITE(Float)
 TEST_SUITE(FP16)
 FIXTURE_DATA_TEST_CASE(RunSmall, CLDirectConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(data_precommit, framework::dataset::make("DataType", DataType::F16)),
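
The expected output shapes in the new NHWC and NCHW rows follow the usual zero-padding formula out = (in - kernel) / stride + 1, with integer division. A quick compile-time self-check of the three NHWC rows added above; conv_out_dim is a throwaway helper written for this note, not a library function:

// Throwaway helper, not arm_compute code: out = (in - kernel) / stride + 1 for zero padding.
constexpr unsigned int conv_out_dim(unsigned int in, unsigned int kernel, unsigned int stride)
{
    return (in - kernel) / stride + 1;
}

// NHWC row 1: 27x13 spatial input, 13x13 kernel, stride 1 -> 15x1 output.
static_assert(conv_out_dim(27, 13, 1) == 15 && conv_out_dim(13, 13, 1) == 1, "row 1");
// NHWC row 2: 5x3 kernel, stride 1 -> 23x11 output.
static_assert(conv_out_dim(27, 5, 1) == 23 && conv_out_dim(13, 3, 1) == 11, "row 2");
// NHWC row 3: 3x3 kernel, stride 3 -> 9x4 output.
static_assert(conv_out_dim(27, 3, 3) == 9 && conv_out_dim(13, 3, 3) == 4, "row 3");

int main() { return 0; }
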
diff --git a/tests/validation/NEON/DirectConvolutionLayer.cpp b/tests/validation/NEON/DirectConvolutionLayer.cpp
index 0f4c6bb279..73c1a5c3e3 100644
--- a/tests/validation/NEON/DirectConvolutionLayer.cpp
+++ b/tests/validation/NEON/DirectConvolutionLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2022 Arm Limited.
+ * Copyright (c) 2017-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -220,14 +220,14 @@ DATA_TEST_CASE(KernelSelection, framework::DatasetMode::ALL,
 // *INDENT-OFF*
 // clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
-               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type input/weights
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching input feature maps
+               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid: Mismatching data type input/weights
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid: Mismatching input feature maps
                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Unsupported kernel width
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Non-rectangular weights dimensions
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Unsupported non-rectangular weights dimensions
                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid weights dimensions
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid stride
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid biases size
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid biases dimensions
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Unsupported stride
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Unsupported biases size
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Unsupported biases dimensions
                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid output size
                                                      }),
                framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F16),
--
cgit v1.2.1
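
Both validation suites call validate() as a dry run, which is also the guard an application would use before configuring the function. A minimal sketch of that pattern, reusing the rejected stride-3 NCHW row from the CL test above; it assumes an OpenCL-capable build of the library and that Status::error_description() is available for the failure text:

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h"

#include <iostream>

using namespace arm_compute;

int main()
{
    // The "Unsupported stride" NCHW row: 3x3 kernel, stride 3, fused ReLU.
    const TensorInfo src(TensorShape(27U, 13U, 2U), 1, DataType::F32, DataLayout::NCHW);
    const TensorInfo weights(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, DataLayout::NCHW);
    const TensorInfo biases(TensorShape(4U), 1, DataType::F32, DataLayout::NCHW);
    const TensorInfo dst(TensorShape(25U, 11U, 4U), 1, DataType::F32, DataLayout::NCHW);

    const PadStrideInfo       conv_info(3, 3, 0, 0);
    const ActivationLayerInfo act_info(ActivationLayerInfo::ActivationFunction::RELU);

    // Dry-run check, as in the DATA_TEST_CASE bodies: no tensors are allocated.
    const Status status = CLDirectConvolutionLayer::validate(&src, &weights, &biases, &dst, conv_info, act_info);
    if(!bool(status))
    {
        std::cout << "Unsupported configuration: " << status.error_description() << "\n";
        return 1;
    }
    return 0;
}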