From aba2f912a21487776e540724cf354a6cd8d89829 Mon Sep 17 00:00:00 2001
From: morgolock
Date: Tue, 5 May 2020 16:28:19 +0100
Subject: COMPMID-3289: Test improvement CLGEMMMatrixMultiplyReshapedKernel.

Change-Id: Ia6c2f115849889baceafaf716477456e41f96037
Signed-off-by: morgolock
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3186
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Sang-Hoon Park
---
 tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp | 166 ++++++++++++++++++++-
 1 file changed, 160 insertions(+), 6 deletions(-)

diff --git a/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp b/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp
index a333d1ca02..833a9240bf 100644
--- a/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp
+++ b/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -88,7 +88,7 @@ RelativeTolerance rel_tolerance_f16(0.001f);
 constexpr float abs_tolerance_f16(0.01f);
 
 /** M values to test */
-const auto m_values = framework::dataset::make("M", 37);
+const auto m_values = framework::dataset::make("M", 17);
 
 /** M_W values to test */
 const auto m_w_values = framework::dataset::make("M_W", 5);
@@ -97,18 +97,17 @@ const auto m_w_values = framework::dataset::make("M_W", 5);
 
 /** M_H values to test */
 const auto m_h_values = framework::dataset::make("M_H", 7);
 
 /** N values to test */
-const auto n_values = framework::dataset::make("N", 51);
+const auto n_values = framework::dataset::make("N", 21);
 
 /** K values to test */
-const auto k_values = framework::dataset::make("K", 23);
+const auto k_values = framework::dataset::make("K", 13);
 
 /** Batch size values to test */
-const auto b_values = framework::dataset::make("batch_size", 1, 3);
+const auto b_values = framework::dataset::make("batch_size", 2, 3);
 
 /** Activation values to test */
 const auto act_values = framework::dataset::make("Activation",
 {
-    ActivationLayerInfo(),
     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 8.f, 2.f),
 });
@@ -169,6 +168,161 @@ const auto lhs_transpose_values = framework::dataset::make("lhs_transpose", { fa
 
 TEST_SUITE(CL)
 TEST_SUITE(GEMMMatrixMultiplyReshaped)
+
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(
+               framework::dataset::make("Input0Info", { TensorInfo(TensorShape(64U, 5U, 2U), 1, DataType::F32),     // OK
+                                                        TensorInfo(TensorShape(64U, 5U, 2U), 1, DataType::F16),     // OK
+                                                        TensorInfo(TensorShape(64U, 5U, 2U), 1, DataType::QASYMM8), // Data type not supported
+                                                        TensorInfo(TensorShape(10U, 5U, 2U), 1, DataType::F32),     // Incorrect dimension bias
+                                                        TensorInfo(TensorShape(64U, 5U, 2U), 1, DataType::F32),     // Mismatching shapes
+                                                        TensorInfo(TensorShape(64U, 5U, 2U), 1, DataType::F16),     // OK, do not broadcast bias
+                                                        TensorInfo(TensorShape(64U, 5U, 2U), 1, DataType::F16),     // OK, wider accumulation
+                                                        TensorInfo(TensorShape(64U, 5U, 2U), 1, DataType::F16),     // OK, RHS 4,4,2
+
+}),
+               framework::dataset::make("Input1Info",{ TensorInfo(TensorShape(64U, 6U, 2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(64U, 6U, 2U), 1, DataType::F16),
+                                                       TensorInfo(TensorShape(64U, 5U, 2U), 1, DataType::QASYMM8),
+                                                       TensorInfo(TensorShape(64U, 6U, 2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(64U, 6U, 2U), 1, DataType::F16),
+                                                       TensorInfo(TensorShape(64U, 6U, 2U), 1, DataType::F16),
+                                                       TensorInfo(TensorShape(128U, 3U, 2U), 1, DataType::F16),
+
+})),
+               framework::dataset::make("Input2Info", { TensorInfo(TensorShape(21U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(21U), 1, DataType::F16),
+                                                        TensorInfo(TensorShape(21U), 1, DataType::QASYMM8),
+                                                        TensorInfo(TensorShape(21U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(21U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(21U,17U), 1, DataType::F16),
+                                                        TensorInfo(TensorShape(21U,17U), 1, DataType::F16),
+                                                        TensorInfo(TensorShape(21U,17U,2U), 1, DataType::F16),
+
+})),
+               framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(21U,17U,2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(21U,17U,2U), 1, DataType::F16),
+                                                       TensorInfo(TensorShape(21U,17U,2U), 1, DataType::QASYMM8),
+                                                       TensorInfo(TensorShape(21U,17U,2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(21U,17U,2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(21U,17U,2U), 1, DataType::F16),
+                                                       TensorInfo(TensorShape(21U,17U,2U), 1, DataType::F16),
+                                                       TensorInfo(TensorShape(21U,17U,2U), 1, DataType::F16),
+
+})),
+               framework::dataset::make("LHSMInfo",{
+                                                    GEMMLHSMatrixInfo(4,4,1,false,true),
+                                                    GEMMLHSMatrixInfo(4,4,1,false,true),
+                                                    GEMMLHSMatrixInfo(4,4,1,false,true),
+                                                    GEMMLHSMatrixInfo(4,2,4,false,false),
+                                                    GEMMLHSMatrixInfo(4,2,4,false,false),
+                                                    GEMMLHSMatrixInfo(4,4,1,false,true),
+                                                    GEMMLHSMatrixInfo(4,4,1,false,true),
+                                                    GEMMLHSMatrixInfo(4,4,1,false,true),
+
+})),
+               framework::dataset::make("RHSMInfo",{
+                                                    GEMMRHSMatrixInfo(4,4,1,true,true),
+                                                    GEMMRHSMatrixInfo(4,4,1,true,true),
+                                                    GEMMRHSMatrixInfo(4,4,1,true,true),
+                                                    GEMMRHSMatrixInfo(2,2,1,true,false),
+                                                    GEMMRHSMatrixInfo(2,2,1,true,false),
+                                                    GEMMRHSMatrixInfo(4,4,1,true,true),
+                                                    GEMMRHSMatrixInfo(4,4,1,true,true),
+                                                    GEMMRHSMatrixInfo(4,4,2,true,false),
+
+})),
+               framework::dataset::make("GEMMInfo",{
+                                                    GEMMKernelInfo( 17 /**
[... the remaining "GEMMInfo" entries, the "Expected" dataset, and the DATA_TEST_CASE parameter list are truncated in the source ...]
+{
+   ARM_COMPUTE_EXPECT(bool(CLGEMMMatrixMultiplyReshapedKernel::validate(&input0_info.clone()->set_is_resizable(true),
+                                                                        &input1_info.clone()->set_is_resizable(true),
+                                                                        &input2_info.clone()->set_is_resizable(true),
+                                                                        &output_info.clone()->set_is_resizable(true),1.f,1.f,
+                                                                        lhs_info,
+                                                                        rhs_info,
+                                                                        gemm_info)) == expected, framework::LogLevel::ERRORS);
+}
 
 TEST_SUITE(Float)
 TEST_SUITE(FP32)
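
Note on the validate-only test idiom used above: each zipped dataset row supplies one complete argument tuple for the kernel's static validate() method, which checks a configuration for consistency without allocating any OpenCL resources, and the test only compares the pass/fail result against the "Expected" flag. The sketch below shows roughly what the first (expected-OK) row boils down to as a direct call. It is a minimal illustration, not part of the patch: the GEMMKernelInfo member names (m, n, k, broadcast_bias), the include paths, and the exact validate() signature are assumed from the fragments visible in the diff and may differ between Compute Library versions.

#include "arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h"
#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"

using namespace arm_compute;

// Mirrors the first row of the Validate dataset: F32 tensors,
// LHS info (4,4,1,false,true) and RHS info (4,4,1,true,true).
bool first_row_is_valid()
{
    // Reshaped operands, bias and output; shapes taken verbatim from the dataset.
    const TensorInfo input0(TensorShape(64U, 5U, 2U), 1, DataType::F32);  // reshaped LHS
    const TensorInfo input1(TensorShape(64U, 6U, 2U), 1, DataType::F32);  // reshaped RHS
    const TensorInfo input2(TensorShape(21U), 1, DataType::F32);          // bias vector
    const TensorInfo output(TensorShape(21U, 17U, 2U), 1, DataType::F32);

    const GEMMLHSMatrixInfo lhs_info(4, 4, 1, false, true); // m0, k0, v0, transpose, interleave
    const GEMMRHSMatrixInfo rhs_info(4, 4, 1, true, true);  // n0, k0, h0, transpose, interleave

    // Un-reshaped GEMM dimensions; 17/21/13 match the M/N/K values set earlier
    // in this patch. Member names are assumptions, not confirmed by the diff.
    GEMMKernelInfo gemm_info{};
    gemm_info.m              = 17;
    gemm_info.n              = 21;
    gemm_info.k              = 13;
    gemm_info.broadcast_bias = true; // 1D bias broadcast across output rows

    const Status status = CLGEMMMatrixMultiplyReshapedKernel::validate(
        &input0, &input1, &input2, &output, 1.f, 1.f, lhs_info, rhs_info, gemm_info);
    return bool(status); // Status converts to true when validation passed, as in the test
}

Wrapping the Status in bool(), as the test itself does, discards the error description and keeps only the pass/fail outcome, which is all the "Expected" column encodes.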