From 451c309179b784d19d333da31aec5a871c3ff2b6 Mon Sep 17 00:00:00 2001
From: Ramy Elgammal
Date: Tue, 1 Feb 2022 23:01:27 +0000
Subject: Revert "Rework gemm_mm_reshaped_only_rhs_ kernels with new macros"

This reverts commit 10e88a7351 "Rework gemm_mm_reshaped_only_rhs_ kernels with new macros"

Resolves: COMPMID-5095
Signed-off-by: Ramy Elgammal
Change-Id: I46e167882f072e7508b6101d295accb6e089e740
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/7045
Reviewed-by: Gian Marco Iodice
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
---
 .../CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp | 71 ++++++++++++++++------
 tests/validation/fixtures/GEMMFixture.h      |  1 -
 2 files changed, 52 insertions(+), 20 deletions(-)

diff --git a/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp b/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp
index 860082f32b..cfd98bd8f0 100644
--- a/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp
+++ b/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp
@@ -107,12 +107,6 @@ const auto act_values = framework::dataset::make("Activation",
     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 10.f),
 });
 
-/** Activation values to test */
-const auto act_identity = framework::dataset::make("Activation",
-{
-    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::IDENTITY),
-});
-
 /** M0 values to test - precommit */
 const auto m0_values_precommit = framework::dataset::make("M0", { 4 });
 
@@ -164,8 +158,8 @@ const auto boundary_handling_cases = combine(combine(combine(combine(combine(com
                     framework::dataset::make("export_to_cl_image_rhs", {true, false})),
                     // Only need to test F32 as F16 shares identical boundary handling logics
                     framework::dataset::make("DataType", DataType::F32)),
-                    framework::dataset::make("alpha", 1.0f )),
-                    framework::dataset::make("beta", 0.0f )),
+                    framework::dataset::make("alpha", -0.75f )),
+                    framework::dataset::make("beta", -0.35f )),
                     broadcast_bias_values),
                     framework::dataset::make("Activation", ActivationLayerInfo()));
 
@@ -176,7 +170,7 @@ experimental::PostOpList<PostOpArgBroadcast> post_ops_1()
     experimental::PostOpList<PostOpArgBroadcast> post_ops{};
     post_ops.push_back_op<experimental::PostOpAct<PostOpArgBroadcast>>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::LINEAR, 0.5F, 0.0F});
     post_ops.push_back_op<experimental::PostOpEltwiseAdd<PostOpArgBroadcast>>(
-        std::make_tuple(false, false, false),
+        std::make_tuple(true, true, false), // If broadcast in dims 0, 1 and 2
         0,
         ConvertPolicy::SATURATE);
     post_ops.push_back_op<experimental::PostOpAct<PostOpArgBroadcast>>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::RELU, 2.1F, 1.3F});
@@ -186,7 +180,7 @@ experimental::PostOpList<PostOpArgBroadcast> post_ops_2()
 {
     experimental::PostOpList<PostOpArgBroadcast> post_ops{};
     post_ops.push_back_op<experimental::PostOpEltwiseAdd<PostOpArgBroadcast>>(
-        std::make_tuple(false, false, false),
+        std::make_tuple(false, true, true), // If broadcast in dims 0, 1 and 2
         1,
         ConvertPolicy::SATURATE);
     post_ops.push_back_op<experimental::PostOpAct<PostOpArgBroadcast>>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::RELU, 2.1F, 1.3F});
@@ -195,18 +189,44 @@ experimental::PostOpList<PostOpArgBroadcast> post_ops_2()
 experimental::PostOpList<PostOpArgBroadcast> post_ops_3()
 {
     experimental::PostOpList<PostOpArgBroadcast> post_ops{};
+    post_ops.push_back_op<experimental::PostOpAct<PostOpArgBroadcast>>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::RELU, 2.1F, 1.3F});
     post_ops.push_back_op<experimental::PostOpEltwiseAdd<PostOpArgBroadcast>>(
-        std::make_tuple(false, false, false),
+        std::make_tuple(false, false, true), // If broadcast in dims 0, 1 and 2
         1,
         ConvertPolicy::SATURATE);
     return post_ops;
 }
-
+// To test that the output of the main op is the first parameter in prelu post op
+experimental::PostOpList<PostOpArgBroadcast> post_ops_4()
+{
+    experimental::PostOpList<PostOpArgBroadcast> post_ops{};
+    post_ops.push_back_op<experimental::PostOpAct<PostOpArgBroadcast>>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::LINEAR, 0.5F, 0.0F});
+    post_ops.push_back_op<experimental::PostOpEltwisePRelu<PostOpArgBroadcast>>(
+        std::make_tuple(false, false, true), // If true, broadcast in corresponding dim: 0, 1 or 2
+        0,
+        ConvertPolicy::SATURATE);
+    post_ops.push_back_op<experimental::PostOpAct<PostOpArgBroadcast>>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::RELU, 2.1F, 1.3F});
+    return post_ops;
+}
+// To test that the output of the main op is the second parameter in prelu post op i.e. it is the alpha_param
+experimental::PostOpList<PostOpArgBroadcast> post_ops_5()
+{
+    experimental::PostOpList<PostOpArgBroadcast> post_ops{};
+    post_ops.push_back_op<experimental::PostOpAct<PostOpArgBroadcast>>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::LINEAR, 0.5F, 0.0F});
+    post_ops.push_back_op<experimental::PostOpEltwisePRelu<PostOpArgBroadcast>>(
+        std::make_tuple(false, false, false), // If true, broadcast in corresponding dim: 0, 1 or 2
+        1,
+        ConvertPolicy::SATURATE);
+    post_ops.push_back_op<experimental::PostOpAct<PostOpArgBroadcast>>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::RELU, 2.1F, 1.3F});
+    return post_ops;
+}
 /** Different Post Op Lists */
 const auto post_op_lists = framework::dataset::make("post_op_lists", {
     post_ops_1(),
     post_ops_2(),
-    post_ops_3()
+    post_ops_3(),
+    post_ops_4(),
+    post_ops_5()
 } );
 
 bool is_post_op_list_valid(unsigned int m, unsigned int n, unsigned int k, unsigned int batch, DataType data_type, const experimental::PostOpList<ITensorInfo*>& post_ops)
@@ -446,7 +466,20 @@ TEST_CASE(BroadcastInBothXandYDims, framework::DatasetMode::ALL)
     ARM_COMPUTE_EXPECT(is_post_op_list_valid(m, n, k, batch, data_type, post_ops) == true, framework::LogLevel::ERRORS);
 }
 
+TEST_CASE(BroadcastInAllDims, framework::DatasetMode::ALL)
+{
+    const auto data_type     = DataType::F32;
+    const unsigned int m     = 22;
+    const unsigned int n     = 16;
+    const unsigned int k     = 15;
+    const unsigned int batch = 3;
+    TensorShape post_op_arg_shape(1, 1, 1);
+    TensorInfo post_op_arg_info(post_op_arg_shape, 1, data_type);
+    experimental::PostOpList<ITensorInfo*> post_ops{};
+    post_ops.push_back_op<experimental::PostOpEltwiseAdd<ITensorInfo*>>( &post_op_arg_info, 0, ConvertPolicy::SATURATE);
+    ARM_COMPUTE_EXPECT(is_post_op_list_valid(m, n, k, batch, data_type, post_ops) == true, framework::LogLevel::ERRORS);
+}
 TEST_SUITE_END() // Valid
 TEST_SUITE_END() // ValidateFusedPostOps
 TEST_SUITE(Float)
@@ -600,7 +633,7 @@ FIXTURE_DATA_TEST_CASE(RunPrecommit3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixt
                     i_values_rhs),
                     t_values_rhs),
                     framework::dataset::make("export_to_cl_image_rhs", {false, true})),
-                    framework::dataset::make("has_pad_y", {false})),
+                    framework::dataset::make("has_pad_y", {false, true})),
                     framework::dataset::make("DataType", DataType::F32)),
                     a_values),
                     beta_values),
@@ -632,7 +665,7 @@ FIXTURE_DATA_TEST_CASE(RunNightly3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixtur
                     i_values_rhs),
                     t_values_rhs),
                     framework::dataset::make("export_to_cl_image_rhs", {false, true})),
-                    framework::dataset::make("has_pad_y", {false})),
+                    framework::dataset::make("has_pad_y", {false, true})),
                     framework::dataset::make("DataType", DataType::F32)),
                     a_values),
                     beta_values),
@@ -669,7 +702,7 @@ FIXTURE_DATA_TEST_CASE(RunPrecommit, CLGEMMMatrixMultiplyReshapedOnlyRHSWithPost
                     a_values),
                     beta_values),
                     framework::dataset::make("broadcast_bias", { false } )),
-                    act_identity),
+                    act_values),
                     post_op_lists)
                     )
 {
@@ -766,7 +799,7 @@ FIXTURE_DATA_TEST_CASE(RunPrecommit3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixt
                     i_values_rhs),
                     t_values_rhs),
                     framework::dataset::make("export_to_cl_image_rhs", true)),
-                    framework::dataset::make("has_pad_y", {false})),
+                    framework::dataset::make("has_pad_y", {false, true})),
                     framework::dataset::make("DataType", DataType::F16)),
                     a_values),
                     beta_values),
@@ -798,7 +831,7 @@ FIXTURE_DATA_TEST_CASE(RunNightly3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixtur
                     i_values_rhs),
                     t_values_rhs),
                     framework::dataset::make("export_to_cl_image_rhs", true)),
-                    framework::dataset::make("has_pad_y", {false})),
+                    framework::dataset::make("has_pad_y", {false, true})),
                     framework::dataset::make("DataType", DataType::F16)),
                     a_values),
                     beta_values),
@@ -834,7 +867,7 @@ FIXTURE_DATA_TEST_CASE(RunPrecommit, CLGEMMMatrixMultiplyReshapedOnlyRHSWithPost
                     a_values),
                     beta_values),
                     framework::dataset::make("broadcast_bias", { false } )),
-                    act_identity),
+                    act_values),
                     post_op_lists)
                     )
 {
diff --git a/tests/validation/fixtures/GEMMFixture.h b/tests/validation/fixtures/GEMMFixture.h
index 95dcd70104..8b748032fe 100644
--- a/tests/validation/fixtures/GEMMFixture.h
+++ b/tests/validation/fixtures/GEMMFixture.h
@@ -1551,7 +1551,6 @@ public:
         const TensorShape bias_shape(n,
                                      broadcast_bias ? 1 : m,
                                      broadcast_bias ? 1 : batch_size);
-
         auto post_ops_with_shapes = experimental::transform_post_op_list_arguments(post_ops,
                                                                                    [ = ](auto broadcast)
         {
-- 
cgit v1.2.1
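The broadcast tuples pushed into the post-op lists above (e.g. std::make_tuple(false, true, true)) describe which dimensions of a fused post-op argument collapse to extent 1 relative to the GEMM output, which the fixture lays out as n wide, m high and batch deep. The standalone sketch below illustrates that mapping only; it is not ComputeLibrary code, and the helper name post_op_arg_shape, the dimension ordering (n, m, batch) and the sample sizes are assumptions taken from the fixture's transform lambda and the BroadcastInAllDims test values in this patch.

    // Standalone sketch (not ComputeLibrary code): map a broadcast-flag tuple to the
    // shape of a fused post-op argument, mirroring what the GEMM fixture does with
    // the tuples pushed in post_ops_1() .. post_ops_5() above.
    #include <array>
    #include <cstdio>
    #include <tuple>

    // A set flag means that dimension is broadcast, i.e. the argument has extent 1 there.
    std::array<unsigned int, 3> post_op_arg_shape(std::tuple<bool, bool, bool> broadcast,
                                                  unsigned int n, unsigned int m, unsigned int batch)
    {
        return { std::get<0>(broadcast) ? 1U : n,     // dim 0: width of the GEMM output (N)
                 std::get<1>(broadcast) ? 1U : m,     // dim 1: height of the GEMM output (M)
                 std::get<2>(broadcast) ? 1U : batch  // dim 2: batch
        };
    }

    int main()
    {
        // Same flags as post_ops_2(); m, n and batch follow the BroadcastInAllDims test case.
        const auto shape = post_op_arg_shape(std::make_tuple(false, true, true), 16U, 22U, 3U);
        std::printf("post-op argument shape: (%u, %u, %u)\n", shape[0], shape[1], shape[2]); // prints (16, 1, 1)
        return 0;
    }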