From 0d27b2ee8d811d66693555ac1e7be44d93e662e2 Mon Sep 17 00:00:00 2001
From: Jakub Sujak
Date: Thu, 24 Aug 2023 14:01:20 +0100
Subject: Remove legacy PostOps code

PostOps was the experimental interface for Dynamic Fusion. It is now
replaced by the new Dynamic Fusion interface with code generation using
the Compute Kernel Writer.

Resolves: COMPMID-6190

Change-Id: I813b48facef2fd6f3aee332588886b4f9b3d33d8
Signed-off-by: Jakub Sujak
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10219
Benchmark: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: SiCong Li
Comments-Addressed: Arm Jenkins
---
 tests/validation/CL/GEMMMatrixMultiplyNative.cpp | 244 +----------------------
 1 file changed, 1 insertion(+), 243 deletions(-)

(limited to 'tests/validation/CL/GEMMMatrixMultiplyNative.cpp')

diff --git a/tests/validation/CL/GEMMMatrixMultiplyNative.cpp b/tests/validation/CL/GEMMMatrixMultiplyNative.cpp
index 7f63a03371..0ddf43766f 100644
--- a/tests/validation/CL/GEMMMatrixMultiplyNative.cpp
+++ b/tests/validation/CL/GEMMMatrixMultiplyNative.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2022 Arm Limited.
+ * Copyright (c) 2019-2023 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -53,11 +53,6 @@ using CLGEMMMatrixMultiplyNative = CLSynthetizeOperator
 using CLGEMMMatrixMultiplyNativeFixture = GEMMMatrixMultiplyNativeValidationFixture;
-// Fixture for CLGEMMMatrixMultiplyNative with post ops
-template
-using CLGEMMMatrixMultiplyNativeWithPostOpsFixture =
-    GEMMMatrixMultiplyNativeWithPostOpsValidationFixture;
-
 // Fixture for CLGEMMMatrixMultiplyNative3D
 template
 using CLGEMMMatrixMultiplyNative3DFixture = GEMMMatrixMultiplyNative3DValidationFixture;
@@ -146,105 +141,6 @@ const auto boundary_handling_cases = combine(combine(combine(combine(combine(com
 broadcast_bias_values),
 framework::dataset::make("Activation", ActivationLayerInfo()));
-/** Post Ops */
-using PostOpArgBroadcast = CLGEMMMatrixMultiplyNativeWithPostOpsFixture::PostOpArgBroadcast;
-experimental::PostOpList post_ops_1()
-{
-    experimental::PostOpList post_ops{};
-    post_ops.push_back_op>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::LINEAR, 0.5F, 0.0F});
-    post_ops.push_back_op>(
-        std::make_tuple(true, true, false),  // If broadcast in dims 0, 1 and 2
-        0,
-        ConvertPolicy::SATURATE);
-    post_ops.push_back_op>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::RELU, 2.1F, 1.3F});
-    return post_ops;
-}
-experimental::PostOpList post_ops_2()
-{
-    experimental::PostOpList post_ops{};
-    post_ops.push_back_op>(
-        std::make_tuple(false, true, true),  // If broadcast in dims 0, 1 and 2
-        1,
-        ConvertPolicy::SATURATE);
-    post_ops.push_back_op>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::RELU, 2.1F, 1.3F});
-    return post_ops;
-}
-experimental::PostOpList post_ops_3()
-{
-    experimental::PostOpList post_ops{};
-    // post_ops.push_back_op>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::RELU, 2.1F, 1.3F});
-    post_ops.push_back_op>(
-        std::make_tuple(false, false, false),  // If broadcast in dims 0, 1 and 2
-        1,
-        ConvertPolicy::SATURATE);
-    return post_ops;
-}
-// To test that the output of the main op is the first parameter in prelu post op
-experimental::PostOpList post_ops_4()
-{
-    experimental::PostOpList post_ops{};
-    post_ops.push_back_op>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::LINEAR, 0.5F, 0.0F});
-    post_ops.push_back_op>(
-        std::make_tuple(false, false, true),  // If true, broadcast in corresponding dim: 0, 1 or 2
-        0,
-        ConvertPolicy::SATURATE);
-    post_ops.push_back_op>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::RELU, 2.1F, 1.3F});
-    return post_ops;
-}
-// To test that the output of the main op is the second parameter in prelu post op i.e. it is the alpha_param
-experimental::PostOpList post_ops_5()
-{
-    experimental::PostOpList post_ops{};
-    post_ops.push_back_op>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::LINEAR, 0.5F, 0.0F});
-    post_ops.push_back_op>(
-        std::make_tuple(false, false, false),  // If true, broadcast in corresponding dim: 0, 1 or 2
-        1,
-        ConvertPolicy::SATURATE);
-    post_ops.push_back_op>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::RELU, 2.1F, 1.3F});
-    return post_ops;
-}
-/** Different Post Op Lists */
-const auto post_op_lists = framework::dataset::make("post_op_lists", {
-    post_ops_1(),
-    post_ops_2(),
-    post_ops_3(),
-    post_ops_4(),
-    post_ops_5()
-    } );
-
-bool is_post_op_list_valid(unsigned int m, unsigned int n, unsigned int k, unsigned int batch, DataType data_type, const experimental::PostOpList& post_ops)
-{
-    const auto lhs_info = GEMMLHSMatrixInfo(4,4,1,false,true);
-    const auto rhs_info = GEMMRHSMatrixInfo(4,4,1,true,true,false);
-
-    // Create TensorInfo for post op arguments
-    TensorInfo input0_info(TensorShape(k, m, batch), 1, data_type);
-    TensorInfo input1_info(TensorShape(n, k, batch), 1, data_type);
-    TensorInfo input2_info(TensorShape(n), 1, data_type);
-    TensorInfo output_info(TensorShape(n, m, batch), 1, data_type);
-
-    GEMMKernelInfo gemm_info(m, n, k, 0 /**< Depth of the output tensor in case is reinterpreted as 3D */,
-                             false /**< reinterpret the input as 3D */,
-                             true /**< Flag used to broadcast the bias addition */,
-                             false /**< wider accumm */,
-                             false /**< has pad y */,
-                             ActivationLayerInfo::ActivationFunction::IDENTITY,
-                             1 /**< Multiplication factor for the width of the 1xW transposed block */,
-                             1 /**< Multiplication factor for the height of the 4x4 interleaved block */,
-                             lhs_info,
-                             rhs_info,
-                             0 /**< Offset to be added to each element of the matrix A */,
-                             0 /**< Offset to be added to each element of the matrix B */,
-                             post_ops);
-    return bool(ClGemmMatrixMultiplyNativeKernel::validate(&input0_info.clone()->set_is_resizable(true),
-                                                           &input1_info.clone()->set_is_resizable(true),
-                                                           &input2_info.clone()->set_is_resizable(true),
-                                                           &output_info.clone()->set_is_resizable(true),1.f,1.f,
-                                                           lhs_info,
-                                                           rhs_info,
-                                                           gemm_info));
-}
-
 /** Configuration test */
 void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned int k_value, unsigned int b_value, unsigned int m0_value, unsigned int n0_value, unsigned int k0_value, bool broadcast_bias, DataType data_type, const ActivationLayerInfo &act_info)
 {
@@ -295,119 +191,6 @@ void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned
 TEST_SUITE(CL)
 TEST_SUITE(GEMMMatrixMultiplyNative)
-TEST_SUITE(ValidateFusedPostOpsConfigs)
-TEST_SUITE(Invalid)
-TEST_CASE(UnsupportedPostOpSequence, framework::DatasetMode::ALL)
-{
-    const auto data_type = DataType::F32;
-    const unsigned int m = 17;
-    const unsigned int n = 1;
-    const unsigned int k = 13;
-    const unsigned int batch = 2;
-    TensorShape post_op_arg0_shape(n, m, batch);
-    TensorInfo post_op_arg_info(post_op_arg0_shape, 1, data_type);
-    auto post_op_arg1_info = post_op_arg_info.clone();
-
-    // Unsupported sequence of post ops
-    experimental::PostOpList post_ops{};
-    post_ops.push_back_op>(
-        &post_op_arg_info,
-        1,
-        ConvertPolicy::SATURATE);
-    post_ops.push_back_op>(
-        post_op_arg1_info.get(),
-        0,
-        ConvertPolicy::SATURATE);
-
-    ARM_COMPUTE_EXPECT(is_post_op_list_valid(m, n, k, batch, data_type, post_ops) == false, framework::LogLevel::ERRORS);
-}
-TEST_CASE(OutputWidened, framework::DatasetMode::ALL)
-{
-    // Invalid broadcast: post op tensors "widen" the output tensor
-    const auto data_type = DataType::F32;
-    const unsigned int m = 1;
-    const unsigned int n = 18;
-    const unsigned int k = 13;
-    const unsigned int batch = 2;
-    TensorShape post_op_arg_shape(n, m + 1, batch); // output's Y dimension (m) is "widened", which is not allowed
-    TensorInfo post_op_arg_info(post_op_arg_shape, 1, data_type);
-    experimental::PostOpList post_ops{};
-    post_ops.push_back_op>( &post_op_arg_info, 0, ConvertPolicy::SATURATE);
-
-    ARM_COMPUTE_EXPECT(is_post_op_list_valid(m, n, k, batch, data_type, post_ops) == false, framework::LogLevel::ERRORS);
-}
-TEST_CASE(BroadcastInXDimOnly, framework::DatasetMode::ALL)
-{
-    // Invalid broadcast: post op tensors broadcast in the first dimension (X) only
-    const auto data_type = DataType::F32;
-    const unsigned int m = 22;
-    const unsigned int n = 16;
-    const unsigned int k = 15;
-    const unsigned int batch = 3;
-    TensorShape post_op_arg_shape(1, m, batch);
-    TensorInfo post_op_arg_info(post_op_arg_shape, 1, data_type);
-    experimental::PostOpList post_ops{};
-    post_ops.push_back_op>( &post_op_arg_info, 0, ConvertPolicy::SATURATE);
-
-    ARM_COMPUTE_EXPECT(is_post_op_list_valid(m, n, k, batch, data_type, post_ops) == false, framework::LogLevel::ERRORS);
-}
-TEST_SUITE_END() // Invalid
-TEST_SUITE(Valid)
-TEST_CASE(EmptyPostOpList, framework::DatasetMode::ALL)
-{
-    const auto data_type = DataType::F32;
-    const unsigned int m = 22;
-    const unsigned int n = 16;
-    const unsigned int k = 15;
-    const unsigned int batch = 3;
-    experimental::PostOpList post_ops{};
-
-    ARM_COMPUTE_EXPECT(is_post_op_list_valid(m, n, k, batch, data_type, post_ops) == true, framework::LogLevel::ERRORS);
-}
-TEST_CASE(BroadcastInYDimOnly, framework::DatasetMode::ALL)
-{
-    const auto data_type = DataType::F32;
-    const unsigned int m = 22;
-    const unsigned int n = 16;
-    const unsigned int k = 15;
-    const unsigned int batch = 3;
-    TensorShape post_op_arg_shape(n, 1, batch);
-    TensorInfo post_op_arg_info(post_op_arg_shape, 1, data_type);
-    experimental::PostOpList post_ops{};
-    post_ops.push_back_op>( &post_op_arg_info, 0, ConvertPolicy::SATURATE);
-
-    ARM_COMPUTE_EXPECT(is_post_op_list_valid(m, n, k, batch, data_type, post_ops) == true, framework::LogLevel::ERRORS);
-}
-TEST_CASE(BroadcastInBothXandYDims, framework::DatasetMode::ALL)
-{
-    const auto data_type = DataType::F32;
-    const unsigned int m = 22;
-    const unsigned int n = 16;
-    const unsigned int k = 15;
-    const unsigned int batch = 3;
-    TensorShape post_op_arg_shape(1, 1, batch);
-    TensorInfo post_op_arg_info(post_op_arg_shape, 1, data_type);
-    experimental::PostOpList post_ops{};
-    post_ops.push_back_op>( &post_op_arg_info, 0, ConvertPolicy::SATURATE);
-
-    ARM_COMPUTE_EXPECT(is_post_op_list_valid(m, n, k, batch, data_type, post_ops) == true, framework::LogLevel::ERRORS);
-}
-TEST_CASE(BroadcastInAllDims, framework::DatasetMode::ALL)
-{
-    const auto data_type = DataType::F32;
-    const unsigned int m = 22;
-    const unsigned int n = 16;
-    const unsigned int k = 15;
-    const unsigned int batch = 3;
-    TensorShape post_op_arg_shape(1, 1, 1);
-    TensorInfo post_op_arg_info(post_op_arg_shape, 1, data_type);
-    experimental::PostOpList post_ops{};
-    post_ops.push_back_op>( &post_op_arg_info, 0, ConvertPolicy::SATURATE);
-
-    ARM_COMPUTE_EXPECT(is_post_op_list_valid(m, n, k, batch, data_type, post_ops) == true, framework::LogLevel::ERRORS);
-}
-TEST_SUITE_END() // Valid
-TEST_SUITE_END() // ValidateFusedPostOps
 TEST_SUITE(Float)
 TEST_SUITE(FP32)
 DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(combine(
@@ -541,31 +324,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyNative3DFixture, f
     validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
 }
-TEST_SUITE(FusedPostOps)
-
-FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyNativeWithPostOpsFixture, framework::DatasetMode::ALL,
-    combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
-    m_values,
-    n_values),
-    k_values),
-    b_values),
-    framework::dataset::make("M0", { 4 })),
-    n0_values_precommit),
-    k0_values_precommit),
-    framework::dataset::make("DataType", DataType::F32)),
-    framework::dataset::make("alpha", {1.0f} )),
-    framework::dataset::make("beta", {1.0f} )),
-    framework::dataset::make("broadcast_bias", { false, true } )),
-    framework::dataset::make("Activation", { ActivationLayerInfo() })),
-    post_op_lists)
-    )
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
-}
-
-TEST_SUITE_END() // FusedPostOps
-
 TEST_SUITE_END() // FP32
 TEST_SUITE_END() // Float
 TEST_SUITE_END() // GEMMMatrixMulipltyNative
--
cgit v1.2.1