From ca1f460ec33e84b9df84e29de3c3b733e6042b9c Mon Sep 17 00:00:00 2001
From: Gian Marco Iodice
Date: Tue, 16 Jul 2019 15:46:48 +0100
Subject: COMPMID-1979: Fuse Activation Function in CLGEMM - part 2

Fuse activation function in:
CLGEMMMatrixMultiplyNativeKernel
CLGEMMMatrixMultiplyReshapedKernel
CLGEMMMatrixMultiplyReshapedOnlyRHSKernel

Change-Id: I033ace2bdc58903594c9f31175e4b23c4b559f6f
Signed-off-by: Gian Marco Iodice
Reviewed-on: https://review.mlplatform.org/c/1565
Reviewed-by: Michele Di Giorgio
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Giuseppe Rossini
---
 tests/framework/Macros.h                            |  7 +-
 tests/validation/CL/GEMMMatrixMultiplyNative.cpp    | 65 ++++++++++------
 tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp  | 57 +++++++++-----
 .../CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp        | 57 +++++++++-----
 tests/validation/fixtures/GEMMFixture.h             | 87 +++++++++++++---------
 5 files changed, 171 insertions(+), 102 deletions(-)

(limited to 'tests')
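For orientation, the contract these tests pin down: the activation is applied by the GEMM kernel itself, as a fused epilogue, rather than by a separate activation pass over the GEMM output. A minimal scalar sketch of that semantics (helper names here are hypothetical, not part of the patch; LU_BOUNDED_RELU(a, b) clamps to the interval [b, a], instantiated below with a = 8.f and b = 2.f as in the test datasets):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Scalar model of the fused epilogue: dst = act(alpha * (lhs x rhs) + beta * bias),
    // computed in one pass instead of GEMM followed by a standalone activation kernel.
    static float lu_bounded_relu(float x, float a, float b)
    {
        return std::min(a, std::max(b, x)); // clamp to [b, a]
    }

    static void gemm_act_reference(const std::vector<float> &lhs, const std::vector<float> &rhs,
                                   const std::vector<float> &bias, std::vector<float> &dst,
                                   std::size_t m, std::size_t n, std::size_t k, float alpha, float beta)
    {
        for(std::size_t r = 0; r < m; ++r)
        {
            for(std::size_t c = 0; c < n; ++c)
            {
                float acc = 0.f;
                for(std::size_t i = 0; i < k; ++i)
                {
                    acc += lhs[r * k + i] * rhs[i * n + c];
                }
                // The activation is the last step, after alpha/beta scaling and bias addition.
                dst[r * n + c] = lu_bounded_relu(alpha * acc + beta * bias[c], 8.f, 2.f);
            }
        }
    }

This mirrors what the fixtures below validate: compute_reference() now returns reference::activation_layer(reference::gemm<T>(...), act_info).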
diff --git a/tests/framework/Macros.h b/tests/framework/Macros.h
index 134f75e287..669bafeac7 100644
--- a/tests/framework/Macros.h
+++ b/tests/framework/Macros.h
@@ -49,8 +49,8 @@
 
 #define CONCAT(ARG0, ARG1) ARG0##ARG1
 
-#define VARIADIC_SIZE_IMPL(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, size, ...) size
-#define VARIADIC_SIZE(...) VARIADIC_SIZE_IMPL(__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+#define VARIADIC_SIZE_IMPL(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, size, ...) size
+#define VARIADIC_SIZE(...) VARIADIC_SIZE_IMPL(__VA_ARGS__, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
 
 #define JOIN_PARAM1(OP, param) OP(0, param)
 #define JOIN_PARAM2(OP, param, ...) \
@@ -86,6 +86,9 @@
 #define JOIN_PARAM12(OP, param, ...) \
     OP(11, param)                    \
     , JOIN_PARAM11(OP, __VA_ARGS__)
+#define JOIN_PARAM13(OP, param, ...) \
+    OP(12, param)                    \
+    , JOIN_PARAM12(OP, __VA_ARGS__)
 #define JOIN_PARAM(OP, NUM, ...) \
     CONCAT(JOIN_PARAM, NUM)     \
     (OP, __VA_ARGS__)
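The Macros.h hunk is the standard preprocessor argument-counting idiom, widened from 12 to 13 slots because every fixture now takes one extra setup parameter (the ActivationLayerInfo). A standalone sketch of the expansion, under the same definitions:

    #define VARIADIC_SIZE_IMPL(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, size, ...) size
    #define VARIADIC_SIZE(...) VARIADIC_SIZE_IMPL(__VA_ARGS__, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)

    // Passing three arguments shifts the descending table right by three, so the
    // literal that lands on the `size` parameter is exactly the argument count:
    //   VARIADIC_SIZE(a, b, c)
    //   -> VARIADIC_SIZE_IMPL(a, b, c, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
    //   -> 3
    static_assert(VARIADIC_SIZE(a, b, c) == 3, "three arguments");

JOIN_PARAM13 follows mechanically: with 13 parameters to bind, the macro recursion needs one more step.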
diff --git a/tests/validation/CL/GEMMMatrixMultiplyNative.cpp b/tests/validation/CL/GEMMMatrixMultiplyNative.cpp
index 031b807ef8..33b01d8ee6 100644
--- a/tests/validation/CL/GEMMMatrixMultiplyNative.cpp
+++ b/tests/validation/CL/GEMMMatrixMultiplyNative.cpp
@@ -90,6 +90,13 @@ const auto k_values = framework::dataset::make("K", 23);
 /** Batch size values to test */
 const auto b_values = framework::dataset::make("batch_size", 1, 3);
 
+/** Activation values to test */
+const auto act_values = framework::dataset::make("Activation",
+{
+    ActivationLayerInfo(),
+    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 8.f, 2.f),
+});
+
 /** M0 values to test - Precommit */
 const auto m0_values_precommit = framework::dataset::make("M0", {4, 6});
 
@@ -115,7 +122,7 @@ const auto k0_values_nightly = framework::dataset::make("K0", { 2, 3, 4, 8 });
 const auto broadcast_bias_values = framework::dataset::make("broadcast_bias", {false, true} );
 
 /** Configuration test */
-void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned int k_value, unsigned int b_value, unsigned int m0_value, unsigned int n0_value, unsigned int k0_value, bool broadcast_bias, DataType data_type)
+void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned int k_value, unsigned int b_value, unsigned int m0_value, unsigned int n0_value, unsigned int k0_value, bool broadcast_bias, DataType data_type, const ActivationLayerInfo &act_info)
 {
     const unsigned int M = m_value;
     const unsigned int N = n_value;
@@ -130,10 +137,11 @@ void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned
     rhs_info.k0 = k0_value;
 
     GEMMKernelInfo kernel_info;
-    kernel_info.m              = M;
-    kernel_info.n              = N;
-    kernel_info.k              = K;
-    kernel_info.broadcast_bias = broadcast_bias;
+    kernel_info.m               = M;
+    kernel_info.n               = N;
+    kernel_info.k               = K;
+    kernel_info.broadcast_bias  = broadcast_bias;
+    kernel_info.activation_info = act_info;
 
     const TensorShape lhs_shape(K, M, b_value);
     const TensorShape rhs_shape(N, K, b_value);
@@ -165,7 +173,7 @@ TEST_SUITE(CL)
 TEST_SUITE(GEMMMatrixMultiplyNative)
 TEST_SUITE(Float)
 TEST_SUITE(FP32)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(combine(
                                                                    m_values,
                                                                    n_values),
                                                                    k_values),
@@ -174,13 +182,14 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combi
                                                                    n0_values_precommit),
                                                                    k0_values_precommit),
                                                                    broadcast_bias_values),
-m_value, n_value, k_value, b_value, m0_value, n0_value, k0_value, broadcast_bias)
+                                                                   act_values),
+m_value, n_value, k_value, b_value, m0_value, n0_value, k0_value, broadcast_bias, act_value)
 {
-    validate_configuration(m_value, n_value, k_value, b_value, m0_value, n0_value, k0_value, broadcast_bias, DataType::F32);
+    validate_configuration(m_value, n_value, k_value, b_value, m0_value, n0_value, k0_value, broadcast_bias, DataType::F32, act_value);
 }
 
 FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyNativeFixture<float>, framework::DatasetMode::ALL,
-                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                    m_values,
                                                                    n_values),
                                                                    k_values),
@@ -191,14 +200,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyNativeFixture<float>, frame
                                                                    framework::dataset::make("DataType", DataType::F32)),
                                                                    a_values),
                                                                    beta_values),
-                                                                   broadcast_bias_values))
+                                                                   broadcast_bias_values),
+                                                                   act_values))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
 }
 
 FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMMatrixMultiplyNativeFixture<float>, framework::DatasetMode::NIGHTLY,
-                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                    m_values,
                                                                    n_values),
                                                                    k_values),
@@ -209,14 +219,15 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMMatrixMultiplyNativeFixture<float>, frame
                                                                    framework::dataset::make("DataType", DataType::F32)),
                                                                    a_values),
                                                                    beta_values),
-                                                                   broadcast_bias_values))
+                                                                   broadcast_bias_values),
+                                                                   act_values))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
 }
 
 FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMMatrixMultiplyNative3DFixture<float>, framework::DatasetMode::ALL,
-                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                    m_w_values,
                                                                    m_h_values),
                                                                    n_values),
@@ -227,14 +238,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMMatrixMultiplyNative3DFixture<float>, f
                                                                    k0_values_precommit),
                                                                    framework::dataset::make("DataType", DataType::F32)),
                                                                    a_values),
-                                                                   beta_values))
+                                                                   beta_values),
+                                                                   act_values))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
 }
 
 FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyNative3DFixture<float>, framework::DatasetMode::NIGHTLY,
-                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                    m_w_values,
                                                                    m_h_values),
                                                                    n_values),
@@ -245,7 +257,8 @@ FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyNative3DFixture<float>, f
                                                                    k0_values_nightly),
                                                                    framework::dataset::make("DataType", DataType::F32)),
                                                                    a_values),
-                                                                   beta_values))
+                                                                   beta_values),
+                                                                   act_values))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
@@ -254,7 +267,7 @@ TEST_SUITE_END() // FP32
 
 TEST_SUITE(FP16)
 FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyNativeFixture<half>, framework::DatasetMode::ALL,
-                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                    m_values,
                                                                    n_values),
                                                                    k_values),
@@ -265,14 +278,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyNativeFixture<half>, framew
                                                                    framework::dataset::make("DataType", DataType::F16)),
                                                                    a_values),
                                                                    beta_values),
-                                                                   broadcast_bias_values))
+                                                                   broadcast_bias_values),
+                                                                   act_values))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
 }
 
 FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMMatrixMultiplyNativeFixture<half>, framework::DatasetMode::NIGHTLY,
-                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                    m_values,
                                                                    n_values),
                                                                    k_values),
@@ -283,14 +297,15 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMMatrixMultiplyNativeFixture<half>, framew
                                                                    framework::dataset::make("DataType", DataType::F16)),
                                                                    a_values),
                                                                    beta_values),
-                                                                   broadcast_bias_values))
+                                                                   broadcast_bias_values),
+                                                                   act_values))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
 }
 
 FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMMatrixMultiplyNative3DFixture<half>, framework::DatasetMode::ALL,
-                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                    m_w_values,
                                                                    m_h_values),
                                                                    n_values),
@@ -301,14 +316,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMMatrixMultiplyNative3DFixture<half>, fr
                                                                    k0_values_precommit),
                                                                    framework::dataset::make("DataType", DataType::F16)),
                                                                    a_values),
-                                                                   beta_values))
+                                                                   beta_values),
+                                                                   act_values))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
 }
 
 FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyNative3DFixture<half>, framework::DatasetMode::NIGHTLY,
-                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                    m_w_values,
                                                                    m_h_values),
                                                                    n_values),
@@ -319,7 +335,8 @@ FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyNative3DFixture<half>, fr
                                                                    k0_values_nightly),
                                                                    framework::dataset::make("DataType", DataType::F16)),
                                                                    a_values),
-                                                                   beta_values))
+                                                                   beta_values),
+                                                                   act_values))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
diff --git a/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp b/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp
index bd70ddbe8c..99af2965d2 100644
--- a/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp
+++ b/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp
@@ -98,6 +98,13 @@ const auto k_values = framework::dataset::make("K", 23);
 /** Batch size values to test */
 const auto b_values = framework::dataset::make("batch_size", 1, 3);
 
+/** Activation values to test */
+const auto act_values = framework::dataset::make("Activation",
+{
+    ActivationLayerInfo(),
+    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 8.f, 2.f),
+});
+
 /** M0 values to test - Precommit */
 const auto m0_values_precommit = framework::dataset::make("M0", {4, 6});
 
@@ -138,7 +145,7 @@ const auto i_values_rhs = framework::dataset::make("interleave_rhs", { true, fal
 const auto broadcast_bias_values = framework::dataset::make("broadcast_bias", {false, true} );
 
 /** Configuration test */
-void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned int k_value, unsigned int b_value, unsigned int m0_value, unsigned int n0_value, unsigned int k0_value, unsigned int v0_value, unsigned int h0_value, bool i_value_lhs, bool i_value_rhs, bool broadcast_bias, DataType data_type)
+void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned int k_value, unsigned int b_value, unsigned int m0_value, unsigned int n0_value, unsigned int k0_value, unsigned int v0_value, unsigned int h0_value, bool i_value_lhs, bool i_value_rhs, bool broadcast_bias, DataType data_type, const ActivationLayerInfo &act_info)
 {
     const unsigned int M = m_value;
     const unsigned int N = n_value;
@@ -165,6 +172,7 @@ void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned
     kernel_info.depth_output_gemm3d     = 0;
     kernel_info.reinterpret_input_as_3d = false;
     kernel_info.broadcast_bias          = broadcast_bias;
+    kernel_info.activation_info         = act_info;
 
     const TensorShape lhs_shape(K, M, b_value);
     const TensorShape lhs_shape_reshaped = compute_lhs_reshaped_shape(TensorInfo(lhs_shape, 1, data_type),
@@ -204,7 +212,7 @@ TEST_SUITE(CL)
 TEST_SUITE(GEMMMatrixMultiplyReshaped)
 TEST_SUITE(Float)
 TEST_SUITE(FP32)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                    m_values,
                                                                    n_values),
                                                                    k_values),
@@ -217,13 +225,14 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combi
                                                                    i_values_lhs),
                                                                    i_values_rhs),
                                                                    broadcast_bias_values),
-m_value, n_value, k_value, b_value, m0_value, n0_value, k0_value, v0_value, h0_value, i_value_lhs, i_value_rhs, broadcast_bias)
+                                                                   act_values),
+m_value, n_value, k_value, b_value, m0_value, n0_value, k0_value, v0_value, h0_value, i_value_lhs, i_value_rhs, broadcast_bias, act_value)
 {
-    validate_configuration(m_value, n_value, k_value, b_value, m0_value, n0_value, k0_value, v0_value, h0_value, i_value_lhs, i_value_rhs, broadcast_bias, DataType::F32);
+    validate_configuration(m_value, n_value, k_value, b_value, m0_value, n0_value, k0_value, v0_value, h0_value, i_value_lhs, i_value_rhs, broadcast_bias, DataType::F32, act_value);
 }
 
 FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyReshapedFixture<float>, framework::DatasetMode::ALL,
-                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                    m_values,
                                                                    n_values),
                                                                    k_values),
@@ -238,14 +247,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyReshapedFixture<float>, fra
                                                                    framework::dataset::make("DataType", DataType::F32)),
                                                                    a_values),
                                                                    beta_values),
-                                                                   broadcast_bias_values))
+                                                                   broadcast_bias_values),
+                                                                   act_values))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
 }
 
 FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMMatrixMultiplyReshapedFixture<float>, framework::DatasetMode::NIGHTLY,
-                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                    m_values,
                                                                    n_values),
                                                                    k_values),
@@ -260,14 +270,15 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMMatrixMultiplyReshapedFixture<float>, fra
                                                                    framework::dataset::make("DataType", DataType::F32)),
                                                                    a_values),
                                                                    beta_values),
-                                                                   broadcast_bias_values))
+                                                                   broadcast_bias_values),
+                                                                   act_values))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
 }
 
 FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMMatrixMultiplyReshaped3DFixture<float>, framework::DatasetMode::ALL,
-                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                    m_w_values,
                                                                    m_h_values),
                                                                    n_values),
@@ -282,14 +293,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMMatrixMultiplyReshaped3DFixture<float>,
                                                                    i_values_rhs),
                                                                    framework::dataset::make("DataType", DataType::F32)),
                                                                    a_values),
-                                                                   beta_values))
+                                                                   beta_values),
+                                                                   act_values))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
 }
 
 FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyReshaped3DFixture<float>, framework::DatasetMode::NIGHTLY,
-                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                    m_w_values,
                                                                    m_h_values),
                                                                    n_values),
@@ -304,7 +316,8 @@ FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyReshaped3DFixture<float>,
                                                                    i_values_rhs),
                                                                    framework::dataset::make("DataType", DataType::F32)),
                                                                    a_values),
-                                                                   beta_values))
+                                                                   beta_values),
+                                                                   act_values))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
@@ -313,7 +326,7 @@ TEST_SUITE_END() // FP32
 
 TEST_SUITE(FP16)
 FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyReshapedFixture<half>, framework::DatasetMode::ALL,
-                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                    m_values,
                                                                    n_values),
                                                                    k_values),
@@ -328,14 +341,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyReshapedFixture<half>, fram
                                                                    framework::dataset::make("DataType", DataType::F16)),
                                                                    a_values),
                                                                    beta_values),
-                                                                   broadcast_bias_values))
+                                                                   broadcast_bias_values),
+                                                                   act_values))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
 }
 
 FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMMatrixMultiplyReshapedFixture<half>, framework::DatasetMode::NIGHTLY,
-                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                    m_values,
                                                                    n_values),
                                                                    k_values),
@@ -350,14 +364,15 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMMatrixMultiplyReshapedFixture<half>, fram
                                                                    framework::dataset::make("DataType", DataType::F16)),
                                                                    a_values),
                                                                    beta_values),
-                                                                   broadcast_bias_values))
+                                                                   broadcast_bias_values),
+                                                                   act_values))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
 }
 
 FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMMatrixMultiplyReshaped3DFixture<half>, framework::DatasetMode::ALL,
-                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                    m_w_values,
                                                                    m_h_values),
                                                                    n_values),
@@ -372,14 +387,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMMatrixMultiplyReshaped3DFixture<half>,
                                                                    i_values_rhs),
                                                                    framework::dataset::make("DataType", DataType::F16)),
                                                                    a_values),
-                                                                   beta_values))
+                                                                   beta_values),
+                                                                   act_values))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
 }
 
 FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyReshaped3DFixture<half>, framework::DatasetMode::NIGHTLY,
-                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                    m_w_values,
                                                                    m_h_values),
                                                                    n_values),
@@ -394,7 +410,8 @@ FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyReshaped3DFixture<half>,
                                                                    i_values_rhs),
                                                                    framework::dataset::make("DataType", DataType::F16)),
                                                                    a_values),
-                                                                   beta_values))
+                                                                   beta_values),
+                                                                   act_values))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
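The act_values dataset exercises both ends of the fused path: a default-constructed ActivationLayerInfo leaves the epilogue disabled, while the second entry asks the kernel to clamp each output to [2, 8]. A sketch of what each dataset entry turns into inside validate_configuration() (the M/N sizes here are placeholders, not the dataset values):

    // No fused activation: the info object reports enabled() == false and the
    // kernels skip the epilogue entirely.
    ActivationLayerInfo no_act{};

    // Fused LU_BOUNDED_RELU: f(x) = min(8.f, max(2.f, x)).
    ActivationLayerInfo clamp(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 8.f, 2.f);

    GEMMKernelInfo kernel_info;
    kernel_info.m               = 16; // placeholder sizes
    kernel_info.n               = 16;
    kernel_info.k               = 23; // K is 23 in these test files
    kernel_info.broadcast_bias  = true;
    kernel_info.activation_info = clamp;

Note that combine() forms the Cartesian product of its datasets, so appending the two-element act_values roughly doubles every test grid it joins.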
diff --git a/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp b/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp
index 6c34fc870a..dd993af481 100644
--- a/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp
+++ b/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp
@@ -94,6 +94,13 @@ const auto k_values = framework::dataset::make("K", 23);
 /** Batch size values to test */
 const auto b_values = framework::dataset::make("batch_size", 1, 3);
 
+/** Activation values to test */
+const auto act_values = framework::dataset::make("Activation",
+{
+    ActivationLayerInfo(),
+    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 8.f, 2.f),
+});
+
 /** M0 values to test - Precommit */
 const auto m0_values_precommit = framework::dataset::make("M0", {4, 6});
 
@@ -128,7 +135,7 @@ const auto t_values_rhs = framework::dataset::make("transpose_rhs", { true, fals
 const auto broadcast_bias_values = framework::dataset::make("broadcast_bias", {false, true} );
 
 /** Configuration test */
-void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned int k_value, unsigned int b_value, unsigned int m0_value, unsigned int n0_value, unsigned int k0_value, unsigned int h0_value, bool i_value_rhs, bool t_value_rhs, bool broadcast_bias, DataType data_type)
+void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned int k_value, unsigned int b_value, unsigned int m0_value, unsigned int n0_value, unsigned int k0_value, unsigned int h0_value, bool i_value_rhs, bool t_value_rhs, bool broadcast_bias, DataType data_type, const ActivationLayerInfo &act_info)
 {
     const unsigned int M = m_value;
     const unsigned int N = n_value;
@@ -152,6 +159,7 @@ void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned
     kernel_info.depth_output_gemm3d     = 0;
     kernel_info.reinterpret_input_as_3d = false;
     kernel_info.broadcast_bias          = broadcast_bias;
+    kernel_info.activation_info         = act_info;
 
     const TensorShape lhs_shape(K, M, b_value);
     const TensorShape rhs_shape(N, K, b_value);
@@ -187,7 +195,7 @@ TEST_SUITE(CL)
 TEST_SUITE(GEMMMatrixMultiplyReshapedOnlyRHS)
 TEST_SUITE(Float)
 TEST_SUITE(FP32)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                    m_values,
                                                                    n_values),
                                                                    k_values),
@@ -199,13 +207,14 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combi
                                                                    i_values_rhs),
                                                                    t_values_rhs),
                                                                    broadcast_bias_values),
-m_value, n_value, k_value, b_value, m0_value, n0_value, k0_value, h0_value, i_value_rhs, t_value_rhs, broadcast_bias)
+                                                                   act_values),
+m_value, n_value, k_value, b_value, m0_value, n0_value, k0_value, h0_value, i_value_rhs, t_value_rhs, broadcast_bias, act_value)
 {
-    validate_configuration(m_value, n_value, k_value, b_value, m0_value, n0_value, k0_value, h0_value, i_value_rhs, t_value_rhs, broadcast_bias, DataType::F32);
+    validate_configuration(m_value, n_value, k_value, b_value, m0_value, n0_value, k0_value, h0_value, i_value_rhs, t_value_rhs, broadcast_bias, DataType::F32, act_value);
 }
 
 FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<float>, framework::DatasetMode::ALL,
-                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                    m_values,
                                                                    n_values),
                                                                    k_values),
@@ -219,14 +228,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<float>,
                                                                    framework::dataset::make("DataType", DataType::F32)),
                                                                    a_values),
                                                                    beta_values),
-                                                                   broadcast_bias_values))
+                                                                   broadcast_bias_values),
+                                                                   act_values))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
 }
 
 FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<float>, framework::DatasetMode::NIGHTLY,
-                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                    m_values,
                                                                    n_values),
                                                                    k_values),
@@ -240,14 +250,15 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<float>,
                                                                    framework::dataset::make("DataType", DataType::F32)),
                                                                    a_values),
                                                                    beta_values),
-                                                                   broadcast_bias_values))
+                                                                   broadcast_bias_values),
+                                                                   act_values))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
 }
 
 FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<float>, framework::DatasetMode::ALL,
-                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                    m_w_values,
                                                                    m_h_values),
                                                                    n_values),
@@ -261,14 +272,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<float>,
                                                                    t_values_rhs),
                                                                    framework::dataset::make("DataType", DataType::F32)),
                                                                    a_values),
-                                                                   beta_values))
+                                                                   beta_values),
+                                                                   act_values))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
 }
 
 FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<float>, framework::DatasetMode::NIGHTLY,
-                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                    m_w_values,
                                                                    m_h_values),
                                                                    n_values),
@@ -282,7 +294,8 @@ FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<float>,
                                                                    t_values_rhs),
                                                                    framework::dataset::make("DataType", DataType::F32)),
                                                                    a_values),
-                                                                   beta_values))
+                                                                   beta_values),
+                                                                   act_values))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
@@ -291,7 +304,7 @@ TEST_SUITE_END() // FP32
 
 TEST_SUITE(FP16)
 FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<half>, framework::DatasetMode::ALL,
-                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                    m_values,
                                                                    n_values),
                                                                    k_values),
@@ -305,14 +318,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<half>,
                                                                    framework::dataset::make("DataType", DataType::F16)),
                                                                    a_values),
                                                                    beta_values),
-                                                                   broadcast_bias_values))
+                                                                   broadcast_bias_values),
+                                                                   act_values))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
 }
 
 FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<half>, framework::DatasetMode::NIGHTLY,
-                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                    m_values,
                                                                    n_values),
                                                                    k_values),
@@ -326,14 +340,15 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<half>,
                                                                    framework::dataset::make("DataType", DataType::F16)),
                                                                    a_values),
                                                                    beta_values),
-                                                                   broadcast_bias_values))
+                                                                   broadcast_bias_values),
+                                                                   act_values))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
 }
 
 FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<half>, framework::DatasetMode::ALL,
-                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                    m_w_values,
                                                                    m_h_values),
                                                                    n_values),
@@ -347,14 +362,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<half>,
                                                                    t_values_rhs),
                                                                    framework::dataset::make("DataType", DataType::F16)),
                                                                    a_values),
-                                                                   beta_values))
+                                                                   beta_values),
+                                                                   act_values))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
 }
 
 FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<half>, framework::DatasetMode::NIGHTLY,
-                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                    m_w_values,
                                                                    m_h_values),
                                                                    n_values),
@@ -368,7 +384,8 @@ FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<half>,
                                                                    t_values_rhs),
                                                                    framework::dataset::make("DataType", DataType::F16)),
                                                                    a_values),
-                                                                   beta_values))
+                                                                   beta_values),
+                                                                   act_values))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
diff --git a/tests/validation/fixtures/GEMMFixture.h b/tests/validation/fixtures/GEMMFixture.h
index a225c71ab7..ac8ab2a949 100644
--- a/tests/validation/fixtures/GEMMFixture.h
+++ b/tests/validation/fixtures/GEMMFixture.h
@@ -33,6 +33,7 @@
 #include "tests/framework/Asserts.h"
 #include "tests/framework/Fixture.h"
 #include "tests/validation/Helpers.h"
+#include "tests/validation/reference/ActivationLayer.h"
 #include "tests/validation/reference/GEMM.h"
 
 #include <random>
@@ -158,7 +159,7 @@ class GEMMMatrixMultiplyReshapedValidationFixture : public framework::Fixture
 public:
     template <typename...>
     void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int v0, unsigned int h0, bool interleave_lhs,
-               bool interleave_rhs, DataType data_type, float alpha, float beta, bool broadcast_bias)
+               bool interleave_rhs, DataType data_type, float alpha, float beta, bool broadcast_bias, const ActivationLayerInfo &act_info)
     {
         GEMMLHSMatrixInfo lhs_info;
         lhs_info.m0 = m0;
@@ -181,8 +182,8 @@ public:
                                     broadcast_bias ? 1 : m,
                                     broadcast_bias ? 1 : batch_size);
 
-        _target    = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, broadcast_bias);
-        _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, broadcast_bias);
+        _target    = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, broadcast_bias, act_info);
+        _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, broadcast_bias, act_info);
     }
 
 protected:
@@ -198,7 +199,7 @@ protected:
     }
 
     TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info,
-                              DataType data_type, float alpha, float beta, bool broadcast_bias)
+                              DataType data_type, float alpha, float beta, bool broadcast_bias, const ActivationLayerInfo &act_info)
     {
         // Create tensors
         TensorType lhs  = create_tensor<TensorType>(lhs_shape, data_type, 1);
@@ -218,6 +219,7 @@ protected:
         kernel_info.depth_output_gemm3d     = 0;
         kernel_info.reinterpret_input_as_3d = false;
         kernel_info.broadcast_bias          = broadcast_bias;
+        kernel_info.activation_info         = act_info;
 
         // The output tensor will be auto-initialized within the function
 
@@ -261,7 +263,8 @@ protected:
         return dst;
     }
 
-    SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, bool broadcast_bias)
+    SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, bool broadcast_bias,
+                                      const ActivationLayerInfo &act_info)
     {
         TensorShape dst_shape = lhs_shape;
         dst_shape[0]          = rhs_shape[0];
@@ -290,7 +293,7 @@ protected:
             }
         }
 
-        return reference::gemm<T>(lhs, rhs, bias, alpha, beta);
+        return reference::activation_layer(reference::gemm<T>(lhs, rhs, bias, alpha, beta), act_info);
     }
 
     TensorType      _target{};
@@ -304,7 +307,7 @@ public:
     template <typename...>
     void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int v0, unsigned int h0, bool interleave_lhs,
-               bool interleave_rhs, DataType data_type, float alpha, float beta)
+               bool interleave_rhs, DataType data_type, float alpha, float beta, const ActivationLayerInfo &act_info)
     {
         GEMMLHSMatrixInfo lhs_info;
         lhs_info.m0 = m0;
@@ -328,8 +331,8 @@ public:
         const TensorShape rhs_shape(n, k, batch_size);
         const TensorShape bias_shape(n, 1, 1);
 
-        _target    = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, m_h);
-        _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, m_h);
+        _target    = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, m_h, act_info);
+        _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, m_h, act_info);
     }
 
 protected:
@@ -341,7 +344,7 @@ protected:
     }
 
     TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info,
-                              DataType data_type, float alpha, float beta, unsigned int m_h)
+                              DataType data_type, float alpha, float beta, unsigned int m_h, const ActivationLayerInfo &act_info)
     {
         // Create tensors
         TensorType lhs  = create_tensor<TensorType>(lhs_shape, data_type, 1);
@@ -361,6 +364,7 @@ protected:
         kernel_info.depth_output_gemm3d     = m_h;
         kernel_info.reinterpret_input_as_3d = false;
         kernel_info.broadcast_bias          = true;
+        kernel_info.activation_info         = act_info;
 
         // The output tensor will be auto-initialized within the function
 
@@ -404,7 +408,8 @@ protected:
         return dst;
     }
 
-    SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, unsigned int m_h)
+    SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, unsigned int m_h,
+                                      const ActivationLayerInfo &act_info)
     {
         TensorShape dst_shape = lhs_shape;
         dst_shape.set(0, rhs_shape[0]);
@@ -432,7 +437,7 @@ protected:
             memcpy(bias.data() + i * n, bias.data(), n * sizeof(T));
         }
 
-        return reference::gemm<T>(lhs, rhs, bias, alpha, beta);
+        return reference::activation_layer(reference::gemm<T>(lhs, rhs, bias, alpha, beta), act_info);
     }
 
     TensorType      _target{};
@@ -445,7 +450,7 @@ class GEMMMatrixMultiplyReshapedOnlyRHSValidationFixture : public framework::Fix
 public:
     template <typename...>
     void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int h0,
-               bool interleave_rhs, bool transpose_rhs, DataType data_type, float alpha, float beta, bool broadcast_bias)
+               bool interleave_rhs, bool transpose_rhs, DataType data_type, float alpha, float beta, bool broadcast_bias, const ActivationLayerInfo &act_info)
     {
         GEMMLHSMatrixInfo lhs_info;
         lhs_info.m0 = m0;
@@ -465,8 +470,8 @@ public:
                                     broadcast_bias ? 1 : m,
                                     broadcast_bias ? 1 : batch_size);
 
-        _target    = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, broadcast_bias);
-        _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, broadcast_bias);
+        _target    = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, broadcast_bias, act_info);
+        _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, broadcast_bias, act_info);
     }
 
 protected:
@@ -482,7 +487,7 @@ protected:
     }
 
     TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info,
-                              DataType data_type, float alpha, float beta, bool broadcast_bias)
+                              DataType data_type, float alpha, float beta, bool broadcast_bias, const ActivationLayerInfo &act_info)
     {
         // Create tensors
         TensorType lhs  = create_tensor<TensorType>(lhs_shape, data_type, 1);
@@ -501,6 +506,7 @@ protected:
         kernel_info.depth_output_gemm3d     = 0;
         kernel_info.reinterpret_input_as_3d = false;
         kernel_info.broadcast_bias          = broadcast_bias;
+        kernel_info.activation_info         = act_info;
 
         // The output tensor will be auto-initialized within the function
 
@@ -539,7 +545,8 @@ protected:
         return dst;
     }
 
-    SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, bool broadcast_bias)
+    SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, bool broadcast_bias,
+                                      const ActivationLayerInfo &act_info)
     {
         TensorShape dst_shape = lhs_shape;
         dst_shape[0]          = rhs_shape[0];
@@ -568,7 +575,7 @@ protected:
             }
         }
 
-        return (reference::gemm<T>(lhs, rhs, bias, alpha, beta));
+        return reference::activation_layer(reference::gemm<T>(lhs, rhs, bias, alpha, beta), act_info);
     }
 
     TensorType      _target{};
@@ -581,7 +588,7 @@ class GEMMMatrixMultiplyReshapedOnlyRHS3DValidationFixture : public framework::F
 public:
     template <typename...>
     void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int h0,
-               bool interleave_rhs, bool transpose_rhs, DataType data_type, float alpha, float beta)
+               bool interleave_rhs, bool transpose_rhs, DataType data_type, float alpha, float beta, const ActivationLayerInfo &act_info)
     {
         GEMMLHSMatrixInfo lhs_info;
         lhs_info.m0 = m0;
@@ -602,8 +609,8 @@ public:
         const TensorShape rhs_shape(n, k, batch_size);
         const TensorShape bias_shape(n, 1, 1);
 
-        _target    = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, m_h);
-        _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, m_h);
+        _target    = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, m_h, act_info);
+        _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, m_h, act_info);
     }
 
 protected:
@@ -616,7 +623,7 @@ protected:
 
     TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info,
                               DataType data_type, float alpha, float beta,
-                              unsigned int m_h)
+                              unsigned int m_h, const ActivationLayerInfo &act_info)
     {
         // Create tensors
         TensorType lhs  = create_tensor<TensorType>(lhs_shape, data_type, 1);
@@ -635,6 +642,7 @@ protected:
         kernel_info.depth_output_gemm3d     = m_h;
         kernel_info.reinterpret_input_as_3d = false;
         kernel_info.broadcast_bias          = true;
+        kernel_info.activation_info         = act_info;
 
         // The output tensor will be auto-initialized within the function
 
@@ -673,7 +681,8 @@ protected:
         return dst;
     }
 
-    SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, unsigned int m_h)
+    SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, unsigned int m_h,
+                                      const ActivationLayerInfo &act_info)
    {
         TensorShape dst_shape = lhs_shape;
         dst_shape.set(0, rhs_shape[0]);
@@ -701,7 +710,7 @@ protected:
             memcpy(bias.data() + i * n, bias.data(), n * sizeof(T));
         }
 
-        return reference::gemm<T>(lhs, rhs, bias, alpha, beta);
+        return reference::activation_layer(reference::gemm<T>(lhs, rhs, bias, alpha, beta), act_info);
     }
 
     TensorType      _target{};
@@ -713,7 +722,8 @@ class GEMMMatrixMultiplyNativeValidationFixture : public framework::Fixture
 {
 public:
     template <typename...>
-    void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, DataType data_type, float alpha, float beta, bool broadcast_bias)
+    void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, DataType data_type, float alpha, float beta, bool broadcast_bias,
+               const ActivationLayerInfo &act_info)
     {
         GEMMLHSMatrixInfo lhs_info;
         lhs_info.m0 = m0;
@@ -730,8 +740,8 @@ public:
                                     broadcast_bias ? 1 : m,
                                     broadcast_bias ? 1 : batch_size);
 
-        _target    = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, broadcast_bias);
-        _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, broadcast_bias);
+        _target    = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, broadcast_bias, act_info);
+        _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, broadcast_bias, act_info);
     }
 
 protected:
@@ -747,7 +757,7 @@ protected:
     }
 
     TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info,
-                              DataType data_type, float alpha, float beta, bool broadcast_bias)
+                              DataType data_type, float alpha, float beta, bool broadcast_bias, const ActivationLayerInfo &act_info)
     {
         // Create tensors
         TensorType lhs  = create_tensor<TensorType>(lhs_shape, data_type, 1);
@@ -765,6 +775,7 @@ protected:
         kernel_info.depth_output_gemm3d     = 0;
         kernel_info.reinterpret_input_as_3d = false;
         kernel_info.broadcast_bias          = broadcast_bias;
+        kernel_info.activation_info         = act_info;
 
         // Create and configure function
         GEMMFunctionType gemm;
@@ -796,7 +807,8 @@ protected:
         return dst;
     }
 
-    SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, bool broadcast_bias)
+    SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, bool broadcast_bias,
+                                      const ActivationLayerInfo &act_info)
     {
         TensorShape dst_shape = lhs_shape;
         dst_shape[0]          = rhs_shape[0];
@@ -825,7 +837,7 @@ protected:
             }
         }
 
-        return reference::gemm<T>(lhs, rhs, bias, alpha, beta);
+        return reference::activation_layer(reference::gemm<T>(lhs, rhs, bias, alpha, beta), act_info);
     }
 
     TensorType      _target{};
@@ -837,7 +849,8 @@ class GEMMMatrixMultiplyNative3DValidationFixture : public framework::Fixture
 {
 public:
     template <typename...>
-    void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, DataType data_type, float alpha, float beta)
+    void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, DataType data_type, float alpha, float beta,
+               const ActivationLayerInfo &act_info)
     {
         GEMMLHSMatrixInfo lhs_info;
         lhs_info.m0 = m0;
@@ -855,8 +868,8 @@ public:
         const TensorShape rhs_shape(n, k, batch_size);
         const TensorShape bias_shape(n, 1, 1);
 
-        _target    = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, m_h);
-        _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, m_h);
+        _target    = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, m_h, act_info);
+        _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, m_h, act_info);
     }
 
 protected:
@@ -868,7 +881,7 @@ protected:
     }
 
     TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info,
-                              DataType data_type, float alpha, float beta, unsigned int m_h)
+                              DataType data_type, float alpha, float beta, unsigned int m_h, const ActivationLayerInfo &act_info)
     {
         // Create tensors
         TensorType lhs  = create_tensor<TensorType>(lhs_shape, data_type, 1);
@@ -886,6 +899,7 @@ protected:
         kernel_info.depth_output_gemm3d     = m_h;
         kernel_info.reinterpret_input_as_3d = false;
         kernel_info.broadcast_bias          = true;
+        kernel_info.activation_info         = act_info;
 
         // The output tensor will be auto-initialized within the function
 
@@ -919,7 +933,8 @@ protected:
         return dst;
     }
 
-    SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, unsigned int m_h)
+    SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, unsigned int m_h,
+                                      const ActivationLayerInfo &act_info)
    {
         TensorShape dst_shape = lhs_shape;
         dst_shape.set(0, rhs_shape[0]);
@@ -947,7 +962,7 @@ protected:
             memcpy(bias.data() + i * n, bias.data(), n * sizeof(T));
         }
 
-        return reference::gemm<T>(lhs, rhs, bias, alpha, beta);
+        return reference::activation_layer(reference::gemm<T>(lhs, rhs, bias, alpha, beta), act_info);
     }
 
     TensorType      _target{};
-- 
cgit v1.2.1
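One detail worth noting in the 3D fixtures' compute_reference(): the library kernel broadcasts a single n-element bias row, so before calling the reference GEMM the fixture replicates that row across all m * batch rows, as in the memcpy loops above. A plain standalone sketch of the same expansion (names are illustrative, not the fixture's):

    #include <cstddef>
    #include <cstring>
    #include <vector>

    // Replicate one n-element bias row m * batch times, matching the kernel's
    // broadcast semantics, so a plain reference GEMM can consume it directly.
    std::vector<float> expand_bias(const std::vector<float> &row, std::size_t m, std::size_t batch)
    {
        const std::size_t n = row.size();
        std::vector<float> out(n * m * batch);
        for(std::size_t i = 0; i < m * batch; ++i)
        {
            std::memcpy(out.data() + i * n, row.data(), n * sizeof(float));
        }
        return out;
    }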