From d820db6fc479f7daef6788377cb765369fcddc22 Mon Sep 17 00:00:00 2001
From: Gian Marco Iodice
Date: Mon, 5 Aug 2019 14:23:23 +0100
Subject: COMPMID-2545: Reduce tests required by GEMM (OpenCL)

Removed FP16 tests from the new GEMM functions (GEMMNative, GEMMReshaped
and GEMMReshapedOnlyRHS) since not called by CLGEMM

Change-Id: Id52281fc9557d45e29db0a74964d4bdec55d8f46
Signed-off-by: Gian Marco Iodice
Reviewed-on: https://review.mlplatform.org/c/1695
Reviewed-by: Michele Di Giorgio
Tested-by: Arm Jenkins
---
 .../validation/CL/GEMMLowpMatrixMultiplyNative.cpp |   6 +-
 .../CL/GEMMLowpMatrixMultiplyReshaped.cpp          |   6 +-
 .../CL/GEMMLowpMatrixMultiplyReshapedOnlyRHS.cpp   |   6 +-
 tests/validation/CL/GEMMMatrixMultiply.cpp         |   4 +-
 .../CL/GEMMMatrixMultiplyInterleavedTransposed.cpp |   2 +-
 tests/validation/CL/GEMMMatrixMultiplyNative.cpp   |  87 +----------------
 tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp | 103 +--------------------
 .../CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp       |  99 +-------------------
 tests/validation/CL/GEMMReshapeLHSMatrix.cpp       |   2 +-
 9 files changed, 22 insertions(+), 293 deletions(-)

diff --git a/tests/validation/CL/GEMMLowpMatrixMultiplyNative.cpp b/tests/validation/CL/GEMMLowpMatrixMultiplyNative.cpp
index 1fc8cc47c4..6243b3f7e3 100644
--- a/tests/validation/CL/GEMMLowpMatrixMultiplyNative.cpp
+++ b/tests/validation/CL/GEMMLowpMatrixMultiplyNative.cpp
@@ -75,10 +75,10 @@ const auto b_values = framework::dataset::make("batch_size", 1, 3);
 const auto m0_values_precommit = framework::dataset::make("M0", {4, 6});
 
 /** N0 values to test - Precommit */
-const auto n0_values_precommit = framework::dataset::make("N0", { 2, 4 });
+const auto n0_values_precommit = framework::dataset::make("N0", { 4 });
 
 /** K0 values to test - Precommit */
-const auto k0_values_precommit = framework::dataset::make("K0", { 4 });
+const auto k0_values_precommit = framework::dataset::make("K0", { 16 });
 
 /** M0 values to test - Nightly */
 const auto m0_values_nightly = framework::dataset::make("M0", 2, 7);
@@ -87,7 +87,7 @@ const auto m0_values_nightly = framework::dataset::make("M0", 2, 7);
 const auto n0_values_nightly = framework::dataset::make("N0", { 2, 3, 4, 8 });
 
 /** K0 values to test - Nightly */
-const auto k0_values_nightly = framework::dataset::make("K0", { 2, 3, 4, 8 });
+const auto k0_values_nightly = framework::dataset::make("K0", { 2, 3, 4, 8, 16 });
 } // namespace
 
 TEST_SUITE(CL)
diff --git a/tests/validation/CL/GEMMLowpMatrixMultiplyReshaped.cpp b/tests/validation/CL/GEMMLowpMatrixMultiplyReshaped.cpp
index 60b92bd030..de229ef7fb 100644
--- a/tests/validation/CL/GEMMLowpMatrixMultiplyReshaped.cpp
+++ b/tests/validation/CL/GEMMLowpMatrixMultiplyReshaped.cpp
@@ -84,10 +84,10 @@ const auto b_values = framework::dataset::make("batch_size", 1, 3);
 const auto m0_values_precommit = framework::dataset::make("M0", {4, 6});
 
 /** N0 values to test - Precommit */
-const auto n0_values_precommit = framework::dataset::make("N0", { 2, 4 });
+const auto n0_values_precommit = framework::dataset::make("N0", { 4 });
 
 /** K0 values to test - Precommit */
-const auto k0_values_precommit = framework::dataset::make("K0", { 4 });
+const auto k0_values_precommit = framework::dataset::make("K0", { 16 });
 
 /** V0 values to test - Precommit */
 const auto v0_values_precommit = framework::dataset::make("V0", 1, 3);
@@ -102,7 +102,7 @@ const auto m0_values_nightly = framework::dataset::make("M0", 2, 7);
 const auto n0_values_nightly = framework::dataset::make("N0", { 2, 3, 4, 8 });
 
 /** K0 values to test - Nightly */
-const auto k0_values_nightly = framework::dataset::make("K0", { 2, 3, 4, 8 });
+const auto k0_values_nightly = framework::dataset::make("K0", { 2, 3, 4, 8, 16 });
 
 /** V0 values to test - Nightly */
 const auto v0_values_nightly = framework::dataset::make("V0", 1, 4);
diff --git a/tests/validation/CL/GEMMLowpMatrixMultiplyReshapedOnlyRHS.cpp b/tests/validation/CL/GEMMLowpMatrixMultiplyReshapedOnlyRHS.cpp
index a907c5b1a1..6ead11ab23 100644
--- a/tests/validation/CL/GEMMLowpMatrixMultiplyReshapedOnlyRHS.cpp
+++ b/tests/validation/CL/GEMMLowpMatrixMultiplyReshapedOnlyRHS.cpp
@@ -85,10 +85,10 @@ const auto b_values = framework::dataset::make("batch_size", 1, 3);
 const auto m0_values_precommit = framework::dataset::make("M0", {4, 6});
 
 /** N0 values to test - Precommit */
-const auto n0_values_precommit = framework::dataset::make("N0", { 2, 4 });
+const auto n0_values_precommit = framework::dataset::make("N0", { 4 });
 
 /** K0 values to test - Precommit */
-const auto k0_values_precommit = framework::dataset::make("K0", { 4 });
+const auto k0_values_precommit = framework::dataset::make("K0", { 16 });
 
 /** H0 values to test - Precommit */
 const auto h0_values_precommit = framework::dataset::make("H0", 1, 3);
@@ -100,7 +100,7 @@ const auto m0_values_nightly = framework::dataset::make("M0", 2, 8);
 const auto n0_values_nightly = framework::dataset::make("N0", { 2, 3, 4, 8 });
 
 /** K0 values to test - Nightly */
-const auto k0_values_nightly = framework::dataset::make("K0", { 2, 3, 4, 8 });
+const auto k0_values_nightly = framework::dataset::make("K0", { 2, 3, 4, 8, 16 });
 
 /** H0 values to test - Nightly */
 const auto h0_values_nightly = framework::dataset::make("H0", 1, 4);
diff --git a/tests/validation/CL/GEMMMatrixMultiply.cpp b/tests/validation/CL/GEMMMatrixMultiply.cpp
index 8f7c0aaef1..0bc8b02825 100644
--- a/tests/validation/CL/GEMMMatrixMultiply.cpp
+++ b/tests/validation/CL/GEMMMatrixMultiply.cpp
@@ -70,7 +70,7 @@ constexpr float tolerance_num_f16 = 0.02f;
 const auto alpha_values = framework::dataset::make("alpha", {1.0f, -0.75f} );
 
 /** Beta values to test - Precommit */
-const auto beta_values = framework::dataset::make("beta", {-0.75f, 0.0f} );
+const auto beta_values = framework::dataset::make("beta", {-0.35f, 0.0f} );
 
 /** M values to test - Precommit */
 const auto m_values_precommit = framework::dataset::make("M", {37, 1});
@@ -113,7 +113,7 @@ const auto act_values = framework::dataset::make("Activation",
 });
 
 /** Broadcast bias from vector to matrix */
-const auto broadcast_bias_values = framework::dataset::make("broadcast_bias", {false, true} );
+const auto broadcast_bias_values = framework::dataset::make("broadcast_bias", { false, true } );
 
 /** GPU architectures values to test */
 const auto gpu_arch_values = framework::dataset::make("GPUArch",
diff --git a/tests/validation/CL/GEMMMatrixMultiplyInterleavedTransposed.cpp b/tests/validation/CL/GEMMMatrixMultiplyInterleavedTransposed.cpp
index 5d21cf4f34..c3b461a0d8 100644
--- a/tests/validation/CL/GEMMMatrixMultiplyInterleavedTransposed.cpp
+++ b/tests/validation/CL/GEMMMatrixMultiplyInterleavedTransposed.cpp
@@ -80,7 +80,7 @@ constexpr float tolerance_num_f16 = 0.02f;
 const auto alpha_values = framework::dataset::make("alpha", {1.0f, -0.75f} );
 
 /** Beta values to test - Precommit */
-const auto beta_values = framework::dataset::make("beta", {-0.75f, 0.0f} );
+const auto beta_values = framework::dataset::make("beta", {-0.35f, 0.0f} );
 
 /** M values to test - Precommit */
 const auto m_values_precommit = framework::dataset::make("M", 37);
diff --git a/tests/validation/CL/GEMMMatrixMultiplyNative.cpp b/tests/validation/CL/GEMMMatrixMultiplyNative.cpp
index 33b01d8ee6..0dac2e8039 100644
--- a/tests/validation/CL/GEMMMatrixMultiplyNative.cpp
+++ b/tests/validation/CL/GEMMMatrixMultiplyNative.cpp
@@ -63,9 +63,6 @@ namespace
 RelativeTolerance<float> rel_tolerance_f32(0.001f);
 constexpr float abs_tolerance_f32(0.0001f);
 
-RelativeTolerance<half> rel_tolerance_f16(half(0.2));
-constexpr float tolerance_num_f16 = 0.02f;
-
 /** Alpha values to test - Precommit */
 const auto a_values = framework::dataset::make("alpha", {1.0f, -0.75f} );
 
@@ -98,10 +95,10 @@ const auto act_values = framework::dataset::make("Activation",
 });
 
 /** M0 values to test - Precommit */
-const auto m0_values_precommit = framework::dataset::make("M0", {4, 6});
+const auto m0_values_precommit = framework::dataset::make("M0", { 4, 6 });
 
 /** N0 values to test - Precommit */
-const auto n0_values_precommit = framework::dataset::make("N0", { 2, 4 });
+const auto n0_values_precommit = framework::dataset::make("N0", { 4 });
 
 /** K0 values to test - Precommit */
 const auto k0_values_precommit = framework::dataset::make("K0", { 4 });
@@ -119,7 +116,7 @@ const auto n0_values_nightly = framework::dataset::make("N0", { 2, 3, 4, 8 });
 const auto k0_values_nightly = framework::dataset::make("K0", { 2, 3, 4, 8 });
 
 /** Broadcast bias from vector to matrix */
-const auto broadcast_bias_values = framework::dataset::make("broadcast_bias", {false, true} );
+const auto broadcast_bias_values = framework::dataset::make("broadcast_bias", { false, true } );
 
 /** Configuration test */
 void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned int k_value, unsigned int b_value, unsigned int m0_value, unsigned int n0_value, unsigned int k0_value, bool broadcast_bias, DataType data_type, const ActivationLayerInfo &act_info)
@@ -264,84 +261,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyNative3DFixture<float>, f
     validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
 }
 TEST_SUITE_END() // FP32
-
-TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyNativeFixture<half>, framework::DatasetMode::ALL,
-    combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
-        m_values,
-        n_values),
-        k_values),
-        b_values),
-        m0_values_precommit),
-        n0_values_precommit),
-        k0_values_precommit),
-        framework::dataset::make("DataType", DataType::F16)),
-        a_values),
-        beta_values),
-        broadcast_bias_values),
-        act_values))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
-}
-
-FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMMatrixMultiplyNativeFixture<half>, framework::DatasetMode::NIGHTLY,
-    combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
-        m_values,
-        n_values),
-        k_values),
-        b_values),
-        m0_values_nightly),
-        n0_values_nightly),
-        k0_values_nightly),
-        framework::dataset::make("DataType", DataType::F16)),
-        a_values),
-        beta_values),
-        broadcast_bias_values),
-        act_values))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
-}
-
-FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMMatrixMultiplyNative3DFixture<half>, framework::DatasetMode::ALL,
-    combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
-        m_w_values,
-        m_h_values),
-        n_values),
-        k_values),
-        b_values),
-        m0_values_precommit),
-        n0_values_precommit),
-        k0_values_precommit),
-        framework::dataset::make("DataType", DataType::F16)),
-        a_values),
-        beta_values),
-        act_values))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
-}
-
-FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyNative3DFixture<half>, framework::DatasetMode::NIGHTLY,
-    combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
-        m_w_values,
-        m_h_values),
-        n_values),
-        k_values),
-        b_values),
-        m0_values_nightly),
-        n0_values_nightly),
-        k0_values_nightly),
-        framework::dataset::make("DataType", DataType::F16)),
-        a_values),
-        beta_values),
-        act_values))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
-}
-TEST_SUITE_END() // FP16
 TEST_SUITE_END() // Float
 TEST_SUITE_END() // GEMMMatrixMulipltyNative
 TEST_SUITE_END() // CL
diff --git a/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp b/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp
index 25221451ed..997c510e42 100644
--- a/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp
+++ b/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp
@@ -71,14 +71,11 @@ namespace
 RelativeTolerance<float> rel_tolerance_f32(0.001f);
 constexpr float abs_tolerance_f32(0.0001f);
 
-RelativeTolerance<half> rel_tolerance_f16(half(0.2));
-constexpr float tolerance_num_f16 = 0.02f;
-
 /** Alpha values to test - Precommit */
 const auto a_values = framework::dataset::make("alpha", {1.0f, -0.75f} );
 
 /** Beta values to test - Precommit */
-const auto beta_values = framework::dataset::make("beta", {-0.75f, 0.0f} );
+const auto beta_values = framework::dataset::make("beta", {-0.35f, 0.0f} );
 
 /** M values to test */
 const auto m_values = framework::dataset::make("M", 37);
@@ -109,7 +106,7 @@ const auto act_values = framework::dataset::make("Activation",
 const auto m0_values_precommit = framework::dataset::make("M0", {4, 6});
 
 /** N0 values to test - Precommit */
-const auto n0_values_precommit = framework::dataset::make("N0", { 2, 4 });
+const auto n0_values_precommit = framework::dataset::make("N0", { 4 });
 
 /** K0 values to test - Precommit */
 const auto k0_values_precommit = framework::dataset::make("K0", { 4 });
@@ -142,7 +139,7 @@ const auto i_values_lhs = framework::dataset::make("interleave_lhs", { true, fal
 const auto i_values_rhs = framework::dataset::make("interleave_rhs", { true, false });
 
 /** Broadcast bias from vector to matrix */
-const auto broadcast_bias_values = framework::dataset::make("broadcast_bias", {false, true} );
+const auto broadcast_bias_values = framework::dataset::make("broadcast_bias", { false, true } );
 
 /** Configuration test */
 void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned int k_value, unsigned int b_value, unsigned int m0_value, unsigned int n0_value, unsigned int k0_value, unsigned int v0_value, unsigned int h0_value, bool i_value_lhs, bool i_value_rhs, bool broadcast_bias, DataType data_type, const ActivationLayerInfo &act_info)
@@ -323,100 +320,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyReshaped3DFixture<float>,
     validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
 }
 TEST_SUITE_END() // FP32
-
-TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyReshapedFixture<half>, framework::DatasetMode::ALL,
-    combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
-        m_values,
-        n_values),
-        k_values),
-        b_values),
-        m0_values_precommit),
-        n0_values_precommit),
-        k0_values_precommit),
-        v0_values_precommit),
-        h0_values_precommit),
-        i_values_lhs),
-        i_values_rhs),
-        framework::dataset::make("DataType", DataType::F16)),
-        a_values),
-        beta_values),
-        broadcast_bias_values),
-        act_values))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
-}
-
-FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMMatrixMultiplyReshapedFixture<half>, framework::DatasetMode::NIGHTLY,
-    combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
-        m_values,
-        n_values),
-        k_values),
-        b_values),
-        m0_values_nightly),
-        n0_values_nightly),
-        k0_values_nightly),
-        v0_values_nightly),
-        h0_values_nightly),
-        i_values_lhs),
-        i_values_rhs),
-        framework::dataset::make("DataType", DataType::F16)),
-        a_values),
-        beta_values),
-        broadcast_bias_values),
-        act_values))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
-}
-
-FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMMatrixMultiplyReshaped3DFixture<half>, framework::DatasetMode::ALL,
-    combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
-        m_w_values,
-        m_h_values),
-        n_values),
-        k_values),
-        b_values),
-        m0_values_precommit),
-        n0_values_precommit),
-        k0_values_precommit),
-        v0_values_precommit),
-        h0_values_precommit),
-        i_values_lhs),
-        i_values_rhs),
-        framework::dataset::make("DataType", DataType::F16)),
-        a_values),
-        beta_values),
-        act_values))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
-}
-
-FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyReshaped3DFixture<half>, framework::DatasetMode::NIGHTLY,
-    combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
-        m_w_values,
-        m_h_values),
-        n_values),
-        k_values),
-        b_values),
-        m0_values_nightly),
-        n0_values_nightly),
-        k0_values_nightly),
-        v0_values_nightly),
-        h0_values_nightly),
-        i_values_lhs),
-        i_values_rhs),
-        framework::dataset::make("DataType", DataType::F16)),
-        a_values),
-        beta_values),
-        act_values))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
-}
-TEST_SUITE_END() // FP16
 TEST_SUITE_END() // Float
 TEST_SUITE_END() // GEMMMatrixMultiplyReshaped
 TEST_SUITE_END() // CL
diff --git a/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp b/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp
index dd993af481..5baab611c6 100644
--- a/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp
+++ b/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp
@@ -67,14 +67,11 @@ namespace
 RelativeTolerance<float> rel_tolerance_f32(0.001f);
 constexpr float abs_tolerance_f32(0.0001f);
 
-RelativeTolerance<half> rel_tolerance_f16(half(0.2));
-constexpr float tolerance_num_f16 = 0.02f;
-
 /** Alpha values to test - Precommit */
 const auto a_values = framework::dataset::make("alpha", {1.0f, -0.75f} );
 
 /** Beta values to test - Precommit */
-const auto beta_values = framework::dataset::make("beta", {-0.75f, 0.0f} );
+const auto beta_values = framework::dataset::make("beta", {-0.35f, 0.0f} );
 
 /** M values to test */
 const auto m_values = framework::dataset::make("M", 37);
@@ -105,7 +102,7 @@ const auto act_values = framework::dataset::make("Activation",
 const auto m0_values_precommit = framework::dataset::make("M0", {4, 6});
 
 /** N0 values to test - Precommit */
-const auto n0_values_precommit = framework::dataset::make("N0", { 2, 4 });
+const auto n0_values_precommit = framework::dataset::make("N0", { 4 });
 
 /** K0 values to test - Precommit */
 const auto k0_values_precommit = framework::dataset::make("K0", { 4 });
@@ -132,7 +129,7 @@ const auto i_values_rhs = framework::dataset::make("interleave_rhs", { true, fal
 const auto t_values_rhs = framework::dataset::make("transpose_rhs", { true, false });
 
 /** Broadcast bias from vector to matrix */
-const auto broadcast_bias_values = framework::dataset::make("broadcast_bias", {false, true} );
+const auto broadcast_bias_values = framework::dataset::make("broadcast_bias", { false, true } );
 
 /** Configuration test */
 void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned int k_value, unsigned int b_value, unsigned int m0_value, unsigned int n0_value, unsigned int k0_value, unsigned int h0_value, bool i_value_rhs, bool t_value_rhs, bool broadcast_bias, DataType data_type, const ActivationLayerInfo &act_info)
@@ -301,96 +298,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<
     validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
 }
 TEST_SUITE_END() // FP32
-
-TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<half>, framework::DatasetMode::ALL,
-    combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
-        m_values,
-        n_values),
-        k_values),
-        b_values),
-        m0_values_precommit),
-        n0_values_precommit),
-        k0_values_precommit),
-        h0_values_precommit),
-        i_values_rhs),
-        t_values_rhs),
-        framework::dataset::make("DataType", DataType::F16)),
-        a_values),
-        beta_values),
-        broadcast_bias_values),
-        act_values))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
-}
-
-FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<half>, framework::DatasetMode::NIGHTLY,
-    combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
-        m_values,
-        n_values),
-        k_values),
-        b_values),
-        m0_values_nightly),
-        n0_values_nightly),
-        k0_values_nightly),
-        h0_values_nightly),
-        i_values_rhs),
-        t_values_rhs),
-        framework::dataset::make("DataType", DataType::F16)),
-        a_values),
-        beta_values),
-        broadcast_bias_values),
-        act_values))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
-}
-
-FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<half>, framework::DatasetMode::ALL,
-    combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
-        m_w_values,
-        m_h_values),
-        n_values),
-        k_values),
-        b_values),
-        m0_values_precommit),
-        n0_values_precommit),
-        k0_values_precommit),
-        h0_values_precommit),
-        i_values_rhs),
-        t_values_rhs),
-        framework::dataset::make("DataType", DataType::F16)),
-        a_values),
-        beta_values),
-        act_values))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
-}
-
-FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<half>, framework::DatasetMode::NIGHTLY,
-    combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
-        m_w_values,
-        m_h_values),
-        n_values),
-        k_values),
-        b_values),
-        m0_values_nightly),
-        n0_values_nightly),
-        k0_values_nightly),
-        h0_values_nightly),
-        i_values_rhs),
-        t_values_rhs),
-        framework::dataset::make("DataType", DataType::F16)),
-        a_values),
-        beta_values),
-        act_values))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
-}
-TEST_SUITE_END() // FP16
 TEST_SUITE_END() // Float
 TEST_SUITE_END() // GEMMMatrixMulipltyReshapedOnlyRHS
 TEST_SUITE_END() // CL
diff --git a/tests/validation/CL/GEMMReshapeLHSMatrix.cpp b/tests/validation/CL/GEMMReshapeLHSMatrix.cpp
index fe8b7ffd1c..e6c3b40124 100644
--- a/tests/validation/CL/GEMMReshapeLHSMatrix.cpp
+++ b/tests/validation/CL/GEMMReshapeLHSMatrix.cpp
@@ -66,7 +66,7 @@ const auto data_types = framework::dataset::make("DataType", { DataType::QASYMM8
 const auto b_values = framework::dataset::make("batchsize", 1, 3);
 
 /** M0 values to test - Precommit */
-const auto m0_values_precommit = framework::dataset::make("M0", { 2, 4, 5 });
+const auto m0_values_precommit = framework::dataset::make("M0", { 4, 5 });
 
 /** K0 values to test - Precommit */
 const auto k0_values_precommit = framework::dataset::make("K0", { 2, 4 });
--
cgit v1.2.1