From d820db6fc479f7daef6788377cb765369fcddc22 Mon Sep 17 00:00:00 2001
From: Gian Marco Iodice
Date: Mon, 5 Aug 2019 14:23:23 +0100
Subject: COMPMID-2545: Reduce tests required by GEMM (OpenCL)

Removed the FP16 tests from the new GEMM functions (GEMMNative,
GEMMReshaped and GEMMReshapedOnlyRHS), since those code paths are not
called by CLGEMM.

Change-Id: Id52281fc9557d45e29db0a74964d4bdec55d8f46
Signed-off-by: Gian Marco Iodice
Reviewed-on: https://review.mlplatform.org/c/1695
Reviewed-by: Michele Di Giorgio
Tested-by: Arm Jenkins
---
 .../CL/kernels/CLGEMMMatrixMultiplyNativeKernel.h  |   4 +-
 .../kernels/CLGEMMMatrixMultiplyReshapedKernel.h   |   4 +-
 .../CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.h    |   4 +-
 .../kernels/CLGEMMMatrixMultiplyNativeKernel.cpp   |   5 +-
 .../kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp |   5 +-
 .../CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.cpp  |   5 +-
 .../validation/CL/GEMMLowpMatrixMultiplyNative.cpp |   6 +-
 .../CL/GEMMLowpMatrixMultiplyReshaped.cpp          |   6 +-
 .../CL/GEMMLowpMatrixMultiplyReshapedOnlyRHS.cpp   |   6 +-
 tests/validation/CL/GEMMMatrixMultiply.cpp         |   4 +-
 .../CL/GEMMMatrixMultiplyInterleavedTransposed.cpp |   2 +-
 tests/validation/CL/GEMMMatrixMultiplyNative.cpp   |  87 +----------------
 tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp | 103 +--------------------
 .../CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp       |  99 +-------------------
 tests/validation/CL/GEMMReshapeLHSMatrix.cpp       |   2 +-
 15 files changed, 37 insertions(+), 305 deletions(-)

diff --git a/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.h b/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.h
index 96f412c6a5..a37c261b6d 100644
--- a/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.h
+++ b/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.h
@@ -48,7 +48,7 @@ public:
     CLGEMMMatrixMultiplyNativeKernel &operator=(CLGEMMMatrixMultiplyNativeKernel &&) = default;
     /** Initialise the kernel's input and output.
      *
-     * @param[in]  input0 Input tensor for the LHS matrix. Data type supported: F32/F16. The number of dimensions for the LHS matrix must be less or equal than 4.
+     * @param[in]  input0 Input tensor for the LHS matrix. Data type supported: F32. The number of dimensions for the LHS matrix must be less or equal than 4.
      * @param[in]  input1 Input tensor for the RHS matrix. Data type supported: same as @p input0. The number of dimensions for the RHS matrix must be less or equal than 3.
      * @param[in]  input2 Input tensor containing the bias matrix. Data type supported: same as @p input0.
      * @param[out] output Output tensor info. Data type supported: same as @p input0
@@ -67,7 +67,7 @@ public:
                           const GEMMKernelInfo &gemm_info);
     /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMMatrixMultiplyNativeKernel
      *
-     * @param[in] input0 Input tensor info for the LHS matrix. Data type supported: F32/F16. The number of dimensions for the LHS matrix must be less or equal than 4.
+     * @param[in] input0 Input tensor info for the LHS matrix. Data type supported: F32. The number of dimensions for the LHS matrix must be less or equal than 4.
      * @param[in] input1 Input tensor info for the RHS matrix. Data type supported: same as @p input0. The number of dimensions for the RHS matrix must be less or equal than 3.
      * @param[in] input2 Input tensor info containing the bias matrix. Data type supported: same as @p input0.
      * @param[in] output Output tensor info. Data type supported: same as @p input0
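A note on the contract change above: with F16 removed from the supported set, the documented preconditions reduce to F32 inputs, an LHS of rank <= 4 and an RHS of rank <= 3. The following is a standalone sketch of that caller-side contract; TensorDesc and native_gemm_preconditions are hypothetical names for illustration, not the library's TensorInfo or validate() API.

    #include <cstdio>

    enum class DataType { F16, F32 };

    struct TensorDesc
    {
        DataType     type;
        unsigned int num_dimensions;
    };

    // Mirrors the @param documentation above: F32 only, LHS rank <= 4, RHS rank <= 3.
    bool native_gemm_preconditions(const TensorDesc &lhs, const TensorDesc &rhs)
    {
        const bool types_ok = (lhs.type == DataType::F32) && (rhs.type == lhs.type); // F16 no longer accepted
        const bool ranks_ok = (lhs.num_dimensions <= 4) && (rhs.num_dimensions <= 3);
        return types_ok && ranks_ok;
    }

    int main()
    {
        const TensorDesc lhs{ DataType::F16, 2 };
        const TensorDesc rhs{ DataType::F16, 2 };
        std::printf("%s\n", native_gemm_preconditions(lhs, rhs) ? "valid" : "rejected"); // prints "rejected"
        return 0;
    }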
diff --git a/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h b/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h
index 47916b3019..2a76f44284 100644
--- a/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h
+++ b/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h
@@ -51,7 +51,7 @@ public:
     CLGEMMMatrixMultiplyReshapedKernel &operator=(CLGEMMMatrixMultiplyReshapedKernel &&) = default;
     /** Initialise the kernel's input and output.
      *
-     * @param[in]  input0 Input tensor containing the LHS reshaped matrix. Data type supported: F32/F16. The number of dimensions for the LHS matrix must be less or equal than 4
+     * @param[in]  input0 Input tensor containing the LHS reshaped matrix. Data type supported: F32. The number of dimensions for the LHS matrix must be less or equal than 4
      * @param[in]  input1 Input tensor containing the RHS reshaped matrix. Data type supported: same as @p input0. The number of dimensions for the RHS matrix must be less or equal than 3
      * @param[in]  input2 Input tensor containing the bias matrix. Data type supported: same as @p input0.
      * @param[out] output Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0
@@ -74,7 +74,7 @@ public:
                           const GEMMKernelInfo &gemm_info);
     /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMMatrixMultiplyReshapedKernel
      *
-     * @param[in] input0 Input tensor containing the LHS reshaped matrix. Data type supported: F32/F16. The number of dimensions for the LHS matrix must be less or equal than 4
+     * @param[in] input0 Input tensor containing the LHS reshaped matrix. Data type supported: F32. The number of dimensions for the LHS matrix must be less or equal than 4
      * @param[in] input1 Input tensor containing the RHS reshaped matrix. Data type supported: same as @p input0. The number of dimensions for the RHS matrix must be less or equal than 3
      * @param[in] input2 Input tensor info containing the bias matrix. Data type supported: same as @p input0.
      * @param[in] output Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0
diff --git a/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.h b/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.h
index 3315331e87..e52d3ca099 100644
--- a/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.h
+++ b/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.h
@@ -51,7 +51,7 @@ public:
    CLGEMMMatrixMultiplyReshapedOnlyRHSKernel &operator=(CLGEMMMatrixMultiplyReshapedOnlyRHSKernel &&) = default;
    /** Initialise the kernel's input and output.
     *
-    * @param[in]  input0 Input tensor containing the LHS matrix. Data type supported: F32/F16. The number of dimensions for the LHS matrix must be less or equal than 4.
+    * @param[in]  input0 Input tensor containing the LHS matrix. Data type supported: F32. The number of dimensions for the LHS matrix must be less or equal than 4.
     * @param[in]  input1 Input tensor containing the RHS reshaped matrix. Data type supported: same as @p input0. The number of dimensions for the RHS matrix must be less or equal than 3.
     * @param[in]  input2 Input tensor containing the bias matrix. Data type supported: same as @p input0.
     * @param[out] output Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0
@@ -70,7 +70,7 @@ public:
                          const GEMMKernelInfo &gemm_info);
    /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMMatrixMultiplyReshapedOnlyRHSKernel
     *
-    * @param[in] input0 Input tensor info for the LHS matrix. Data type supported: F32/F16. The number of dimensions for the LHS matrix must be less or equal than 4.
+    * @param[in] input0 Input tensor info for the LHS matrix. Data type supported: F32. The number of dimensions for the LHS matrix must be less or equal than 4.
     * @param[in] input1 Input tensor info for the RHS reshaped matrix. Data type supported: same as @p input0. The number of dimensions for the RHS matrix must be less or equal than 3.
     * @param[in] input2 Input tensor info containing the bias matrix. Data type supported: same as @p input0.
     * @param[in] output Output tensor info. Data type supported: same as @p input0
diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.cpp
index 00b06f6e24..b1d0059057 100644
--- a/src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.cpp
@@ -57,7 +57,7 @@ Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1,
 {
     ARM_COMPUTE_UNUSED(alpha);
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input0, input1, output);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::F32, DataType::F16);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::F32);
     ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(input0->num_dimensions() > 4, "The number of dimensions for the LHS matrix must be <= 4");
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(input1->num_dimensions() > 3, "The number of dimensions for the RHS matrix must be <= 3");
@@ -66,7 +66,8 @@ Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1,
     ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.m0 < 1 || lhs_info.m0 > 8);
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(((rhs_info.n0 & (rhs_info.n0 - 1)) && rhs_info.n0 != 3), "Only 2,3,4,8,16 are supported for n0");
     ARM_COMPUTE_RETURN_ERROR_ON_MSG((gemm_info.reinterpret_input_as_3d || gemm_info.depth_output_gemm3d != 0) && (input2 != nullptr)
-                                    && (!gemm_info.broadcast_bias), "Bias addition only supported with broadcast mode in case the input or output has to be reinterpreted as 3D");
+                                    && (!gemm_info.broadcast_bias),
+                                    "Bias addition only supported with broadcast mode in case the input or output has to be reinterpreted as 3D");
 
     const unsigned int m = gemm_info.m;
     const unsigned int n = gemm_info.n;
diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp
index f0405bfd76..63451b49b8 100644
--- a/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp
@@ -63,7 +63,7 @@ Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1,
     ARM_COMPUTE_UNUSED(alpha);
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input0, input1, output);
     ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input0);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::F32, DataType::F16);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::F32);
     ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(input0->num_dimensions() > 4, "The number of dimensions for the LHS matrix must be <= 4");
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(input1->num_dimensions() > 3, "The number of dimensions for the RHS matrix must be <= 3");
@@ -75,7 +75,8 @@ Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1,
     ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.m0 < 2 || lhs_info.m0 > 8);
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(((rhs_info.n0 & (rhs_info.n0 - 1)) && rhs_info.n0 != 3), "Only 2,3,4,8,16 are supported for n0");
     ARM_COMPUTE_RETURN_ERROR_ON_MSG((gemm_info.reinterpret_input_as_3d || gemm_info.depth_output_gemm3d != 0) && (input2 != nullptr)
-                                    && (!gemm_info.broadcast_bias), "Bias addition only supported with broadcast mode in case the input or output has to be reinterpreted as 3D");
+                                    && (!gemm_info.broadcast_bias),
+                                    "Bias addition only supported with broadcast mode in case the input or output has to be reinterpreted as 3D");
 
     const unsigned int m = gemm_info.m;
     const unsigned int n = gemm_info.n;
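The n0 test that appears unchanged in both hunks above, ((rhs_info.n0 & (rhs_info.n0 - 1)) && rhs_info.n0 != 3), is the usual power-of-two bit trick: n0 & (n0 - 1) is zero exactly when n0 is a power of two, so the macro raises an error only for values that are neither a power of two nor 3. Note that the message lists 2,3,4,8,16, although the expression as written also admits any other power of two, including 1. A self-contained restatement:

    #include <cassert>

    // True when n0 would pass the kernel's check: a power of two, or exactly 3.
    bool n0_accepted(unsigned int n0)
    {
        return ((n0 & (n0 - 1)) == 0) || (n0 == 3);
    }

    int main()
    {
        for(unsigned int n0 : { 2u, 3u, 4u, 8u, 16u })
        {
            assert(n0_accepted(n0)); // every value named in the error message
        }
        assert(!n0_accepted(5u) && !n0_accepted(6u) && !n0_accepted(7u));
        return 0;
    }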
diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.cpp
index 411a122968..0e9ca78918 100644
--- a/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.cpp
@@ -57,7 +57,7 @@ Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1,
 {
     ARM_COMPUTE_UNUSED(alpha);
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input0, input1, output);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::F32, DataType::F16);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::F32);
     ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(input0->num_dimensions() > 4, "The number of dimensions for the LHS matrix must be <= 4");
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(input1->num_dimensions() > 3, "The number of dimensions for the RHS matrix must be <= 3");
@@ -66,7 +66,8 @@ Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1,
     ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.m0 < 1 || lhs_info.m0 > 8);
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(((rhs_info.n0 & (rhs_info.n0 - 1)) && rhs_info.n0 != 3), "Only 2,3,4,8,16 are supported for n0");
     ARM_COMPUTE_RETURN_ERROR_ON_MSG((gemm_info.reinterpret_input_as_3d || gemm_info.depth_output_gemm3d != 0) && (input2 != nullptr)
-                                    && (!gemm_info.broadcast_bias), "Bias addition only supported with broadcast mode in case the input or output has to be reinterpreted as 3D");
+                                    && (!gemm_info.broadcast_bias),
+                                    "Bias addition only supported with broadcast mode in case the input or output has to be reinterpreted as 3D");
 
     const unsigned int m = gemm_info.m;
     const unsigned int n = gemm_info.n;
diff --git a/tests/validation/CL/GEMMLowpMatrixMultiplyNative.cpp b/tests/validation/CL/GEMMLowpMatrixMultiplyNative.cpp
index 1fc8cc47c4..6243b3f7e3 100644
--- a/tests/validation/CL/GEMMLowpMatrixMultiplyNative.cpp
+++ b/tests/validation/CL/GEMMLowpMatrixMultiplyNative.cpp
@@ -75,10 +75,10 @@ const auto b_values = framework::dataset::make("batch_size", 1, 3);
 const auto m0_values_precommit = framework::dataset::make("M0", {4, 6});
 
 /** N0 values to test - Precommit */
-const auto n0_values_precommit = framework::dataset::make("N0", { 2, 4 });
+const auto n0_values_precommit = framework::dataset::make("N0", { 4 });
 
 /** K0 values to test - Precommit */
-const auto k0_values_precommit = framework::dataset::make("K0", { 4 });
+const auto k0_values_precommit = framework::dataset::make("K0", { 16 });
 
 /** M0 values to test - Nightly */
 const auto m0_values_nightly = framework::dataset::make("M0", 2, 7);
@@ -87,7 +87,7 @@ const auto m0_values_nightly = framework::dataset::make("M0", 2, 7);
 const auto n0_values_nightly = framework::dataset::make("N0", { 2, 3, 4, 8 });
 
 /** K0 values to test - Nightly */
-const auto k0_values_nightly = framework::dataset::make("K0", { 2, 3, 4, 8 });
+const auto k0_values_nightly = framework::dataset::make("K0", { 2, 3, 4, 8, 16 });
 } // namespace
 
 TEST_SUITE(CL)
diff --git a/tests/validation/CL/GEMMLowpMatrixMultiplyReshaped.cpp b/tests/validation/CL/GEMMLowpMatrixMultiplyReshaped.cpp
index 60b92bd030..de229ef7fb 100644
--- a/tests/validation/CL/GEMMLowpMatrixMultiplyReshaped.cpp
+++ b/tests/validation/CL/GEMMLowpMatrixMultiplyReshaped.cpp
@@ -84,10 +84,10 @@ const auto b_values = framework::dataset::make("batch_size", 1, 3);
 const auto m0_values_precommit = framework::dataset::make("M0", {4, 6});
 
 /** N0 values to test - Precommit */
-const auto n0_values_precommit = framework::dataset::make("N0", { 2, 4 });
+const auto n0_values_precommit = framework::dataset::make("N0", { 4 });
 
 /** K0 values to test - Precommit */
-const auto k0_values_precommit = framework::dataset::make("K0", { 4 });
+const auto k0_values_precommit = framework::dataset::make("K0", { 16 });
 
 /** V0 values to test - Precommit */
 const auto v0_values_precommit = framework::dataset::make("V0", 1, 3);
@@ -102,7 +102,7 @@ const auto m0_values_nightly = framework::dataset::make("M0", 2, 7);
 const auto n0_values_nightly = framework::dataset::make("N0", { 2, 3, 4, 8 });
 
 /** K0 values to test - Nightly */
-const auto k0_values_nightly = framework::dataset::make("K0", { 2, 3, 4, 8 });
+const auto k0_values_nightly = framework::dataset::make("K0", { 2, 3, 4, 8, 16 });
 
 /** V0 values to test - Nightly */
 const auto v0_values_nightly = framework::dataset::make("V0", 1, 4);
diff --git a/tests/validation/CL/GEMMLowpMatrixMultiplyReshapedOnlyRHS.cpp b/tests/validation/CL/GEMMLowpMatrixMultiplyReshapedOnlyRHS.cpp
index a907c5b1a1..6ead11ab23 100644
--- a/tests/validation/CL/GEMMLowpMatrixMultiplyReshapedOnlyRHS.cpp
+++ b/tests/validation/CL/GEMMLowpMatrixMultiplyReshapedOnlyRHS.cpp
@@ -85,10 +85,10 @@ const auto b_values = framework::dataset::make("batch_size", 1, 3);
 const auto m0_values_precommit = framework::dataset::make("M0", {4, 6});
 
 /** N0 values to test - Precommit */
-const auto n0_values_precommit = framework::dataset::make("N0", { 2, 4 });
+const auto n0_values_precommit = framework::dataset::make("N0", { 4 });
 
 /** K0 values to test - Precommit */
-const auto k0_values_precommit = framework::dataset::make("K0", { 4 });
+const auto k0_values_precommit = framework::dataset::make("K0", { 16 });
 
 /** H0 values to test - Precommit */
 const auto h0_values_precommit = framework::dataset::make("H0", 1, 3);
@@ -100,7 +100,7 @@ const auto m0_values_nightly = framework::dataset::make("M0", 2, 8);
 const auto n0_values_nightly = framework::dataset::make("N0", { 2, 3, 4, 8 });
 
 /** K0 values to test - Nightly */
-const auto k0_values_nightly = framework::dataset::make("K0", { 2, 3, 4, 8 });
+const auto k0_values_nightly = framework::dataset::make("K0", { 2, 3, 4, 8, 16 });
 
 /** H0 values to test - Nightly */
 const auto h0_values_nightly = framework::dataset::make("H0", 1, 4);
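The dataset changes above use the two forms of framework::dataset::make seen throughout these files: an explicit value list such as make("K0", { 2, 3, 4, 8, 16 }) and a range form such as make("M0", 2, 7), which in the ACL test framework appears to enumerate a half-open interval (here 2..6). A hedged stand-in with plain vectors, for illustration only and not the real framework:

    #include <string>
    #include <vector>

    // Minimal analogue of the two make() overloads used in the test datasets.
    struct NamedDataset
    {
        std::string      name;
        std::vector<int> values;
    };

    NamedDataset make(std::string name, std::vector<int> values)
    {
        return { std::move(name), std::move(values) };
    }

    NamedDataset make(std::string name, int start, int end) // assumed half-open: [start, end)
    {
        std::vector<int> values;
        for(int v = start; v < end; ++v)
        {
            values.push_back(v);
        }
        return { std::move(name), std::move(values) };
    }

    int main()
    {
        const auto k0 = make("K0", { 2, 3, 4, 8, 16 }); // the nightly list after this patch
        const auto m0 = make("M0", 2, 7);               // 2, 3, 4, 5, 6
        return (k0.values.size() + m0.values.size() == 10) ? 0 : 1;
    }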
diff --git a/tests/validation/CL/GEMMMatrixMultiply.cpp b/tests/validation/CL/GEMMMatrixMultiply.cpp
index 8f7c0aaef1..0bc8b02825 100644
--- a/tests/validation/CL/GEMMMatrixMultiply.cpp
+++ b/tests/validation/CL/GEMMMatrixMultiply.cpp
@@ -70,7 +70,7 @@ constexpr float tolerance_num_f16 = 0.02f;
 const auto alpha_values = framework::dataset::make("alpha", {1.0f, -0.75f} );
 
 /** Beta values to test - Precommit */
-const auto beta_values = framework::dataset::make("beta", {-0.75f, 0.0f} );
+const auto beta_values = framework::dataset::make("beta", {-0.35f, 0.0f} );
 
 /** M values to test - Precommit */
 const auto m_values_precommit = framework::dataset::make("M", {37, 1});
@@ -113,7 +113,7 @@ const auto act_values = framework::dataset::make("Activation",
 });
 
 /** Broadcast bias from vector to matrix */
-const auto broadcast_bias_values = framework::dataset::make("broadcast_bias", {false, true} );
+const auto broadcast_bias_values = framework::dataset::make("broadcast_bias", { false, true } );
 
 /** GPU architectures values to test */
 const auto gpu_arch_values = framework::dataset::make("GPUArch",
diff --git a/tests/validation/CL/GEMMMatrixMultiplyInterleavedTransposed.cpp b/tests/validation/CL/GEMMMatrixMultiplyInterleavedTransposed.cpp
index 5d21cf4f34..c3b461a0d8 100644
--- a/tests/validation/CL/GEMMMatrixMultiplyInterleavedTransposed.cpp
+++ b/tests/validation/CL/GEMMMatrixMultiplyInterleavedTransposed.cpp
@@ -80,7 +80,7 @@ constexpr float tolerance_num_f16 = 0.02f;
 const auto alpha_values = framework::dataset::make("alpha", {1.0f, -0.75f} );
 
 /** Beta values to test - Precommit */
-const auto beta_values = framework::dataset::make("beta", {-0.75f, 0.0f} );
+const auto beta_values = framework::dataset::make("beta", {-0.35f, 0.0f} );
 
 /** M values to test - Precommit */
 const auto m_values_precommit = framework::dataset::make("M", 37);
diff --git a/tests/validation/CL/GEMMMatrixMultiplyNative.cpp b/tests/validation/CL/GEMMMatrixMultiplyNative.cpp
index 33b01d8ee6..0dac2e8039 100644
--- a/tests/validation/CL/GEMMMatrixMultiplyNative.cpp
+++ b/tests/validation/CL/GEMMMatrixMultiplyNative.cpp
@@ -63,9 +63,6 @@ namespace
 RelativeTolerance<float> rel_tolerance_f32(0.001f);
 constexpr float abs_tolerance_f32(0.0001f);
 
-RelativeTolerance<half> rel_tolerance_f16(half(0.2));
-constexpr float tolerance_num_f16 = 0.02f;
-
 /** Alpha values to test - Precommit */
 const auto a_values = framework::dataset::make("alpha", {1.0f, -0.75f} );
 
@@ -98,10 +95,10 @@ const auto act_values = framework::dataset::make("Activation",
 });
 
 /** M0 values to test - Precommit */
-const auto m0_values_precommit = framework::dataset::make("M0", {4, 6});
+const auto m0_values_precommit = framework::dataset::make("M0", { 4, 6 });
 
 /** N0 values to test - Precommit */
-const auto n0_values_precommit = framework::dataset::make("N0", { 2, 4 });
+const auto n0_values_precommit = framework::dataset::make("N0", { 4 });
 
 /** K0 values to test - Precommit */
 const auto k0_values_precommit = framework::dataset::make("K0", { 4 });
@@ -119,7 +116,7 @@ const auto n0_values_nightly = framework::dataset::make("N0", { 2, 3, 4, 8 });
 const auto k0_values_nightly = framework::dataset::make("K0", { 2, 3, 4, 8 });
 
 /** Broadcast bias from vector to matrix */
-const auto broadcast_bias_values = framework::dataset::make("broadcast_bias", {false, true} );
+const auto broadcast_bias_values = framework::dataset::make("broadcast_bias", { false, true } );
 
 /** Configuration test */
 void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned int k_value, unsigned int b_value, unsigned int m0_value, unsigned int n0_value, unsigned int k0_value, bool broadcast_bias, DataType data_type, const ActivationLayerInfo &act_info)
@@ -264,84 +261,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyNative3DFixture<float>, f
     validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
 }
 TEST_SUITE_END() // FP32
-
-TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyNativeFixture<half>, framework::DatasetMode::ALL,
-                       combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
-                           m_values,
-                           n_values),
-                           k_values),
-                           b_values),
-                           m0_values_precommit),
-                           n0_values_precommit),
-                           k0_values_precommit),
-                           framework::dataset::make("DataType", DataType::F16)),
-                           a_values),
-                           beta_values),
-                           broadcast_bias_values),
-                           act_values))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
-}
-
-FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMMatrixMultiplyNativeFixture<half>, framework::DatasetMode::NIGHTLY,
-                       combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
-                           m_values,
-                           n_values),
-                           k_values),
-                           b_values),
-                           m0_values_nightly),
-                           n0_values_nightly),
-                           k0_values_nightly),
-                           framework::dataset::make("DataType", DataType::F16)),
-                           a_values),
-                           beta_values),
-                           broadcast_bias_values),
-                           act_values))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
-}
-
-FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMMatrixMultiplyNative3DFixture<half>, framework::DatasetMode::ALL,
-                       combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
-                           m_w_values,
-                           m_h_values),
-                           n_values),
-                           k_values),
-                           b_values),
-                           m0_values_precommit),
-                           n0_values_precommit),
-                           k0_values_precommit),
-                           framework::dataset::make("DataType", DataType::F16)),
-                           a_values),
-                           beta_values),
-                           act_values))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
-}
-
-FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyNative3DFixture<half>, framework::DatasetMode::NIGHTLY,
-                       combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
-                           m_w_values,
-                           m_h_values),
-                           n_values),
-                           k_values),
-                           b_values),
-                           m0_values_nightly),
-                           n0_values_nightly),
-                           k0_values_nightly),
-                           framework::dataset::make("DataType", DataType::F16)),
-                           a_values),
-                           beta_values),
-                           act_values))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
-}
-TEST_SUITE_END() // FP16
 TEST_SUITE_END() // Float
 TEST_SUITE_END() // GEMMMatrixMulipltyNative
 TEST_SUITE_END() // CL
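With the FP16 cases gone, only the FP32 tolerances remain in this file. The validate(...) calls above pass both a relative and an absolute tolerance; a plausible reading (an assumption about the framework, not a quote of it) is that an element passes when it is within either bound, so near-zero reference values are not failed by the relative test alone:

    #include <cmath>
    #include <cstdio>

    // Illustrative comparison rule only; the ACL validation internals may differ.
    bool element_ok(float target, float reference, float rel_tol = 0.001f, float abs_tol = 0.0001f)
    {
        const float diff = std::fabs(target - reference);
        return (diff <= abs_tol) || (diff <= rel_tol * std::fabs(reference));
    }

    int main()
    {
        std::printf("%d\n", element_ok(1000.5f, 1000.0f)); // 1: within the 0.1% relative bound
        std::printf("%d\n", element_ok(0.00005f, 0.0f));   // 1: absolute bound rescues near-zero references
        std::printf("%d\n", element_ok(1.01f, 1.0f));      // 0: outside both bounds
        return 0;
    }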
diff --git a/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp b/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp
index 25221451ed..997c510e42 100644
--- a/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp
+++ b/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp
@@ -71,14 +71,11 @@ namespace
 RelativeTolerance<float> rel_tolerance_f32(0.001f);
 constexpr float abs_tolerance_f32(0.0001f);
 
-RelativeTolerance<half> rel_tolerance_f16(half(0.2));
-constexpr float tolerance_num_f16 = 0.02f;
-
 /** Alpha values to test - Precommit */
 const auto a_values = framework::dataset::make("alpha", {1.0f, -0.75f} );
 
 /** Beta values to test - Precommit */
-const auto beta_values = framework::dataset::make("beta", {-0.75f, 0.0f} );
+const auto beta_values = framework::dataset::make("beta", {-0.35f, 0.0f} );
 
 /** M values to test */
 const auto m_values = framework::dataset::make("M", 37);
@@ -109,7 +106,7 @@ const auto act_values = framework::dataset::make("Activation",
 const auto m0_values_precommit = framework::dataset::make("M0", {4, 6});
 
 /** N0 values to test - Precommit */
-const auto n0_values_precommit = framework::dataset::make("N0", { 2, 4 });
+const auto n0_values_precommit = framework::dataset::make("N0", { 4 });
 
 /** K0 values to test - Precommit */
 const auto k0_values_precommit = framework::dataset::make("K0", { 4 });
@@ -142,7 +139,7 @@ const auto i_values_lhs = framework::dataset::make("interleave_lhs", { true, fal
 const auto i_values_rhs = framework::dataset::make("interleave_rhs", { true, false });
 
 /** Broadcast bias from vector to matrix */
-const auto broadcast_bias_values = framework::dataset::make("broadcast_bias", {false, true} );
+const auto broadcast_bias_values = framework::dataset::make("broadcast_bias", { false, true } );
 
 /** Configuration test */
 void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned int k_value, unsigned int b_value, unsigned int m0_value, unsigned int n0_value, unsigned int k0_value, unsigned int v0_value, unsigned int h0_value, bool i_value_lhs, bool i_value_rhs, bool broadcast_bias, DataType data_type, const ActivationLayerInfo &act_info)
@@ -323,100 +320,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyReshaped3DFixture<float>,
     validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
 }
 TEST_SUITE_END() // FP32
-
-TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyReshapedFixture<half>, framework::DatasetMode::ALL,
-                       combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
-                           m_values,
-                           n_values),
-                           k_values),
-                           b_values),
-                           m0_values_precommit),
-                           n0_values_precommit),
-                           k0_values_precommit),
-                           v0_values_precommit),
-                           h0_values_precommit),
-                           i_values_lhs),
-                           i_values_rhs),
-                           framework::dataset::make("DataType", DataType::F16)),
-                           a_values),
-                           beta_values),
-                           broadcast_bias_values),
-                           act_values))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
-}
-
-FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMMatrixMultiplyReshapedFixture<half>, framework::DatasetMode::NIGHTLY,
-                       combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
-                           m_values,
-                           n_values),
-                           k_values),
-                           b_values),
-                           m0_values_nightly),
-                           n0_values_nightly),
-                           k0_values_nightly),
-                           v0_values_nightly),
-                           h0_values_nightly),
-                           i_values_lhs),
-                           i_values_rhs),
-                           framework::dataset::make("DataType", DataType::F16)),
-                           a_values),
-                           beta_values),
-                           broadcast_bias_values),
-                           act_values))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
-}
-
-FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMMatrixMultiplyReshaped3DFixture<half>, framework::DatasetMode::ALL,
-                       combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
-                           m_w_values,
-                           m_h_values),
-                           n_values),
-                           k_values),
-                           b_values),
-                           m0_values_precommit),
-                           n0_values_precommit),
-                           k0_values_precommit),
-                           v0_values_precommit),
-                           h0_values_precommit),
-                           i_values_lhs),
-                           i_values_rhs),
-                           framework::dataset::make("DataType", DataType::F16)),
-                           a_values),
-                           beta_values),
-                           act_values))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
-}
-
-FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyReshaped3DFixture<half>, framework::DatasetMode::NIGHTLY,
-                       combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
-                           m_w_values,
-                           m_h_values),
-                           n_values),
-                           k_values),
-                           b_values),
-                           m0_values_nightly),
-                           n0_values_nightly),
-                           k0_values_nightly),
-                           v0_values_nightly),
-                           h0_values_nightly),
-                           i_values_lhs),
-                           i_values_rhs),
-                           framework::dataset::make("DataType", DataType::F16)),
-                           a_values),
-                           beta_values),
-                           act_values))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
-}
-TEST_SUITE_END() // FP16
 TEST_SUITE_END() // Float
 TEST_SUITE_END() // GEMMMatrixMultiplyReshaped
 TEST_SUITE_END() // CL
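The nested combine(...) chains removed above are what make these suites expensive: each extra dataset multiplies the number of generated cases. A small stand-in with plain arithmetic shows the effect of the precommit trims in this patch (N0 from { 2, 4 } to { 4 }), which halves that axis's contribution; the axes below are a reduced, hypothetical subset of the real suites:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main()
    {
        const std::vector<int> m0_old = { 4, 6 }, n0_old = { 2, 4 };
        const std::vector<int> m0_new = { 4, 6 }, n0_new = { 4 };

        // A cartesian product over the axes, as combine() builds for the fixtures.
        const std::size_t before = m0_old.size() * n0_old.size(); // 4 cases
        const std::size_t after  = m0_new.size() * n0_new.size(); // 2 cases

        std::printf("precommit cases: %zu -> %zu\n", before, after);
        return 0;
    }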
diff --git a/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp b/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp
index dd993af481..5baab611c6 100644
--- a/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp
+++ b/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp
@@ -67,14 +67,11 @@ namespace
 RelativeTolerance<float> rel_tolerance_f32(0.001f);
 constexpr float abs_tolerance_f32(0.0001f);
 
-RelativeTolerance<half> rel_tolerance_f16(half(0.2));
-constexpr float tolerance_num_f16 = 0.02f;
-
 /** Alpha values to test - Precommit */
 const auto a_values = framework::dataset::make("alpha", {1.0f, -0.75f} );
 
 /** Beta values to test - Precommit */
-const auto beta_values = framework::dataset::make("beta", {-0.75f, 0.0f} );
+const auto beta_values = framework::dataset::make("beta", {-0.35f, 0.0f} );
 
 /** M values to test */
 const auto m_values = framework::dataset::make("M", 37);
@@ -105,7 +102,7 @@ const auto act_values = framework::dataset::make("Activation",
 const auto m0_values_precommit = framework::dataset::make("M0", {4, 6});
 
 /** N0 values to test - Precommit */
-const auto n0_values_precommit = framework::dataset::make("N0", { 2, 4 });
+const auto n0_values_precommit = framework::dataset::make("N0", { 4 });
 
 /** K0 values to test - Precommit */
 const auto k0_values_precommit = framework::dataset::make("K0", { 4 });
@@ -132,7 +129,7 @@ const auto i_values_rhs = framework::dataset::make("interleave_rhs", { true, fal
 const auto t_values_rhs = framework::dataset::make("transpose_rhs", { true, false });
 
 /** Broadcast bias from vector to matrix */
-const auto broadcast_bias_values = framework::dataset::make("broadcast_bias", {false, true} );
+const auto broadcast_bias_values = framework::dataset::make("broadcast_bias", { false, true } );
 
 /** Configuration test */
 void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned int k_value, unsigned int b_value, unsigned int m0_value, unsigned int n0_value, unsigned int k0_value, unsigned int h0_value, bool i_value_rhs, bool t_value_rhs, bool broadcast_bias, DataType data_type, const ActivationLayerInfo &act_info)
@@ -301,96 +298,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<
     validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
 }
 TEST_SUITE_END() // FP32
-
-TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<half>, framework::DatasetMode::ALL,
-                       combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
-                           m_values,
-                           n_values),
-                           k_values),
-                           b_values),
-                           m0_values_precommit),
-                           n0_values_precommit),
-                           k0_values_precommit),
-                           h0_values_precommit),
-                           i_values_rhs),
-                           t_values_rhs),
-                           framework::dataset::make("DataType", DataType::F16)),
-                           a_values),
-                           beta_values),
-                           broadcast_bias_values),
-                           act_values))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
-}
-
-FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<half>, framework::DatasetMode::NIGHTLY,
-                       combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
-                           m_values,
-                           n_values),
-                           k_values),
-                           b_values),
-                           m0_values_nightly),
-                           n0_values_nightly),
-                           k0_values_nightly),
-                           h0_values_nightly),
-                           i_values_rhs),
-                           t_values_rhs),
-                           framework::dataset::make("DataType", DataType::F16)),
-                           a_values),
-                           beta_values),
-                           broadcast_bias_values),
-                           act_values))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
-}
-
-FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<half>, framework::DatasetMode::ALL,
-                       combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
-                           m_w_values,
-                           m_h_values),
-                           n_values),
-                           k_values),
-                           b_values),
-                           m0_values_precommit),
-                           n0_values_precommit),
-                           k0_values_precommit),
-                           h0_values_precommit),
-                           i_values_rhs),
-                           t_values_rhs),
-                           framework::dataset::make("DataType", DataType::F16)),
-                           a_values),
-                           beta_values),
-                           act_values))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
-}
-
-FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<half>, framework::DatasetMode::NIGHTLY,
-                       combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
-                           m_w_values,
-                           m_h_values),
-                           n_values),
-                           k_values),
-                           b_values),
-                           m0_values_nightly),
-                           n0_values_nightly),
-                           k0_values_nightly),
-                           h0_values_nightly),
-                           i_values_rhs),
-                           t_values_rhs),
-                           framework::dataset::make("DataType", DataType::F16)),
-                           a_values),
-                           beta_values),
-                           act_values))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
-}
-TEST_SUITE_END() // FP16
 TEST_SUITE_END() // Float
 TEST_SUITE_END() // GEMMMatrixMulipltyReshapedOnlyRHS
 TEST_SUITE_END() // CL
diff --git a/tests/validation/CL/GEMMReshapeLHSMatrix.cpp b/tests/validation/CL/GEMMReshapeLHSMatrix.cpp
index fe8b7ffd1c..e6c3b40124 100644
--- a/tests/validation/CL/GEMMReshapeLHSMatrix.cpp
+++ b/tests/validation/CL/GEMMReshapeLHSMatrix.cpp
@@ -66,7 +66,7 @@ const auto data_types = framework::dataset::make("DataType", { DataType::QASYMM8
 const auto b_values = framework::dataset::make("batchsize", 1, 3);
 
 /** M0 values to test - Precommit */
-const auto m0_values_precommit = framework::dataset::make("M0", { 2, 4, 5 });
+const auto m0_values_precommit = framework::dataset::make("M0", { 4, 5 });
 
 /** K0 values to test - Precommit */
 const auto k0_values_precommit = framework::dataset::make("K0", { 2, 4 });
--
cgit v1.2.1