author    Gian Marco Iodice <gianmarco.iodice@arm.com>    2020-10-22 16:37:12 +0100
committer Gian Marco Iodice <gianmarco.iodice@arm.com>    2020-10-26 14:46:44 +0000
commit    9ae06d4986bc3055f7786c1097b465bd321cf8eb (patch)
tree      adb50e965f860893fe83e3937026056bf1f054c9 /tests
parent    5f91041aef3eb7373d5d2cebcbe60f279da85904 (diff)
download  ComputeLibrary-9ae06d4986bc3055f7786c1097b465bd321cf8eb.tar.gz
COMPMID-3925: Dispatch CLGEMM with no padding y requirement
- Add has_pad_y flag in GEMMKernelInfo
- Skip reinterpret as 3D in CLGEMMMatrixMultiplyReshapedOnlyRHSKernel if has_pad_y = false
- Add test to validate CLGEMMMatrixMultiplyReshapedOnlyRHSKernel with has_pad_y = false/true
- Configure two variants of CLGEMMMatrixMultiplyReshapedOnlyRHSKernel to run with has_pad_y = false/true in CLGEMM
- Check whether the lhs/dst tensors have pad y. If not, run CLGEMMMatrixMultiplyReshapedOnlyRHSKernel without the padding requirement

Change-Id: I68bb43389789736d676b899ac7c77fd9138babaf
Signed-off-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4248
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
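The run-time check described in the last bullet can be pictured with a short sketch. This is illustrative only: the helper name can_skip_pad_y and its placement are assumptions, not the actual CLGEMM code; only ITensorInfo::padding() and the top/bottom members of PaddingSize are real Compute Library API.

#include "arm_compute/core/ITensorInfo.h"
#include "arm_compute/core/Types.h"

// Hedged sketch of the check the commit describes: CLGEMM configures two
// variants of CLGEMMMatrixMultiplyReshapedOnlyRHSKernel (has_pad_y = false
// and has_pad_y = true) and, at run time, enqueues the padding-free variant
// only when neither lhs nor dst carries padding along y.
// NOTE: can_skip_pad_y is a hypothetical helper, not library code.
static bool can_skip_pad_y(const arm_compute::ITensorInfo &lhs, const arm_compute::ITensorInfo &dst)
{
    const arm_compute::PaddingSize lhs_pad = lhs.padding();
    const arm_compute::PaddingSize dst_pad = dst.padding();
    // Only top/bottom (y) padding matters for the has_pad_y requirement;
    // padding along x is still allowed on the padding-free variant.
    return (lhs_pad.top == 0) && (lhs_pad.bottom == 0) && (dst_pad.top == 0) && (dst_pad.bottom == 0);
}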
Diffstat (limited to 'tests')
-rw-r--r--  tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp          15
-rw-r--r--  tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp  120
-rw-r--r--  tests/validation/fixtures/GEMMFixture.h                     15
3 files changed, 44 insertions(+), 106 deletions(-)
diff --git a/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp b/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp
index 98149ce149..5629a80f8e 100644
--- a/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp
+++ b/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp
@@ -340,6 +340,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zi
false /**< reinterpret the input as 3D */,
true /**< Flag used to broadcast the bias addition */,
false /**< wider accumm */,
+ false /**< has pad y */,
ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
1 /**< Multiplication factor for the width of the 1xW transposed block */,
1 /**< Multiplication factor for the height of the 4x4 interleaved block */,
@@ -354,6 +355,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zi
false /**< reinterpret the input as 3D */,
true /**< Flag used to broadcast the bias addition */,
false /**< wider accumm */,
+ false /**< has pad y */,
ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
1 /**< Multiplication factor for the width of the 1xW transposed block */,
1 /**< Multiplication factor for the height of the 4x4 interleaved block */,
@@ -371,6 +373,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zi
false /**< reinterpret the input as 3D */,
false /**< Flag used to broadcast the bias addition */,
false /**< wider accumm */,
+ false /**< has pad y */,
ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
1 /**< Multiplication factor for the width of the 1xW transposed block */,
1 /**< Multiplication factor for the height of the 4x4 interleaved block */,
@@ -386,6 +389,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zi
false /**< reinterpret the input as 3D */,
false /**< Flag used to broadcast the bias addition */,
true /**< wider accumm */,
+ true /**< has pad y */,
ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
1 /**< Multiplication factor for the width of the 1xW transposed block */,
1 /**< Multiplication factor for the height of the 4x4 interleaved block */,
@@ -400,6 +404,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zi
false /**< reinterpret the input as 3D */,
false /**< Flag used to broadcast the bias addition */,
false /**< wider accumm */,
+ false /**< has pad y */,
ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
1 /**< Multiplication factor for the width of the 1xW transposed block */,
1 /**< Multiplication factor for the height of the 4x4 interleaved block */,
@@ -573,6 +578,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zi
false /**< reinterpret the input as 3D */,
true /**< Flag used to broadcast the bias addition */,
false /**< wider accumm */,
+ false /**< has pad y */,
ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
1 /**< Multiplication factor for the width of the 1xW transposed block */,
1 /**< Multiplication factor for the height of the 4x4 interleaved block */,
@@ -586,6 +592,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zi
false /**< reinterpret the input as 3D */,
true /**< Flag used to broadcast the bias addition */,
false /**< wider accumm */,
+ false /**< has pad y */,
ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
1 /**< Multiplication factor for the width of the 1xW transposed block */,
1 /**< Multiplication factor for the height of the 4x4 interleaved block */,
@@ -599,6 +606,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zi
false /**< reinterpret the input as 3D */,
true /**< Flag used to broadcast the bias addition */,
false /**< wider accumm */,
+ false /**< has pad y */,
ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
1 /**< Multiplication factor for the width of the 1xW transposed block */,
1 /**< Multiplication factor for the height of the 4x4 interleaved block */,
@@ -613,6 +621,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zi
false /**< reinterpret the input as 3D */,
true /**< Flag used to broadcast the bias addition */,
false /**< wider accumm */,
+ false /**< has pad y */,
ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
1 /**< Multiplication factor for the width of the 1xW transposed block */,
1 /**< Multiplication factor for the height of the 4x4 interleaved block */,
@@ -626,6 +635,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zi
false /**< reinterpret the input as 3D */,
true /**< Flag used to broadcast the bias addition */,
false /**< wider accumm */,
+ false /**< has pad y */,
ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
1 /**< Multiplication factor for the width of the 1xW transposed block */,
1 /**< Multiplication factor for the height of the 4x4 interleaved block */,
@@ -939,6 +949,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zi
false /**< reinterpret the input as 3D */,
true /**< Flag used to broadcast the bias addition */,
false /**< wider accumm */,
+ false /**< has pad y */,
ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
1 /**< Multiplication factor for the width of the 1xW transposed block */,
1 /**< Multiplication factor for the height of the 4x4 interleaved block */,
@@ -952,6 +963,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zi
false /**< reinterpret the input as 3D */,
true /**< Flag used to broadcast the bias addition */,
false /**< wider accumm */,
+ false /**< has pad y */,
ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
1 /**< Multiplication factor for the width of the 1xW transposed block */,
1 /**< Multiplication factor for the height of the 4x4 interleaved block */,
@@ -965,6 +977,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zi
false /**< reinterpret the input as 3D */,
true /**< Flag used to broadcast the bias addition */,
false /**< wider accumm */,
+ false /**< has pad y */,
ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
1 /**< Multiplication factor for the width of the 1xW transposed block */,
1 /**< Multiplication factor for the height of the 4x4 interleaved block */,
@@ -979,6 +992,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zi
false /**< reinterpret the input as 3D */,
true /**< Flag used to broadcast the bias addition */,
false /**< wider accumm */,
+ false /**< has pad y */,
ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
1 /**< Multiplication factor for the width of the 1xW transposed block */,
1 /**< Multiplication factor for the height of the 4x4 interleaved block */,
@@ -992,6 +1006,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zi
false /**< reinterpret the input as 3D */,
true /**< Flag used to broadcast the bias addition */,
false /**< wider accumm */,
+ false /**< has pad y */,
ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
1 /**< Multiplication factor for the width of the 1xW transposed block */,
1 /**< Multiplication factor for the height of the 4x4 interleaved block */,
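As the initializer lists above show, the new flag slots in between the wider-accumulation flag and the activation info. For orientation, here is a sketch of the GEMMKernelInfo aggregate consistent with those lists; the exact member set, defaults, and header location are assumptions from this diff and may differ in the library source:

// Hedged sketch of GEMMKernelInfo with the new flag; field comments mirror
// the /**< ... */ annotations used in the test initializer lists above.
struct GEMMKernelInfo
{
    unsigned int        m{ 0 };                           // Number of LHS rows
    unsigned int        n{ 0 };                           // Number of RHS columns
    unsigned int        k{ 0 };                           // Accumulation depth
    unsigned int        depth_output_gemm3d{ 0 };         // Depth of the output in case of GEMM3D
    bool                reinterpret_input_as_3d{ false }; // Reinterpret the input as 3D
    bool                broadcast_bias{ false };          // Broadcast the bias addition
    bool                fp_mixed_precision{ false };      // "wider accumm" in the comments above
    bool                has_pad_y{ false };               // New: lhs/dst tensors have padding along y
    ActivationLayerInfo activation_info{};                // Fused activation function
    int                 mult_transpose1xW_width{ 1 };     // Multiplication factor, 1xW transposed block
    int                 mult_interleave4x4_height{ 1 };   // Multiplication factor, 4x4 interleaved block
};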
diff --git a/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp b/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp
index d792afac1d..33912ae2ba 100644
--- a/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp
+++ b/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp
@@ -92,13 +92,12 @@ const auto n_values = framework::dataset::make("N", 51);
const auto k_values = framework::dataset::make("K", 23);
/** Batch size values to test */
-const auto b_values = framework::dataset::make("batch_size", 1, 3);
+const auto b_values = framework::dataset::make("batch_size", 2);
/** Activation values to test */
const auto act_values = framework::dataset::make("Activation",
{
- ActivationLayerInfo(),
- ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 8.f, 2.f),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, -0.8f, 10.f),
});
/** M0 values to test - precommit */
@@ -211,70 +210,6 @@ bool validate_configuration(unsigned int m_value, unsigned int n_value, unsigned
CLGEMMMatrixMultiplyReshapedOnlyRHS gemm;
return bool(gemm.validate(&lhs, &rhs_reshaped, &bias, &dst, alpha, beta, lhs_info, rhs_info, kernel_info));
}
-
-/** Zero padding test */
-bool validate_zero_padding(unsigned int m_value, unsigned int n_value, unsigned int k_value, unsigned int b_value,
- unsigned int m0_value, unsigned int n0_value, unsigned int k0_value, unsigned int h0_value,
- bool i_value_rhs, bool t_value_rhs, bool export_to_cl_image, bool broadcast_bias, bool input_as_3d, unsigned int depth_output_gemm3d, const ActivationLayerInfo &act_info,
- DataType dt_input0, DataType dt_input1, DataType dt_input2, DataType dt_output, float alpha, float beta)
-{
- const unsigned int M = m_value;
- const unsigned int N = n_value;
- const unsigned int K = k_value;
-
- GEMMLHSMatrixInfo lhs_info;
- lhs_info.m0 = m0_value;
- lhs_info.k0 = k0_value;
-
- GEMMRHSMatrixInfo rhs_info;
- rhs_info.n0 = n0_value;
- rhs_info.k0 = k0_value;
- rhs_info.h0 = h0_value;
- rhs_info.interleave = i_value_rhs;
- rhs_info.transpose = t_value_rhs;
- rhs_info.export_to_cl_image = export_to_cl_image;
-
- GEMMKernelInfo kernel_info;
- kernel_info.m = M;
- kernel_info.n = N;
- kernel_info.k = K;
- kernel_info.depth_output_gemm3d = depth_output_gemm3d;
- kernel_info.reinterpret_input_as_3d = input_as_3d;
- kernel_info.broadcast_bias = broadcast_bias;
- kernel_info.activation_info = act_info;
-
- const TensorShape lhs_shape(K, M, b_value);
- const TensorShape rhs_shape(N, K, b_value);
- const TensorShape rhs_shape_reshaped = compute_rhs_reshaped_shape(TensorInfo(rhs_shape, 1, dt_input1),
- rhs_info);
-
- const TensorShape dst_shape = compute_mm_shape(TensorInfo(lhs_shape, 1, dt_input0),
- TensorInfo(rhs_shape_reshaped, 1, dt_input1),
- kernel_info);
-
- const TensorShape bias_shape(N,
- M, // Correct calculation should be: broadcast_bias? 1 : M, it's wrong here on purpose just for validation test
- broadcast_bias? 1 : b_value);
-
- // Create tensors
- CLTensor lhs = create_tensor<CLTensor>(lhs_shape, dt_input0);
- CLTensor rhs_reshaped = create_tensor<CLTensor>(rhs_shape_reshaped, dt_input1);
- CLTensor bias = create_tensor<CLTensor>(bias_shape, dt_input2);
- CLTensor dst = create_tensor<CLTensor>(dst_shape, dt_output);
-
- ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Validate zero-padding
- CLGEMMMatrixMultiplyReshapedOnlyRHS gemm;
-
- gemm.configure(&lhs, &rhs_reshaped, &bias, &dst, alpha, beta, lhs_info, rhs_info, kernel_info);
-
- // Padding can be added along rhs and bias's X dimension
- return dst.info()->padding().empty() && lhs.info()->padding().empty() && bias.info()->padding().bottom == 0 && bias.info()->padding().top == 0;
-}
} // namespace
TEST_SUITE(CL)
@@ -326,33 +261,6 @@ b_value, m0_value, n0_value, k0_value, broadcast_bias, input_as_3d, depth_output
ARM_COMPUTE_EXPECT(status == expected_value, framework::LogLevel::ERRORS);
}
-/** Validate zero padding tests
- *
- * A series of validation tests to check that no padding is added as part of configuration for 4 different scenarios.
- *
- * Checks performed in order:
- * - No partial blocks in both x and y dimensions
- * - Partial blocks in x dimension
- * - Partial blocks in y dimension
- * - Partial blocks in both x and y dimensions
- * - Special case: partial_n0 == 9 (vstore1 should be invoked instead of vstore_partial_1)
- */
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(zip(zip(zip(
-framework::dataset::make("M", { 24, 64, 101, 1, 100 }),
-framework::dataset::make("N", { 48, 29, 16, 122, 41 })),
-framework::dataset::make("M0", { 4, 8, 7, 2, 1 })),
-framework::dataset::make("N0", { 4, 4, 16, 3, 16 })),
-framework::dataset::make("export_to_cl_image", { false, true, true, false, false })),
-m_value, n_value, m0_value, n0_value, export_to_cl_image)
-{
- constexpr DataType dt = DataType::F32;
- // Disable export_to_cl_image if the target platform does not support the OpenCL cl_khr_image2d_from_buffer extension
- bool actual_export_to_cl_image = image2d_from_buffer_supported(CLKernelLibrary::get().get_device()) && export_to_cl_image;
-
- bool status = validate_zero_padding(m_value, n_value, 23, 1, m0_value, n0_value, 4, 1, false, false, actual_export_to_cl_image, false, 0, 0, ActivationLayerInfo(), dt, dt, dt, dt, 1.0f, 1.0f);
- ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
-
TEST_SUITE(Float)
TEST_SUITE(FP32)
@@ -443,7 +351,7 @@ FIXTURE_DATA_TEST_CASE(RunNightly, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<fl
}
FIXTURE_DATA_TEST_CASE(RunPrecommit3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<float>, framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
m_w_values,
m_h_values),
n_values),
@@ -456,6 +364,7 @@ FIXTURE_DATA_TEST_CASE(RunPrecommit3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixt
i_values_rhs),
t_values_rhs),
framework::dataset::make("export_to_cl_image_rhs", false)),
+ framework::dataset::make("has_pad_y", {false, true})),
framework::dataset::make("DataType", DataType::F32)),
a_values),
beta_values),
@@ -466,7 +375,7 @@ FIXTURE_DATA_TEST_CASE(RunPrecommit3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixt
}
FIXTURE_DATA_TEST_CASE(RunNightly3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<float>, framework::DatasetMode::NIGHTLY,
- combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
m_w_values,
m_h_values),
n_values),
@@ -479,6 +388,7 @@ FIXTURE_DATA_TEST_CASE(RunNightly3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixtur
i_values_rhs),
t_values_rhs),
framework::dataset::make("export_to_cl_image_rhs", false)),
+ framework::dataset::make("has_pad_y", {false, true})),
framework::dataset::make("DataType", DataType::F32)),
a_values),
beta_values),
@@ -552,7 +462,7 @@ FIXTURE_DATA_TEST_CASE(RunNightly, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<fl
}
FIXTURE_DATA_TEST_CASE(RunPrecommit3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<float>, framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
m_w_values,
m_h_values),
n_values),
@@ -565,6 +475,7 @@ FIXTURE_DATA_TEST_CASE(RunPrecommit3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixt
i_values_rhs),
t_values_rhs),
framework::dataset::make("export_to_cl_image_rhs", true)),
+ framework::dataset::make("has_pad_y", {false, true})),
framework::dataset::make("DataType", DataType::F32)),
a_values),
beta_values),
@@ -575,7 +486,7 @@ FIXTURE_DATA_TEST_CASE(RunPrecommit3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixt
}
FIXTURE_DATA_TEST_CASE(RunNightly3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<float>, framework::DatasetMode::NIGHTLY,
- combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
m_w_values,
m_h_values),
n_values),
@@ -588,6 +499,7 @@ FIXTURE_DATA_TEST_CASE(RunNightly3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixtur
i_values_rhs),
t_values_rhs),
framework::dataset::make("export_to_cl_image_rhs", true)),
+ framework::dataset::make("has_pad_y", {false, true})),
framework::dataset::make("DataType", DataType::F32)),
a_values),
beta_values),
@@ -647,7 +559,7 @@ FIXTURE_DATA_TEST_CASE(RunNightly, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<ha
}
FIXTURE_DATA_TEST_CASE(RunPrecommit3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<half>, framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
m_w_values,
m_h_values),
n_values),
@@ -660,6 +572,7 @@ FIXTURE_DATA_TEST_CASE(RunPrecommit3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixt
i_values_rhs),
t_values_rhs),
framework::dataset::make("export_to_cl_image_rhs", false)),
+ framework::dataset::make("has_pad_y", {false, true})),
framework::dataset::make("DataType", DataType::F16)),
a_values),
beta_values),
@@ -670,7 +583,7 @@ FIXTURE_DATA_TEST_CASE(RunPrecommit3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixt
}
FIXTURE_DATA_TEST_CASE(RunNightly3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<half>, framework::DatasetMode::NIGHTLY,
- combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
m_w_values,
m_h_values),
n_values),
@@ -683,6 +596,7 @@ FIXTURE_DATA_TEST_CASE(RunNightly3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixtur
i_values_rhs),
t_values_rhs),
framework::dataset::make("export_to_cl_image_rhs", false)),
+ framework::dataset::make("has_pad_y", {false, true})),
framework::dataset::make("DataType", DataType::F16)),
a_values),
beta_values),
@@ -756,7 +670,7 @@ FIXTURE_DATA_TEST_CASE(RunNightly, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<ha
}
FIXTURE_DATA_TEST_CASE(RunPrecommit3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<half>, framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
m_w_values,
m_h_values),
n_values),
@@ -769,6 +683,7 @@ FIXTURE_DATA_TEST_CASE(RunPrecommit3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixt
i_values_rhs),
t_values_rhs),
framework::dataset::make("export_to_cl_image_rhs", true)),
+ framework::dataset::make("has_pad_y", {false, true})),
framework::dataset::make("DataType", DataType::F16)),
a_values),
beta_values),
@@ -779,7 +694,7 @@ FIXTURE_DATA_TEST_CASE(RunPrecommit3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixt
}
FIXTURE_DATA_TEST_CASE(RunNightly3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<half>, framework::DatasetMode::NIGHTLY,
- combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
m_w_values,
m_h_values),
n_values),
@@ -792,6 +707,7 @@ FIXTURE_DATA_TEST_CASE(RunNightly3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixtur
i_values_rhs),
t_values_rhs),
framework::dataset::make("export_to_cl_image_rhs", true)),
+ framework::dataset::make("has_pad_y", {false, true})),
framework::dataset::make("DataType", DataType::F16)),
a_values),
beta_values),
diff --git a/tests/validation/fixtures/GEMMFixture.h b/tests/validation/fixtures/GEMMFixture.h
index 0a964a7114..6288b6b970 100644
--- a/tests/validation/fixtures/GEMMFixture.h
+++ b/tests/validation/fixtures/GEMMFixture.h
@@ -1125,7 +1125,7 @@ class GEMMMatrixMultiplyReshapedOnlyRHS3DValidationFixture : public framework::F
public:
template <typename...>
void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int h0,
- bool interleave_rhs, bool transpose_rhs, bool export_to_cl_image, DataType data_type, float alpha, float beta, const ActivationLayerInfo &act_info)
+ bool interleave_rhs, bool transpose_rhs, bool export_to_cl_image, bool has_pad_y, DataType data_type, float alpha, float beta, const ActivationLayerInfo &act_info)
{
GEMMLHSMatrixInfo lhs_info;
lhs_info.m0 = m0;
@@ -1147,7 +1147,7 @@ public:
const TensorShape rhs_shape(n, k, batch_size);
const TensorShape bias_shape(n, 1, 1);
- _target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, m_h, act_info);
+ _target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, m_h, act_info, has_pad_y);
_reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, m_h, act_info);
}
@@ -1161,7 +1161,7 @@ protected:
TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info,
DataType data_type, float alpha, float beta,
- unsigned int m_h, const ActivationLayerInfo &act_info)
+ unsigned int m_h, const ActivationLayerInfo &act_info, bool has_pad_y)
{
// Create tensors
TensorType lhs = create_tensor<TensorType>(lhs_shape, data_type, 1);
@@ -1181,15 +1181,22 @@ protected:
kernel_info.reinterpret_input_as_3d = false;
kernel_info.broadcast_bias = true;
kernel_info.activation_info = act_info;
+ kernel_info.has_pad_y = has_pad_y;
// The output tensor will be auto-initialized within the function
-
// Create and configure function
ReshapeRHSFunctionType reshape_rhs;
GEMMFunctionType gemm;
reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
gemm.configure(&lhs, &rhs_reshaped, &bias, &dst, alpha, beta, lhs_info, rhs_info, kernel_info);
+ if(has_pad_y)
+ {
+ // Add dummy padding into lhs to validate has_pad_y path
+ lhs.info()->extend_padding(PaddingSize(2, 0, 2, 0));
+ dst.info()->extend_padding(PaddingSize(2, 0, 1, 0));
+ }
+
ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
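One detail worth noting in the fixture change above: PaddingSize follows the (top, right, bottom, left) convention, so PaddingSize(2, 0, 2, 0) on lhs and PaddingSize(2, 0, 1, 0) on dst add padding along y only, which is exactly the state the has_pad_y = true variant must tolerate. A minimal sketch of forcing that state on an arbitrary tensor after configure() but before allocation (the helper name is illustrative, not library code):

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"

// Hedged sketch: reproduce what the fixture does after configure() to
// exercise the has_pad_y = true code path. extend_padding() grows the
// tensor's padding; (top, right, bottom, left) = (2, 0, 2, 0) pads y only.
// NOTE: force_pad_y is a hypothetical helper for illustration.
void force_pad_y(arm_compute::CLTensor &tensor)
{
    tensor.info()->extend_padding(arm_compute::PaddingSize(2, 0, 2, 0));
}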