Diffstat (limited to 'tests/validation/CL/GEMMLowpMatrixMultiplyNative.cpp')
-rw-r--r-- tests/validation/CL/GEMMLowpMatrixMultiplyNative.cpp | 71 ----------------------
1 file changed, 0 insertions(+), 71 deletions(-)
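The diff below deletes a zero-padding validation test. The scenarios in its doc comment revolve around "partial blocks": an M0 x N0 processing block overhangs the output whenever M or N is not a multiple of the block size. As a quick, hypothetical illustration of that arithmetic (the M/N/M0/N0 values are copied from the deleted dataset below; the program itself is a standalone sketch, not Compute Library code):

#include <cstdio>

int main()
{
    // Rows taken from the deleted DATA_TEST_CASE dataset below.
    const unsigned int M[]  = { 24, 63, 1, 51, 255 };
    const unsigned int N[]  = { 47, 29, 122, 20, 21 };
    const unsigned int M0[] = { 4, 8, 2, 1, 8 };
    const unsigned int N0[] = { 4, 4, 3, 1, 8 };

    for(int i = 0; i < 5; ++i)
    {
        // A non-zero remainder means a partial block along that dimension.
        const unsigned int partial_y  = M[i] % M0[i];
        const unsigned int partial_n0 = N[i] % N0[i];
        std::printf("M=%3u N=%3u M0=%u N0=%u -> partial(y)=%u partial_n0=%u\n",
                    M[i], N[i], M0[i], N0[i], partial_y, partial_n0);
    }
    // The last row (N=21, N0=8) gives partial_n0 == 5, the special case the
    // doc comment calls out (vstore1 expected instead of vstore_partial_1).
    return 0;
}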
diff --git a/tests/validation/CL/GEMMLowpMatrixMultiplyNative.cpp b/tests/validation/CL/GEMMLowpMatrixMultiplyNative.cpp
index 9e717dfac9..ce000bd8e1 100644
--- a/tests/validation/CL/GEMMLowpMatrixMultiplyNative.cpp
+++ b/tests/validation/CL/GEMMLowpMatrixMultiplyNative.cpp
@@ -88,81 +88,10 @@ const auto n0_values_nightly = framework::dataset::make("N0", { 1, 2, 3, 4, 8 })
/** K0 values to test - Nightly */
const auto k0_values_nightly = framework::dataset::make("K0", { 1, 2, 3, 4, 8, 16 });
-
-/** Zero padding test */
-bool validate_zero_padding(unsigned int m_value, unsigned int n_value, unsigned int k_value, unsigned int b_value, unsigned int m0_value, unsigned int n0_value, unsigned int k0_value, bool broadcast_bias, DataType data_type, const ActivationLayerInfo &act_info)
-{
- const unsigned int M = m_value;
- const unsigned int N = n_value;
- const unsigned int K = k_value;
-
- GEMMLHSMatrixInfo lhs_info;
- lhs_info.m0 = m0_value;
- lhs_info.k0 = k0_value;
-
- GEMMRHSMatrixInfo rhs_info;
- rhs_info.n0 = n0_value;
- rhs_info.k0 = k0_value;
-
- GEMMKernelInfo kernel_info;
- kernel_info.m = M;
- kernel_info.n = N;
- kernel_info.k = K;
- kernel_info.broadcast_bias = broadcast_bias;
- kernel_info.activation_info = act_info;
-
- const TensorShape lhs_shape(K, M, b_value);
- const TensorShape rhs_shape(N, K, b_value);
- const TensorShape dst_shape = compute_mm_shape(TensorInfo(lhs_shape, 1, data_type),
- TensorInfo(rhs_shape, 1, data_type),
- kernel_info);
-
- // Create tensors
- CLTensor lhs = create_tensor<CLTensor>(lhs_shape, data_type);
- CLTensor rhs = create_tensor<CLTensor>(rhs_shape, data_type);
- CLTensor dst = create_tensor<CLTensor>(dst_shape, DataType::S32);
-
- ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Create and configure function
- CLGEMMLowpMatrixMultiplyNative gemm;
- gemm.configure(&lhs, &rhs, &dst, lhs_info, rhs_info, GEMMReshapeInfo(m_value, n_value, k_value));
-
- // Check that no padding has been added to any of the tensors
- return dst.info()->padding().empty() && lhs.info()->padding().empty() && rhs.info()->padding().empty();
-}
} // namespace
TEST_SUITE(CL)
TEST_SUITE(GEMMLowpMatrixMultiplyNative)
-
-/** Validate zero padding tests
- *
- * A series of validation tests to check that no padding is added as part of configuration for 5 different scenarios.
- *
- * Checks performed in order:
- * - No partial blocks in both x and y dimensions
- * - Partial blocks in x dimension
- * - Partial blocks in y dimension
- * - Partial blocks in both x and y dimensions
- * - No blocks in both x and y dimensions, scalar store (N0==1)
- * - Special case: partial_n0 == 5 (vstore1 should be invoked instead of vstore_partial_1)
- */
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(zip(zip(
-framework::dataset::make("M", { 24, 63, 1, 51, 255, }),
-framework::dataset::make("N", { 47, 29, 122, 20, 21, })),
-framework::dataset::make("M0", { 4, 8, 2, 1, 8, })),
-framework::dataset::make("N0", { 4, 4, 3, 1, 8, })),
-m_value, n_value, m0_value, n0_value)
-{
- bool status = validate_zero_padding(m_value, n_value, 23, 1, m0_value, n0_value, 4, false, DataType::QASYMM8, ActivationLayerInfo());
- ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
-
-
-
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyNativeFixture, framework::DatasetMode::ALL,
combine(combine(combine(combine(combine(combine(m_values,
n_values),