Diffstat (limited to 'tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp')
 tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp | 181
 1 file changed, 71 insertions(+), 110 deletions(-)
diff --git a/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp b/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp
index 9fc6fd713d..b8b586053b 100644
--- a/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp
+++ b/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -67,10 +67,10 @@ namespace
RelativeTolerance<float> rel_tolerance_f32(0.001f);
constexpr float abs_tolerance_f32(0.0001f);
-/** Alpha values to test - Precommit */
-const auto a_values = framework::dataset::make("alpha", {1.0f, -0.75f} );
+/** Alpha values to test */
+const auto a_values = framework::dataset::make("alpha", {-0.75f} );
-/** Beta values to test - Precommit */
+/** Beta values to test */
const auto beta_values = framework::dataset::make("beta", {-0.35f, 0.0f} );
/** M values to test */
@@ -98,29 +98,17 @@ const auto act_values = framework::dataset::make("Activation",
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 8.f, 2.f),
});
-/** M0 values to test - Precommit */
-const auto m0_values_precommit = framework::dataset::make("M0", {4, 6});
+/** M0 values to test */
+const auto m0_values = framework::dataset::make("M0", { 8 });
-/** N0 values to test - Precommit */
-const auto n0_values_precommit = framework::dataset::make("N0", { 4 });
+/** N0 values to test */
+const auto n0_values = framework::dataset::make("N0", { 16 });
-/** K0 values to test - Precommit */
-const auto k0_values_precommit = framework::dataset::make("K0", { 4 });
+/** K0 values to test */
+const auto k0_values = framework::dataset::make("K0", { 16 });
-/** H0 values to test - Precommit */
-const auto h0_values_precommit = framework::dataset::make("H0", 1, 3);
-
-/** M0 values to test - Nightly */
-const auto m0_values_nightly = framework::dataset::make("M0", 1, 8);
-
-/** N0 values to test - Nightly */
-const auto n0_values_nightly = framework::dataset::make("N0", { 2, 3, 4, 8 });
-
-/** K0 values to test - Nightly */
-const auto k0_values_nightly = framework::dataset::make("K0", { 2, 3, 4, 8 });
-
-/** H0 values to test - Nightly */
-const auto h0_values_nightly = framework::dataset::make("H0", 1, 4);
+/** H0 values to test */
+const auto h0_values = framework::dataset::make("H0", 1, 3);
/** Interleave values to test with RHS matrix */
const auto i_values_rhs = framework::dataset::make("interleave_rhs", { true, false });
@@ -132,7 +120,10 @@ const auto t_values_rhs = framework::dataset::make("transpose_rhs", { true, fals
const auto broadcast_bias_values = framework::dataset::make("broadcast_bias", { false, true } );
/** Configuration test */
-void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned int k_value, unsigned int b_value, unsigned int m0_value, unsigned int n0_value, unsigned int k0_value, unsigned int h0_value, bool i_value_rhs, bool t_value_rhs, bool broadcast_bias, DataType data_type, const ActivationLayerInfo &act_info)
+bool validate_configuration(unsigned int m_value, unsigned int n_value, unsigned int k_value, unsigned int b_value,
+ unsigned int m0_value, unsigned int n0_value, unsigned int k0_value, unsigned int h0_value,
+ bool i_value_rhs, bool t_value_rhs, bool broadcast_bias, bool input_as_3d, unsigned int depth_output_gemm3d, const ActivationLayerInfo &act_info,
+ DataType dt_input0, DataType dt_input1, DataType dt_input2, DataType dt_output, float alpha, float beta)
{
const unsigned int M = m_value;
const unsigned int N = n_value;
@@ -153,95 +144,86 @@ void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned
kernel_info.m = M;
kernel_info.n = N;
kernel_info.k = K;
- kernel_info.depth_output_gemm3d = 0;
- kernel_info.reinterpret_input_as_3d = false;
+ kernel_info.depth_output_gemm3d = depth_output_gemm3d;
+ kernel_info.reinterpret_input_as_3d = input_as_3d;
kernel_info.broadcast_bias = broadcast_bias;
kernel_info.activation_info = act_info;
const TensorShape lhs_shape(K, M, b_value);
const TensorShape rhs_shape(N, K, b_value);
- const TensorShape rhs_shape_reshaped = compute_rhs_reshaped_shape(TensorInfo(rhs_shape, 1, data_type),
+ const TensorShape rhs_shape_reshaped = compute_rhs_reshaped_shape(TensorInfo(rhs_shape, 1, dt_input1),
rhs_info);
- const TensorShape dst_shape = compute_mm_shape(TensorInfo(lhs_shape, 1, data_type),
- TensorInfo(rhs_shape_reshaped, 1, data_type),
+ const TensorShape dst_shape = compute_mm_shape(TensorInfo(lhs_shape, 1, dt_input0),
+ TensorInfo(rhs_shape_reshaped, 1, dt_input1),
kernel_info);
const TensorShape bias_shape(N,
- broadcast_bias? 1 : M,
+ M, // The correct expression would be: broadcast_bias ? 1 : M; it is wrong on purpose here to exercise the validation checks
broadcast_bias? 1 : b_value);
- // Create tensors
- CLTensor lhs = create_tensor<CLTensor>(lhs_shape, data_type);
- CLTensor rhs_reshaped = create_tensor<CLTensor>(rhs_shape_reshaped, data_type);
- CLTensor bias = create_tensor<CLTensor>(bias_shape, data_type);
- CLTensor dst = create_tensor<CLTensor>(dst_shape, data_type);
-
- ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ // Create tensor info
+ TensorInfo lhs = TensorInfo(lhs_shape, 1, dt_input0);
+ TensorInfo rhs_reshaped = TensorInfo(rhs_shape_reshaped, 1, dt_input1);
+ TensorInfo bias = TensorInfo(bias_shape, 1, dt_input2);
+ TensorInfo dst = TensorInfo(dst_shape, 1, dt_output);
- // Create and configure function
+ // Create the function and validate the configuration
CLGEMMMatrixMultiplyReshapedOnlyRHS gemm;
- gemm.configure(&lhs, &rhs_reshaped, &bias, &dst, 1.0f, 1.0f, lhs_info, rhs_info, kernel_info);
+ return bool(gemm.validate(&lhs, &rhs_reshaped, &bias, &dst, alpha, beta, lhs_info, rhs_info, kernel_info));
}
} // namespace
TEST_SUITE(CL)
TEST_SUITE(GEMMMatrixMultiplyReshapedOnlyRHS)
-TEST_SUITE(Float)
-TEST_SUITE(FP32)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
- m_values,
- n_values),
- k_values),
- framework::dataset::make("batch_size", 1)),
- m0_values_precommit),
- n0_values_precommit),
- k0_values_precommit),
- h0_values_precommit),
- i_values_rhs),
- t_values_rhs),
- broadcast_bias_values),
- act_values),
-m_value, n_value, k_value, b_value, m0_value, n0_value, k0_value, h0_value, i_value_rhs, t_value_rhs, broadcast_bias, act_value)
-{
- validate_configuration(m_value, n_value, k_value, b_value, m0_value, n0_value, k0_value, h0_value, i_value_rhs, t_value_rhs, broadcast_bias, DataType::F32, act_value);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<float>, framework::DatasetMode::ALL,
- combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
- m_values,
- n_values),
- k_values),
- b_values),
- m0_values_precommit),
- n0_values_precommit),
- k0_values_precommit),
- h0_values_precommit),
- i_values_rhs),
- t_values_rhs),
- framework::dataset::make("DataType", DataType::F32)),
- a_values),
- beta_values),
- broadcast_bias_values),
- act_values))
+/** Validate tests
+ *
+ * A series of validation tests on configurations that, according to the API specification,
+ * the function should reject.
+ *
+ * Checks performed in order:
+ * - Mismatching data types: input1, input2 and output must have the same data type as input0. Supported data types: F32/F16.
+ * - Unsupported M0: M0 can only be 1,2,3,4,5,6,7,8
+ * - Unsupported N0: N0 can only be 2,3,4,8,16
+ * - Unsupported K0: K0 can only be 2,3,4,8,16
+ * - Unsupported bias addition: bias broadcast mode must be 0 when the input or output has to be reinterpreted as 3D
+ * - Incorrect bias dimensions when bias broadcast mode is 1 and beta is not 0.0f: the bias shape should be (n, 1), not (n, m)
+ * - Incorrect input0 dimensions when the input is reinterpreted as 3D: input0->dimension(1) * input0->dimension(2) != m
+ */
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(zip(zip(zip(zip(zip(
+framework::dataset::make("batch_size", { 1, 1, 1, 1, 1, 1, 2 }),
+framework::dataset::make("M0", { 4, 9, 4, 4, 4, 4, 4 })),
+framework::dataset::make("N0", { 4, 4, 18, 4, 4, 4, 4 })),
+framework::dataset::make("K0", { 4, 4, 4, 1, 4, 4, 4 })),
+framework::dataset::make("broadcast_bias", { false, false, false, false, false, true, true })),
+framework::dataset::make("input_as_3d", { 0, 0, 0, 0, 1, 0, 1 })),
+framework::dataset::make("depth_output_gemm3d", { 0, 0, 0, 0, 0, 1, 0 })),
+framework::dataset::make("data_type_input0", { DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32})),
+framework::dataset::make("data_type_input1", { DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32})),
+framework::dataset::make("data_type_input2", { DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32})),
+framework::dataset::make("data_type_output", { DataType::F16, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32})),
+framework::dataset::make("Beta", { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f })),
+framework::dataset::make("Expected", { false, false, false, false, false, false, false })),
+b_value, m0_value, n0_value, k0_value, broadcast_bias, input_as_3d, depth_output_gemm3d, dt_input0, dt_input1, dt_input2, dt_output, beta, expected)
{
- // Validate output
- validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
+ bool status = validate_configuration(37, 51, 23, b_value, m0_value, n0_value, k0_value, 1, false, false, broadcast_bias, input_as_3d, depth_output_gemm3d, ActivationLayerInfo(), dt_input0, dt_input1, dt_input2, dt_output, 1.0f, beta);
+ ARM_COMPUTE_EXPECT(status == expected, framework::LogLevel::ERRORS);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<float>, framework::DatasetMode::DISABLED,
+TEST_SUITE(Float)
+TEST_SUITE(FP32)
+
+FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<float>, framework::DatasetMode::ALL,
combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
m_values,
n_values),
k_values),
b_values),
- m0_values_nightly),
- n0_values_nightly),
- k0_values_nightly),
- h0_values_nightly),
+ m0_values),
+ n0_values),
+ k0_values),
+ h0_values),
i_values_rhs),
t_values_rhs),
framework::dataset::make("DataType", DataType::F32)),
@@ -261,10 +243,10 @@ FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<
n_values),
k_values),
b_values),
- m0_values_precommit),
- n0_values_precommit),
- k0_values_precommit),
- h0_values_precommit),
+ m0_values),
+ n0_values),
+ k0_values),
+ h0_values),
i_values_rhs),
t_values_rhs),
framework::dataset::make("DataType", DataType::F32)),
@@ -276,27 +258,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<
validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
}
-FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<float>, framework::DatasetMode::DISABLED,
- combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
- m_w_values,
- m_h_values),
- n_values),
- k_values),
- b_values),
- m0_values_nightly),
- n0_values_nightly),
- k0_values_nightly),
- h0_values_nightly),
- i_values_rhs),
- t_values_rhs),
- framework::dataset::make("DataType", DataType::F32)),
- a_values),
- beta_values),
- act_values))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
-}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
TEST_SUITE_END() // GEMMMatrixMultiplyReshapedOnlyRHS
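
For illustration, here is a minimal standalone sketch of the validate-only flow the new Validate test drives, probing one of the checks listed in its comment (an unsupported N0). It reuses names from the test file above (CLGEMMMatrixMultiplyReshapedOnlyRHS is the test's synthetic wrapper around the CL kernel, and compute_rhs_reshaped_shape comes from the shape-calculator helpers); the concrete shapes, block sizes and values below are illustrative assumptions, not part of this patch.

// A minimal sketch, assuming the same headers and using-declarations as the
// test file above. Shapes and block sizes are illustrative only.
GEMMLHSMatrixInfo lhs_info;
lhs_info.m0 = 4;
lhs_info.k0 = 4;

GEMMRHSMatrixInfo rhs_info;
rhs_info.n0         = 18; // Deliberately unsupported: N0 must be one of 2,3,4,8,16
rhs_info.k0         = 4;
rhs_info.h0         = 1;
rhs_info.interleave = false;
rhs_info.transpose  = false;

GEMMKernelInfo kernel_info;
kernel_info.m                       = 37;
kernel_info.n                       = 51;
kernel_info.k                       = 23;
kernel_info.depth_output_gemm3d     = 0;
kernel_info.reinterpret_input_as_3d = false;
kernel_info.broadcast_bias          = false;
kernel_info.activation_info         = ActivationLayerInfo();

// Tensor metadata only: validate() inspects TensorInfo objects and allocates nothing
const TensorInfo lhs(TensorShape(23U, 37U, 1U), 1, DataType::F32); // (K, M, batch)
const TensorInfo rhs_reshaped(compute_rhs_reshaped_shape(TensorInfo(TensorShape(51U, 23U, 1U), 1, DataType::F32), rhs_info), 1, DataType::F32); // reshaped from (N, K, batch)
const TensorInfo bias(TensorShape(51U, 37U, 1U), 1, DataType::F32); // (N, M, batch), no broadcast
const TensorInfo dst(TensorShape(51U, 37U, 1U), 1, DataType::F32);  // (N, M, batch)

// As in the test above, a Status converts to true only when the configuration is accepted
CLGEMMMatrixMultiplyReshapedOnlyRHS gemm;
const bool status = bool(gemm.validate(&lhs, &rhs_reshaped, &bias, &dst, 1.0f, 0.0f, lhs_info, rhs_info, kernel_info));
ARM_COMPUTE_EXPECT(status == false, framework::LogLevel::ERRORS);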