author     morgolock <pablo.tello@arm.com>        2020-10-12 14:00:43 +0100
committer  Pablo Marquez <pablo.tello@arm.com>    2020-10-15 17:14:05 +0000
commit     cf343e3798d2a8c2ad2fcac488e4b78e2b5c968d (patch)
tree       52aeb352689b82e34ff98730d52d970e79d3e7ff
parent     3e77c27a07af070677a3a7e34fb3dfc519b7cbd1 (diff)
COMPMID-3719: Remove OpenCL padding: CLGEMMLowpMatrixMultiplyNativeKernel
Change-Id: Iee28abcbba1e7b9e2f3aaa55685936dce815d5a3
Signed-off-by: morgolock <pablo.tello@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4141
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
 src/core/CL/cl_kernels/gemmlowp.cl                           | 15
 src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.cpp | 38
 tests/validation/CL/GEMMLowpMatrixMultiplyNative.cpp         | 71
 3 files changed, 93 insertions(+), 31 deletions(-)
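
Background for the diff that follows: the old kernel always stored a full M0 x N0 block per work-item, which forced the configure step to pad the output (and, transitively, the inputs) up to a multiple of the block size. The patch instead passes the leftover block sizes to the kernel as PARTIAL_STORE_M0 / PARTIAL_STORE_N0 build options and shifts the per-work-item start row so that every store stays inside the tensor. A minimal standalone C++ sketch of that arithmetic (not part of the patch; the shape and block sizes are illustrative values taken from the new zero-padding test):

#include <algorithm>
#include <cstdio>

int main()
{
    // Illustrative problem and block sizes (one row of the new zero-padding test).
    const int M = 63, N = 29; // output rows / columns
    const int M0 = 8, N0 = 4; // rows / columns processed per work-item

    // Leftover block sizes, mirroring the PARTIAL_STORE_M0 / PARTIAL_STORE_N0
    // build options added in CLGEMMLowpMatrixMultiplyNativeKernel::configure().
    const int partial_store_m0 = M % M0; // 7
    const int partial_store_n0 = N % N0; // 1

    // Start row of the block handled by work-item y: block y == 0 stores only the
    // leftover rows, and every later block is shifted up by (M0 - partial_store_m0)
    // so that the last block ends exactly at row M. This mirrors how the kernel
    // uses COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0).
    auto start_row = [&](int y) { return std::max(0, y * M0 - (M0 - partial_store_m0) % M0); };

    const int num_blocks_y = (M + M0 - 1) / M0;
    for(int y = 0; y < num_blocks_y; ++y)
    {
        // cond_y in the kernel: the partial store along M happens at y == 0.
        const int rows = (y == 0 && partial_store_m0 != 0) ? partial_store_m0 : M0;
        std::printf("block y=%d stores rows [%d, %d)\n", y, start_row(y), start_row(y) + rows);
    }
    // Along N the partial store happens in the last block, i.e. when (x + 1) * N0 >= N.
    std::printf("partial_store_n0 = %d\n", partial_store_n0);
    return 0;
}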
diff --git a/src/core/CL/cl_kernels/gemmlowp.cl b/src/core/CL/cl_kernels/gemmlowp.cl
index 8405a7beb7..29314ec581 100644
--- a/src/core/CL/cl_kernels/gemmlowp.cl
+++ b/src/core/CL/cl_kernels/gemmlowp.cl
@@ -992,10 +992,11 @@ __kernel void gemmlowp_mm_native(IMAGE_DECLARATION(lhs),
#endif // defined(DUMMY_WORK_ITEMS)
// Compute LHS matrix address
- uint lhs_offset = lhs_offset_first_element_in_bytes + y * M0 * (uint)lhs_stride_y;
+ uint lhs_offset = lhs_offset_first_element_in_bytes + COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * (uint)lhs_stride_y;
// Compute RHS matrix address
- uint rhs_offset = rhs_offset_first_element_in_bytes + x * N0;
+ uint rhs_offset = rhs_offset_first_element_in_bytes + x * N0 * sizeof(DATA_TYPE);
+
#if defined(MATRIX_B_DEPTH)
// Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
@@ -1074,7 +1075,8 @@ __kernel void gemmlowp_mm_native(IMAGE_DECLARATION(lhs),
rhs_offset += rhs_stride_y;
}
- __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)N0) * sizeof(int) + (y * (uint)M0 * dst_stride_y);
+ __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(int)) + (COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * dst_stride_y);
+
REPEAT_VAR_INIT_TO_CONST(M0, uint, zout, 0); //uint zout0=0,zout1=0,zout2=0,... zout7=0;
@@ -1092,9 +1094,12 @@ __kernel void gemmlowp_mm_native(IMAGE_DECLARATION(lhs),
dst_addr += z * dst_stride_z;
#endif // defined(REINTERPRET_OUTPUT_AS_3D)
+ const bool cond_y = y == 0;
+ const bool cond_x = ((x + 1) * N0 >= N);
- // Convert and store output block
- CONVERT_STORE_BLOCK(M0, N0, int, c, dst_addr, dst_stride_y, zout);
+
+ // Store output block
+ STORE_BLOCK_BOUNDARY_AWARE(M0, N0, int, c, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, N, cond_y, cond_x);
}
#endif // defined(M0) && defined(N0) && defined(K0) && defined(K)
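
The STORE_BLOCK_BOUNDARY_AWARE call above replaces CONVERT_STORE_BLOCK, which always wrote a full M0 x N0 tile and therefore needed a padded destination. A rough, simplified C++ model of the boundary-aware selection (the real macro operates on OpenCL vector types and partial vstore helpers; this only illustrates how cond_y / cond_x and the PARTIAL_STORE_* values pick the stored sub-block):

#include <cstdio>

// Simplified model: decide how many rows/columns of the computed M0 x N0 block
// are written back, depending on whether the block handles the partial rows
// (cond_y, i.e. y == 0) or the partial columns at the right edge (cond_x).
static void store_block_boundary_aware(int M0, int N0,
                                       int partial_store_m0, int partial_store_n0,
                                       bool cond_y, bool cond_x)
{
    const int rows = (cond_y && partial_store_m0 != 0) ? partial_store_m0 : M0;
    const int cols = (cond_x && partial_store_n0 != 0) ? partial_store_n0 : N0;
    std::printf("store %d x %d of the %d x %d block\n", rows, cols, M0, N0);
}

int main()
{
    // cond_y = (y == 0) and cond_x = ((x + 1) * N0 >= N), as computed in the kernel.
    store_block_boundary_aware(8, 4, 7, 1, /*cond_y=*/true,  /*cond_x=*/false); // 7 x 4
    store_block_boundary_aware(8, 4, 7, 1, /*cond_y=*/false, /*cond_x=*/true);  // 8 x 1
    store_block_boundary_aware(8, 4, 7, 1, /*cond_y=*/false, /*cond_x=*/false); // 8 x 4
    return 0;
}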
diff --git a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.cpp b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.cpp
index 9a2918d12f..d30a9e5d18 100644
--- a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.cpp
@@ -111,8 +111,6 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input0, ITe
bool reinterpret_output_as_3d = (gemm_info.depth_output_gemm3d() != 0);
Window win{};
- Window win_out{};
- bool window_changed = false;
// In case both input and output have to be reinterpreted as 3D tensors,
// force reinterpret_output_as_3d to be false.
@@ -139,28 +137,8 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input0, ITe
num_elems_processed_per_iteration_x = rhs_info.n0;
num_elems_processed_per_iteration_y = lhs_info.m0;
- // Note: bottom paddings are calculated manually as the output can be reinterpreted as 3D tensor
- // The only way to set properly the paddings, it is to set those explicitly through the AccessWindowStatic
- const int m = reinterpret_output_as_3d ? gemm_info.m() : input0->dimension(1);
- const int bottom_pad = (num_elems_processed_per_iteration_y - (m % num_elems_processed_per_iteration_y)) % num_elems_processed_per_iteration_y;
-
win = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
- win_out = calculate_max_window(*output, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
-
- AccessWindowStatic input0_access(input0, 0, 0,
- input0->dimension(0),
- input0->dimension(1) + bottom_pad);
- AccessWindowStatic input1_access(input1, 0, 0,
- ceil_to_multiple(input1->dimension(0), num_elems_processed_per_iteration_x),
- input1->dimension(1));
- AccessWindowStatic output_access(output, 0, 0,
- ceil_to_multiple(output->dimension(0), num_elems_processed_per_iteration_x),
- output->dimension(1) + bottom_pad);
-
- window_changed = update_window_and_padding(win, input0_access, input1_access) || // window used by the execute_window_loop
- update_window_and_padding(win_out, output_access); // window used to update the padding requirements of output tensor
-
- output_access.set_valid_region(win_out, ValidRegion(Coordinates(), output->tensor_shape()));
+ output->set_valid_region(ValidRegion(Coordinates(), output->tensor_shape()));
// Collapse along the Z direction
// This collapse needs to be here in order to tune the Z dimension of LWS
@@ -168,8 +146,7 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input0, ITe
const unsigned int dimension_to_collapse = std::min(static_cast<unsigned int>(output->num_dimensions()), 2u);
collapsed = win.collapse(win, dimension_to_collapse);
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, collapsed);
+ return std::make_pair(Status(), collapsed);
}
} // namespace
@@ -218,6 +195,14 @@ void CLGEMMLowpMatrixMultiplyNativeKernel::configure(const CLCompileContext &com
ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
ICLKernel::configure_internal(win_config.second);
+ // If _reinterpret_input_as_3d = _reinterpret_output_as_3d = true,
+ // we will dispatch a batched-GEMM to reduce the complexity of the address calculation within the OpenCL kernel.
+ // This means that the actual m used by the kernel is given by output->info()->dimension(1) and not by gemm_info.m
+ const unsigned int internal_m = input0->info()->dimension(1);
+ // Calculate partial (store instead of load) M0 and partial N0 for the partial blocks at the end of a row/column if any. This is to avoid padding.
+ const unsigned int partial_store_m0 = internal_m % lhs_info.m0;
+ const unsigned int partial_store_n0 = gemm_info.n() % rhs_info.n0;
+
// Create build options
CLBuildOptions build_opts;
build_opts.add_option_if(_reinterpret_input_as_3d, "-DREINTERPRET_INPUT_AS_3D");
@@ -234,7 +219,8 @@ void CLGEMMLowpMatrixMultiplyNativeKernel::configure(const CLCompileContext &com
build_opts.add_option("-DK0=" + support::cpp11::to_string(rhs_info.k0));
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input0->info()->data_type()));
build_opts.add_option("-DACC_DATA_TYPE=" + get_cl_dot8_acc_type_from_data_type(input0->info()->data_type()));
-
+ build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0));
+ build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0));
std::string kernel_name("gemmlowp_mm_native");
// Create kernel
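
With the example shape used in the sketches above (M = 63, N = 29, m0 = 8, n0 = 4), the configure step would therefore pass, among its other options, something like the following defines to gemmlowp_mm_native (hand-assembled for illustration; the full option set also carries M, N, K, the data types, etc.):

    -DM0=8 -DN0=4 -DPARTIAL_STORE_M0=7 -DPARTIAL_STORE_N0=1

Because the leftover blocks are handled inside the kernel, validate_and_configure_window() no longer needs the AccessWindowStatic padding or the "Insufficient Padding!" error path removed in the hunks above.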
diff --git a/tests/validation/CL/GEMMLowpMatrixMultiplyNative.cpp b/tests/validation/CL/GEMMLowpMatrixMultiplyNative.cpp
index ce000bd8e1..9e717dfac9 100644
--- a/tests/validation/CL/GEMMLowpMatrixMultiplyNative.cpp
+++ b/tests/validation/CL/GEMMLowpMatrixMultiplyNative.cpp
@@ -88,10 +88,81 @@ const auto n0_values_nightly = framework::dataset::make("N0", { 1, 2, 3, 4, 8 })
/** K0 values to test - Nightly */
const auto k0_values_nightly = framework::dataset::make("K0", { 1, 2, 3, 4, 8, 16 });
+
+/** Zero padding test */
+bool validate_zero_padding(unsigned int m_value, unsigned int n_value, unsigned int k_value, unsigned int b_value, unsigned int m0_value, unsigned int n0_value, unsigned int k0_value, bool broadcast_bias, DataType data_type, const ActivationLayerInfo &act_info)
+{
+ const unsigned int M = m_value;
+ const unsigned int N = n_value;
+ const unsigned int K = k_value;
+
+ GEMMLHSMatrixInfo lhs_info;
+ lhs_info.m0 = m0_value;
+ lhs_info.k0 = k0_value;
+
+ GEMMRHSMatrixInfo rhs_info;
+ rhs_info.n0 = n0_value;
+ rhs_info.k0 = k0_value;
+
+ GEMMKernelInfo kernel_info;
+ kernel_info.m = M;
+ kernel_info.n = N;
+ kernel_info.k = K;
+ kernel_info.broadcast_bias = broadcast_bias;
+ kernel_info.activation_info = act_info;
+
+ const TensorShape lhs_shape(K, M, b_value);
+ const TensorShape rhs_shape(N, K, b_value);
+ const TensorShape dst_shape = compute_mm_shape(TensorInfo(lhs_shape, 1, data_type),
+ TensorInfo(rhs_shape, 1, data_type),
+ kernel_info);
+
+ // Create tensors
+ CLTensor lhs = create_tensor<CLTensor>(lhs_shape, data_type);
+ CLTensor rhs = create_tensor<CLTensor>(rhs_shape, data_type);
+ CLTensor dst = create_tensor<CLTensor>(dst_shape, DataType::S32);
+
+ ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Create and configure function
+ CLGEMMLowpMatrixMultiplyNative gemm;
+ gemm.configure(&lhs, &rhs, &dst, lhs_info, rhs_info, GEMMReshapeInfo(m_value, n_value, k_value));
+
+ // Check that no padding has been added to any of the tensors
+ return dst.info()->padding().empty() && lhs.info()->padding().empty() && rhs.info()->padding().empty();
+}
} // namespace
TEST_SUITE(CL)
TEST_SUITE(GEMMLowpMatrixMultiplyNative)
+
+/** Validate zero padding tests
+ *
+ * A series of validation tests to check that no padding is added as part of configuration for the scenarios listed below.
+ *
+ * Checks performed in order:
+ * - No partial blocks in both x and y dimensions
+ * - Partial blocks in x dimension
+ * - Partial blocks in y dimension
+ * - Partial blocks in both x and y dimensions
+ * - No blocks in both x and y dimensions, scalar store (N0==1)
+ * - Special case: partial_n0 == 5 (vstore1 should be invoked instead of vstore_partial_1)
+ */
+DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(zip(zip(
+framework::dataset::make("M", { 24, 63, 1, 51, 255, }),
+framework::dataset::make("N", { 47, 29, 122, 20, 21, })),
+framework::dataset::make("M0", { 4, 8, 2, 1, 8, })),
+framework::dataset::make("N0", { 4, 4, 3, 1, 8, })),
+m_value, n_value, m0_value, n0_value)
+{
+ bool status = validate_zero_padding(m_value, n_value, 23, 1, m0_value, n0_value, 4, false, DataType::QASYMM8, ActivationLayerInfo());
+ ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
+}
+
+
+
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyNativeFixture, framework::DatasetMode::ALL,
combine(combine(combine(combine(combine(combine(m_values,
n_values),