Author:    Manuel Bottini <manuel.bottini@arm.com>      2020-10-21 12:34:38 +0100
Committer: Georgios Pinitas <georgios.pinitas@arm.com>  2020-10-29 12:05:18 +0000
Commit:    8cf753f68a2adb1234eaeb8ac79a83a8bec14051 (patch)
Tree:      f97dea1ce4d777bb5f8b52fb89a14eff55c954e2
Parent:    11d73272b8df5ceb2629fb916b84c768b7c5c65a (diff)
Download:  ComputeLibrary-8cf753f68a2adb1234eaeb8ac79a83a8bec14051.tar.gz
COMPMID-3720: Remove OpenCL padding from CLGEMMLowpMatrixMultiplyReshapedKernel

Change-Id: Ie70ba877f0356661a055f026124904bbf2181a33
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4251
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-by: SiCong Li <sicong.li@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--  src/core/CL/cl_kernels/gemmlowp.cl                             | 11
-rw-r--r--  src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.cpp | 48
2 files changed, 21 insertions(+), 38 deletions(-)
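Context for the patch: instead of padding the output tensor so that every work-item can always store a full M0xN0 block, the host now computes the size of the trailing partial block in each dimension and passes it to the kernel as the PARTIAL_STORE_M0/PARTIAL_STORE_N0 build options. Below is a minimal, standalone C++ sketch of that arithmetic; the sizes M, N, m0 and n0 are illustrative values, not taken from the patch.

    #include <cstdio>

    int main()
    {
        // Illustrative GEMM output size and block sizes (not from the patch).
        const unsigned int M  = 33; // output rows
        const unsigned int N  = 20; // output columns
        const unsigned int m0 = 4;  // rows per work-item    (lhs_info.m0)
        const unsigned int n0 = 8;  // columns per work-item (rhs_info.n0)

        // Mirrors the host-side computation added to configure(): the size of
        // the last, possibly incomplete, block along each dimension (0 means
        // the dimension is an exact multiple and every block is full).
        const unsigned int partial_store_m0 = M % m0; // 33 % 4 = 1
        const unsigned int partial_store_n0 = N % n0; // 20 % 8 = 4

        std::printf("-DPARTIAL_STORE_M0=%u -DPARTIAL_STORE_N0=%u\n",
                    partial_store_m0, partial_store_n0);
        return 0;
    }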
diff --git a/src/core/CL/cl_kernels/gemmlowp.cl b/src/core/CL/cl_kernels/gemmlowp.cl
index 4a05635669..059c2e14df 100644
--- a/src/core/CL/cl_kernels/gemmlowp.cl
+++ b/src/core/CL/cl_kernels/gemmlowp.cl
@@ -290,7 +290,7 @@
(VECTOR_ACC_TYPE, k0, a, b, c); \
})
-#if defined(M0) && defined(N0) && defined(K0) && defined(V0) && defined(H0) && defined(M) && defined(N)
+#if defined(M0) && defined(N0) && defined(K0) && defined(V0) && defined(H0) && defined(M) && defined(N) && defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
/** This OpenCL kernel computes the matrix multiplication between 2 matrices with QASYMM8/QASYMM8_SIGNED data type.
 * The LHS matrix must be reshaped with @ref CLGEMMReshapeLHSMatrixKernel and the M0xK0 must NOT be transposed
 * The RHS matrix must be reshaped with @ref CLGEMMReshapeRHSMatrixKernel and the K0xN0 must be transposed
@@ -447,7 +447,12 @@ __kernel void gemmlowp_mm_reshaped_lhs_nt_rhs_t(IMAGE_DECLARATION(lhs),
#endif // defined(REINTERPRET_OUTPUT_AS_3D)
// Convert and store output block
- CONVERT_STORE_BLOCK(M0, N0, int, c, dst_addr, dst_stride_y, zout);
+ const bool cond_y = ((get_global_id(1) + 1) * M0 >= M);
+ const bool cond_x = ((get_global_id(0) + 1) * N0 >= N);
+
+ // Store output block
+ REPEAT_VAR_INIT_CONVERT_SAT(M0, VEC_DATA_TYPE(int, N0), c, c_lp);
+ STORE_BLOCK_BOUNDARY_AWARE(M0, N0, int, c_lp, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
#undef LHS_BLOCK_SIZE
#undef LHS_OFFSET_X
@@ -456,7 +461,7 @@ __kernel void gemmlowp_mm_reshaped_lhs_nt_rhs_t(IMAGE_DECLARATION(lhs),
#undef RHS_OFFSET_X
#undef RHS_STEP_X
}
-#endif // defined(M0) && defined(N0) && defined(K0) && defined(V0) && defined(H0) && defined(K)
+#endif // defined(M0) && defined(N0) && defined(K0) && defined(V0) && defined(H0) && defined(M) && defined(N) && defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
#if defined(M0) && defined(N0) && defined(K0) && defined(H0) && defined(K)
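On the kernel side, the unconditional CONVERT_STORE_BLOCK is replaced by a saturating convert of the accumulators (REPEAT_VAR_INIT_CONVERT_SAT) followed by STORE_BLOCK_BOUNDARY_AWARE, which writes a reduced block whenever the work-item owns the last block of a row or column. The sketch below, written in plain C++ rather than OpenCL C, only illustrates how cond_y/cond_x and the partial sizes bound the store; the names are illustrative and this is not the macro's implementation.

    #include <cstdio>

    // Per-work-item store extent once boundary conditions are applied.
    struct StoreExtent
    {
        unsigned int rows;
        unsigned int cols;
    };

    StoreExtent store_extent(unsigned int gid_x, unsigned int gid_y,
                             unsigned int M0, unsigned int N0,
                             unsigned int partial_m0, unsigned int partial_n0,
                             unsigned int M, unsigned int N)
    {
        // cond_y / cond_x in the kernel: does this work-item own the last
        // block of its column / row of blocks?
        const bool cond_y = (gid_y + 1) * M0 >= M;
        const bool cond_x = (gid_x + 1) * N0 >= N;

        StoreExtent e;
        e.rows = (cond_y && partial_m0 != 0) ? partial_m0 : M0; // clamp trailing rows
        e.cols = (cond_x && partial_n0 != 0) ? partial_n0 : N0; // clamp trailing cols
        return e;
    }

    int main()
    {
        // Same illustrative numbers as above: M=33, N=20 tiled with M0=4, N0=8.
        const StoreExtent last = store_extent(/*gid_x=*/2, /*gid_y=*/8,
                                              4, 8, 33 % 4, 20 % 8, 33, 20);
        std::printf("last block stores %u x %u elements\n", last.rows, last.cols);
        return 0;
    }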
diff --git a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.cpp b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.cpp
index 5469c89c30..713d822f9b 100644
--- a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.cpp
@@ -32,15 +32,10 @@
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/AccessWindowStatic.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "support/StringSupport.h"
-#include <cstddef>
-#include <cstdint>
-#include <tuple>
-
namespace arm_compute
{
using namespace misc::shape_calculator;
@@ -104,15 +99,10 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input0, ITe
unsigned int &num_elems_processed_per_iteration_y = num_elements_processed[1];
bool reinterpret_output_as_3d = (gemm_info.depth_output_gemm3d() != 0);
- Window win{};
- Window win_out{};
- bool window_changed = false;
-
// Output tensor auto initialization if not yet initialized
auto_init_if_empty(*output, input0->clone()->set_tensor_shape(compute_mm_shape(*input0, *input1, gemm_info)).set_data_type(DataType::S32));
TensorInfo tmp_info(*output);
-
if(reinterpret_output_as_3d)
{
// Since the output tensor has to be reinterpreted as 3D and the execute window is based on a 2D GEMM,
@@ -125,29 +115,7 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input0, ITe
// Configure kernel window
num_elems_processed_per_iteration_x = rhs_info.n0;
num_elems_processed_per_iteration_y = lhs_info.m0;
-
- // Note: bottom paddings are calculated manually as the output can be reinterpreted as 3D tensor
- // The only way to set properly the paddings, it is to set those explicitly through the AccessWindowStatic
- const int m = gemm_info.m();
- const int bottom_pad = (num_elems_processed_per_iteration_y - (m % num_elems_processed_per_iteration_y)) % num_elems_processed_per_iteration_y;
-
- win = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
- win_out = calculate_max_window(*output, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
-
- AccessWindowStatic input0_access(input0, 0, 0,
- ceil_to_multiple(input0->dimension(0), num_elems_processed_per_iteration_y),
- input0->dimension(1));
- AccessWindowStatic input1_access(input1, 0, 0,
- ceil_to_multiple(input1->dimension(0), num_elems_processed_per_iteration_x),
- input1->dimension(1));
- AccessWindowStatic output_access(output, 0, 0,
- ceil_to_multiple(output->dimension(0), num_elems_processed_per_iteration_x),
- output->dimension(1) + bottom_pad);
-
- window_changed = update_window_and_padding(win, input0_access, input1_access) || // window used by the execute_window_loop
- update_window_and_padding(win_out, output_access); // window used to update the padding requirements of output tensor
-
- output_access.set_valid_region(win_out, ValidRegion(Coordinates(0, 0), output->tensor_shape()));
+ Window win = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
// Collapse along the Z direction
// This collapse needs to be here in order to tune the Z dimension of LWS
@@ -155,8 +123,7 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input0, ITe
const unsigned int dimension_to_collapse = std::min(static_cast<unsigned int>(output->num_dimensions()), 2u);
collapsed = win.collapse(win, dimension_to_collapse);
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, collapsed);
+ return std::make_pair(Status{}, collapsed);
}
} // namespace
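For comparison, the window configuration removed above rounded the output dimensions up to a multiple of the processing step, requested the difference as explicit padding through AccessWindowStatic, and reported "Insufficient Padding!" whenever the window or padding had to be adjusted. With boundary-aware stores neither the rounding nor that error path is needed, which is why validate_and_configure_window() now simply computes the maximum window and returns an empty Status. Here is a standalone sketch of the arithmetic that was dropped, with an illustrative m and a local round-up helper matching the semantics of ceil_to_multiple in the removed lines.

    #include <cstdio>

    // Round value up to the next multiple of divisor, as the removed code did
    // with ceil_to_multiple().
    unsigned int ceil_to_multiple(unsigned int value, unsigned int divisor)
    {
        return ((value + divisor - 1) / divisor) * divisor;
    }

    int main()
    {
        const unsigned int m  = 33; // gemm_info.m() in the removed code (illustrative)
        const unsigned int m0 = 4;  // num_elems_processed_per_iteration_y

        // Extra rows the output previously had to be padded with so the last
        // vertical block could always be stored in full.
        const unsigned int bottom_pad = (m0 - (m % m0)) % m0; // (4 - 1) % 4 = 3

        std::printf("padded height: %u (was %u)\n", ceil_to_multiple(m, m0), m);
        std::printf("bottom_pad   : %u\n", bottom_pad);
        return 0;
    }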
@@ -190,6 +157,7 @@ void CLGEMMLowpMatrixMultiplyReshapedKernel::configure(const CLCompileContext &c
const unsigned int num_dimensions_input0 = _input0->info()->num_dimensions();
_slide_matrix_b = (_input1->info()->num_dimensions() >= num_dimensions_input0);
+ auto padding_info = get_padding_info({ input0, input1, output });
ElementsProcessed num_elements_processed{};
// Configure kernel window
@@ -197,6 +165,12 @@ void CLGEMMLowpMatrixMultiplyReshapedKernel::configure(const CLCompileContext &c
ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
ICLKernel::configure_internal(win_config.second);
+ // Calculate partial (store instead of load) M0 and partial N0 for the partial blocks at the end of a row/column if any. This is to avoid padding.
+ const unsigned int internal_m = _reinterpret_output_as_3d ? gemm_info.m() : output->info()->dimension(1);
+
+ const unsigned int partial_store_m0 = internal_m % lhs_info.m0;
+ const unsigned int partial_store_n0 = gemm_info.n() % rhs_info.n0;
+
// Create build options
CLBuildOptions build_opts;
build_opts.add_option_if(_reinterpret_output_as_3d, "-DREINTERPRET_OUTPUT_AS_3D");
@@ -215,6 +189,8 @@ void CLGEMMLowpMatrixMultiplyReshapedKernel::configure(const CLCompileContext &c
build_opts.add_option("-DH0=" + support::cpp11::to_string(rhs_info.h0));
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input0->info()->data_type()));
build_opts.add_option("-DACC_DATA_TYPE=" + get_cl_dot8_acc_type_from_data_type(input0->info()->data_type()));
+ build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0));
+ build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0));
std::string kernel_name("gemmlowp_mm_reshaped_");
kernel_name += lhs_info.transpose ? "lhs_t_" : "lhs_nt_";
@@ -250,6 +226,8 @@ void CLGEMMLowpMatrixMultiplyReshapedKernel::configure(const CLCompileContext &c
_config_id += support::cpp11::to_string(lhs_info.interleave);
_config_id += "_";
_config_id += support::cpp11::to_string(rhs_info.interleave);
+
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
Status CLGEMMLowpMatrixMultiplyReshapedKernel::validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info,