author     Gian Marco <gianmarco.iodice@arm.com>      2017-12-16 19:33:50 +0000
committer  Anthony Barbier <anthony.barbier@arm.com>  2018-11-02 16:42:33 +0000
commit     1d25ed54a948639d1894c8b021940df70005d519 (patch)
tree       96a29126c5b61299d64496fad7f6844412ab2cca /src/runtime
parent     57b20109108a90113d29d21ce7d3c873ff19749c (diff)
download   ComputeLibrary-1d25ed54a948639d1894c8b021940df70005d519.tar.gz
COMPMID-759 - CLGEMM optimization for McVail benchmarks
This patch introduces an optimization for CLGEMM on Bifrost architectures which can reach up to 40% FMA utilization on config 3 of the McVail benchmarks. The new CLGEMM does not require any reshape of matrix A and matrix B. This patch also adds auto-configuration in CLConvolutionLayer and CLGEMM and extends the interfaces of NEGEMM and CLGEMM.

Change-Id: Ibb354eda45e9ca64b14a99700fb21dff5989dda9
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/113716
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'src/runtime')
-rw-r--r--  src/runtime/CL/functions/CLConvolutionLayer.cpp | 73
-rw-r--r--  src/runtime/CL/functions/CLGEMM.cpp             | 51
-rw-r--r--  src/runtime/NEON/functions/NEGEMM.cpp           | 22
3 files changed, 70 insertions(+), 76 deletions(-)
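
For reference, a minimal usage sketch of the extended CLGEMM interface. This is an illustration, not part of the patch: it assumes the GEMMInfo type added alongside this change takes (is_a_reshaped, is_b_reshaped, reshape_b_only_on_first_run) flags, and the tensor shapes are made up.

    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLGEMM.h"

    using namespace arm_compute;

    int main()
    {
        CLScheduler::get().default_init();

        // A is 32x64, B is 64x16, so the product AB is 32x16.
        // TensorShape is (width, height), i.e. dimension 0 is the number of columns.
        CLTensor a, b, output;
        a.allocator()->init(TensorInfo(TensorShape(64U, 32U), 1, DataType::F32));
        b.allocator()->init(TensorInfo(TensorShape(16U, 64U), 1, DataType::F32));
        output.allocator()->init(TensorInfo(TensorShape(16U, 32U), 1, DataType::F32));

        // Assumed GEMMInfo flags: neither input is pre-reshaped, and B only
        // needs to be reshaped on the first run (e.g. constant weights).
        GEMMInfo gemm_info(false, false, true);

        CLGEMM gemm;
        gemm.configure(&a, &b, nullptr /* no C matrix */, &output, 1.0f /* alpha */, 0.0f /* beta */, gemm_info);

        a.allocator()->allocate();
        b.allocator()->allocate();
        output.allocator()->allocate();

        gemm.run(); // first run: B is transposed (when the reshaped path is taken)
        gemm.run(); // later runs: the transpose of B is skipped
        CLScheduler::get().sync();
        return 0;
    }

The same GEMMInfo parameter is added to NEGEMM::configure below, with the same semantics.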
diff --git a/src/runtime/CL/functions/CLConvolutionLayer.cpp b/src/runtime/CL/functions/CLConvolutionLayer.cpp
index 64c31d5191..2c1ddc3e3b 100644
--- a/src/runtime/CL/functions/CLConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLConvolutionLayer.cpp
@@ -43,9 +43,6 @@ CLConvolutionLayerReshapeWeights::CLConvolutionLayerReshapeWeights(std::shared_p
void CLConvolutionLayerReshapeWeights::configure(const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, bool transpose1xW)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QS8, DataType::QASYMM8, DataType::QS16, DataType::F16, DataType::F32);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(weights, output);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(weights, output);
ARM_COMPUTE_ERROR_ON(weights->info()->num_dimensions() > 4);
if(biases != nullptr)
@@ -82,6 +79,8 @@ void CLConvolutionLayerReshapeWeights::configure(const ICLTensor *weights, const
{
_weights_reshape_kernel.configure(weights, biases_to_use, output);
}
+
+ output->info()->set_quantization_info(weights->info()->quantization_info());
}
void CLConvolutionLayerReshapeWeights::run()
@@ -100,8 +99,8 @@ void CLConvolutionLayerReshapeWeights::run()
CLConvolutionLayer::CLConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(memory_manager), _reshape_weights(), _input_im2col_kernel(), _input_interleave_kernel(), _mm_kernel(), _mm_gemmlowp(memory_manager), _gemmlowp_output_stage(), _output_col2im_kernel(),
- _input_im2col_reshaped(), _input_interleaved_reshaped(), _weights_reshaped(), _weights_transposed(), _gemm_output(), _tmp_output(), _append_bias(false), _is_fully_connected_convolution(false),
- _are_weights_reshaped(false), _is_quantized(false)
+ _input_im2col_reshaped(), _input_interleaved_reshaped(), _weights_reshaped(), _weights_transposed(), _gemm_output(), _tmp_output(), _are_weights_reshaped(false), _is_quantized(false),
+ _is_interleaved_transposed(false)
{
}
@@ -157,14 +156,16 @@ void CLConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weig
const DataType dt = input->info()->data_type();
- // Set the GPU target for matrix multiply
+ // Set the GPU target for matrix multiply and im2col and col2im
_mm_kernel.set_target(CLScheduler::get().target());
+ _input_im2col_kernel.set_target(CLScheduler::get().target());
+ _output_col2im_kernel.set_target(CLScheduler::get().target());
- _append_bias = (biases != nullptr) && (!_is_quantized);
- _are_weights_reshaped = weights_info.are_reshaped();
+ const bool append_bias = (biases != nullptr) && (!_is_quantized);
+ _are_weights_reshaped = weights_info.are_reshaped();
- const unsigned bias_element = (_append_bias) ? 1 : 0;
- const ICLTensor *biases_to_use = (_append_bias) ? biases : nullptr;
+ const unsigned bias_element = (append_bias) ? 1 : 0;
+ const ICLTensor *biases_to_use = (append_bias) ? biases : nullptr;
// Get parameters from conv_info
unsigned int stride_x = 0;
@@ -181,8 +182,8 @@ void CLConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weig
conv_info);
// Check if its a "fully connected" convolution
- _is_fully_connected_convolution = ((conv_w == 1) && (conv_h == 1));
- const bool run_interleaved = (!_is_fully_connected_convolution && !_is_quantized);
+ const bool is_fully_connected_convolution = ((conv_w == 1) && (conv_h == 1));
+ _is_interleaved_transposed = (!is_fully_connected_convolution && !_is_quantized);
unsigned int mat_weights_cols = weights->info()->dimension(3);
unsigned int mat_weights_rows = weights->info()->dimension(0) * weights->info()->dimension(1) * weights->info()->dimension(2) + bias_element;
@@ -190,7 +191,7 @@ void CLConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weig
// Reshape weights if needed
if(_are_weights_reshaped)
{
- if(_is_fully_connected_convolution || _is_quantized)
+ if(is_fully_connected_convolution || _is_quantized)
{
mat_weights_cols = weights->info()->dimension(0);
mat_weights_rows = weights->info()->dimension(1);
@@ -204,22 +205,9 @@ void CLConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weig
}
else
{
- if(_is_fully_connected_convolution || _is_quantized)
- {
- // Create tensor to store the reshaped weights
- TensorShape shape_wr(mat_weights_cols, mat_weights_rows);
- _weights_reshaped.allocator()->init(weights->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_wr));
- _reshape_weights.configure(weights, biases_to_use, &_weights_reshaped, false /* 1xW transpose */);
- }
- else
- {
- // Create tensor to store transposed weights
- const float transpose_width = 16.0f / input->info()->element_size();
- TensorShape shape_wt(mat_weights_rows * static_cast<unsigned int>(transpose_width), static_cast<unsigned int>(std::ceil(mat_weights_cols / transpose_width)));
- _weights_reshaped.allocator()->init(weights->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_wt));
- _reshape_weights.configure(weights, biases_to_use, &_weights_reshaped, true /* 1xW transpose */);
- }
- _weights_reshaped.info()->set_quantization_info(weights->info()->quantization_info());
+ // _weights_reshaped will be auto configured in the kernel
+ _reshape_weights.configure(weights, biases_to_use, &_weights_reshaped, _is_interleaved_transposed /* 1xW transpose */);
+
weights = &_weights_reshaped;
}
@@ -236,19 +224,6 @@ void CLConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weig
_input_im2col_reshaped.allocator()->init(im2col_reshaped_info);
_memory_group.manage(&_input_im2col_reshaped);
- // Create tensor (interleave) to prepare input tensor for GEMM
- if(run_interleaved)
- {
- TensorShape shape_interleaved = shape_im2col;
- shape_interleaved.set(0, shape_interleaved.x() * 4);
- shape_interleaved.set(1, std::ceil(shape_interleaved.y() / 4.f));
- // FIXME: input->clone() doesn't work with subtensors for grouped convolutions.
- TensorInfo interleaved_info(shape_interleaved, 1, dt, input->info()->fixed_point_position());
- interleaved_info.set_quantization_info(input->info()->quantization_info());
- _input_interleaved_reshaped.allocator()->init(interleaved_info);
- _memory_group.manage(&_input_interleaved_reshaped);
- }
-
// Create GEMM output tensor
TensorShape shape_gemm = _input_im2col_reshaped.info()->tensor_shape();
shape_gemm.set(0, mat_weights_cols);
@@ -261,14 +236,17 @@ void CLConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weig
_gemm_output.allocator()->init(info_gemm);
_memory_group.manage(&_gemm_output);
- // Configure kernels
- _input_im2col_kernel.set_target(CLScheduler::get().target());
- _input_im2col_kernel.configure(input, &_input_im2col_reshaped, Size2D(kernel_width, kernel_height), conv_info, _append_bias);
+ // Configure im2col
+ _input_im2col_kernel.configure(input, &_input_im2col_reshaped, Size2D(kernel_width, kernel_height), conv_info, append_bias);
// Configure matrix multiply
- if(run_interleaved)
+ if(_is_interleaved_transposed)
{
+ // Configure GEMMInterleave4x4. _input_interleaved_reshaped will be auto configured in the kernel
_input_interleave_kernel.configure(&_input_im2col_reshaped, &_input_interleaved_reshaped);
+ _memory_group.manage(&_input_interleaved_reshaped);
+
+ // Configure GEMM
configure_mm(&_input_interleaved_reshaped, weights, &_gemm_output);
_input_interleaved_reshaped.allocator()->allocate();
}
@@ -289,7 +267,6 @@ void CLConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weig
}
// Configure Col2Im
- _output_col2im_kernel.set_target(CLScheduler::get().target());
_output_col2im_kernel.configure(_is_quantized ? &_tmp_output : &_gemm_output, output, std::make_pair(conv_w, conv_h));
if(_is_quantized)
{
@@ -323,7 +300,7 @@ void CLConvolutionLayer::run()
// Run im2col
CLScheduler::get().enqueue(_input_im2col_kernel);
- if(!_is_fully_connected_convolution && !_is_quantized)
+ if(_is_interleaved_transposed)
{
// Run interleave4x4
CLScheduler::get().enqueue(_input_interleave_kernel);
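
Distilled from the CLConvolutionLayer hunks above, the reshaping decision now hangs off a single flag. A hypothetical standalone helper (not in the patch) restating the rule:

    // Interleave/transpose the GEMM operands only when the convolution is
    // neither "fully connected" (1x1 output) nor quantized; the quantized
    // path goes through GEMMLowp instead and needs no reshaping here.
    static bool use_interleaved_transposed_gemm(unsigned int conv_w, unsigned int conv_h, bool is_quantized)
    {
        const bool is_fully_connected_convolution = (conv_w == 1) && (conv_h == 1);
        return !is_fully_connected_convolution && !is_quantized;
    }

Caching the result in _is_interleaved_transposed lets run() make the same choice without recomputing conv_w and conv_h.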
diff --git a/src/runtime/CL/functions/CLGEMM.cpp b/src/runtime/CL/functions/CLGEMM.cpp
index ca0228fcdb..be2527f4ba 100644
--- a/src/runtime/CL/functions/CLGEMM.cpp
+++ b/src/runtime/CL/functions/CLGEMM.cpp
@@ -39,14 +39,17 @@
using namespace arm_compute;
CLGEMM::CLGEMM(std::shared_ptr<IMemoryManager> memory_manager)
- : _memory_group(std::move(memory_manager)), _interleave_kernel(), _transpose_kernel(), _mm_kernel(), _ma_kernel(), _tmp_a(), _tmp_b(), _is_interleaved_transposed(false), _run_addition(false)
+ : _memory_group(std::move(memory_manager)), _interleave_kernel(), _transpose_kernel(), _mm_kernel(), _ma_kernel(), _tmp_a(), _tmp_b(), _is_interleaved_transposed(false), _run_addition(false),
+ _is_first_run(true), _reshape_b_only_on_first_run(false)
{
}
-void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *c, ICLTensor *output, float alpha, float beta)
+void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *c, ICLTensor *output, float alpha, float beta, const GEMMInfo &gemm_info)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(a, b, output);
+ ARM_COMPUTE_ERROR_ON_MSG(gemm_info.is_a_reshaped(), "Matrix A already reshaped is not supported");
+ ARM_COMPUTE_ERROR_ON_MSG(gemm_info.is_b_reshaped(), "Matrix B already reshaped is not supported");
if(c != nullptr)
{
@@ -60,7 +63,11 @@ void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *
ARM_COMPUTE_ERROR_ON_MSG(a->info()->dimension(0) != b->info()->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B");
// If the input tensor has less than 16 rows, we run a special version of GEMM without reshaping the input tensors
- _is_interleaved_transposed = a->info()->dimension(1) > 16;
+ // For Bifrost architectures we do not reshape the input matrices
+ _is_interleaved_transposed = (a->info()->dimension(1) > 16 && CLScheduler::get().target() != GPUTarget::BIFROST);
+
+ // Check if we need to reshape the matrix B only on the first run
+ _reshape_b_only_on_first_run = gemm_info.reshape_b_only_on_first_run();
const ICLTensor *matrix_a = a;
const ICLTensor *matrix_b = b;
@@ -73,31 +80,17 @@ void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *
matrix_a = &_tmp_a;
matrix_b = &_tmp_b;
- TensorShape shape_tmp_a = a->info()->tensor_shape();
- TensorShape shape_tmp_b = b->info()->tensor_shape();
-
- shape_tmp_a.set(0, a->info()->dimension(0) * 4);
- shape_tmp_a.set(1, std::ceil(a->info()->dimension(1) / 4.0f));
-
- const unsigned int transpose_w = max_cl_vector_width / data_size_from_type(b->info()->data_type());
- shape_tmp_b.set(0, b->info()->dimension(1) * transpose_w);
- shape_tmp_b.set(1, std::ceil(b->info()->dimension(0) / static_cast<float>(transpose_w)));
-
- TensorInfo info_a(shape_tmp_a, 1, a->info()->data_type(), a->info()->fixed_point_position());
- _tmp_a.allocator()->init(info_a);
-
- TensorInfo info_b(shape_tmp_b, 1, b->info()->data_type(), b->info()->fixed_point_position());
- _tmp_b.allocator()->init(info_b);
-
- // Manage intermediate buffers
- _memory_group.manage(&_tmp_a);
- _memory_group.manage(&_tmp_b);
+ // _tmp_a and _tmp_b will be auto configured in _interleave_kernel and in _transpose_kernel
// Configure interleave kernel
_interleave_kernel.configure(a, &_tmp_a);
// Configure transpose kernel
_transpose_kernel.configure(b, &_tmp_b);
+
+ // Manage intermediate buffers
+ _memory_group.manage(&_tmp_a);
+ _memory_group.manage(&_tmp_b);
}
_mm_kernel.configure(matrix_a, matrix_b, output, alpha, _is_interleaved_transposed);
@@ -126,8 +119,18 @@ void CLGEMM::run()
// Run interleave kernel
CLScheduler::get().enqueue(_interleave_kernel, false);
- // Run transpose kernel
- CLScheduler::get().enqueue(_transpose_kernel, false);
+ if(_is_first_run)
+ {
+ // Run transpose kernel
+ CLScheduler::get().enqueue(_transpose_kernel, false);
+
+ _is_first_run = false;
+ }
+ else if(!_reshape_b_only_on_first_run)
+ {
+ // Run transpose kernel
+ CLScheduler::get().enqueue(_transpose_kernel, false);
+ }
}
// Run matrix multiply kernel
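
The reshape heuristic in CLGEMM::configure() can likewise be restated as a hypothetical helper (not in the patch): reshaping pays off only for sufficiently tall A matrices, and is skipped entirely on Bifrost, where the new non-reshaped kernel is faster.

    #include "arm_compute/core/ITensorInfo.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"

    static bool gemm_reshapes_inputs(const arm_compute::ITensorInfo &a)
    {
        // A with 16 rows or fewer: run the special GEMM without reshaping.
        // Bifrost: always use the non-reshaped kernel introduced by this patch.
        return a.dimension(1) > 16 && arm_compute::CLScheduler::get().target() != arm_compute::GPUTarget::BIFROST;
    }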
diff --git a/src/runtime/NEON/functions/NEGEMM.cpp b/src/runtime/NEON/functions/NEGEMM.cpp
index 03ba43f901..e640b0604c 100644
--- a/src/runtime/NEON/functions/NEGEMM.cpp
+++ b/src/runtime/NEON/functions/NEGEMM.cpp
@@ -50,15 +50,17 @@ namespace arm_compute
{
NEGEMM::NEGEMM(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(std::move(memory_manager)), _interleave_kernel(), _transpose_kernel(), _mm_kernel(), _mm_optimised_kernel(nullptr), _ma_kernel(), _tmp_a(), _tmp_b(), _workspace(),
- _run_vector_matrix_multiplication(false), _run_addition(false)
+ _run_vector_matrix_multiplication(false), _run_addition(false), _is_first_run(true), _reshape_b_only_on_first_run(false)
{
}
-void NEGEMM::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, float alpha, float beta)
+void NEGEMM::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, float alpha, float beta, const GEMMInfo &gemm_info)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::F32, DataType::F16, DataType::QS8, DataType::QS16);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(a, b, d);
ARM_COMPUTE_ERROR_ON_MSG(a->info()->dimension(0) != b->info()->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B");
+ ARM_COMPUTE_ERROR_ON_MSG(gemm_info.is_a_reshaped(), "Matrix A already reshaped is not supported");
+ ARM_COMPUTE_ERROR_ON_MSG(gemm_info.is_b_reshaped(), "Matrix B already reshaped is not supported");
if(c != nullptr)
{
@@ -70,6 +72,8 @@ void NEGEMM::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITe
ARM_COMPUTE_ERROR_ON_MSG(c->info()->dimension(1) != d->info()->dimension(1), "The C matrix must have the same number of columns as the output matrix");
}
+ // Check if we need to reshape the matrix B only on the first run
+ _reshape_b_only_on_first_run = gemm_info.reshape_b_only_on_first_run();
_run_vector_matrix_multiplication = a->info()->dimension(1) < 2;
// Check if the first input tensor is a vector.
@@ -207,8 +211,18 @@ void NEGEMM::run()
// Run interleave kernel
NEScheduler::get().schedule(&_interleave_kernel, Window::DimY);
- // Run transpose kernel
- NEScheduler::get().schedule(&_transpose_kernel, Window::DimY);
+ if(_is_first_run)
+ {
+ // Run transpose kernel
+ NEScheduler::get().schedule(&_transpose_kernel, Window::DimY);
+
+ _is_first_run = false;
+ }
+ else if(!_reshape_b_only_on_first_run)
+ {
+ // Run transpose kernel
+ NEScheduler::get().schedule(&_transpose_kernel, Window::DimY);
+ }
}
NEScheduler::get().schedule(&_mm_kernel, _run_vector_matrix_multiplication ? Window::DimX : Window::DimY);
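
Both run() hunks above implement the same first-run caching pattern. A minimal standalone sketch (names hypothetical, not part of the patch):

    #include <functional>

    struct BTransposeCache
    {
        bool is_first_run{ true };
        bool reshape_b_only_on_first_run{ false };

        // Enqueue the transpose of B on the first run unconditionally, and on
        // later runs only if the caller did not promise (via GEMMInfo) that B
        // is unchanged across runs.
        void maybe_transpose(const std::function<void()> &enqueue_transpose)
        {
            if(is_first_run || !reshape_b_only_on_first_run)
            {
                enqueue_transpose();
            }
            is_first_run = false;
        }
    };

For inference workloads, where B holds constant weights, this means the transpose1xW kernel runs once and its output is reused on every subsequent call.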