From 20d7848b1a0447dced362b3df57e9d30aebac5d4 Mon Sep 17 00:00:00 2001
From: Gian Marco
Date: Thu, 11 Jan 2018 15:10:58 +0000
Subject: COMPMID-816 - Enabled CLConvolutionLayer to use CLGEMM function
 instead of CLGEMMMatrixMultiplyKernel kernel.

Change-Id: If035fa3d1fb3ff4012442bcd908c370d21aa6657
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/115990
Tested-by: Jenkins
Reviewed-by: Pablo Tello
Reviewed-by: Anthony Barbier
---
 src/runtime/CL/functions/CLConvolutionLayer.cpp | 118 ++++++++++++++----
 1 file changed, 71 insertions(+), 47 deletions(-)

diff --git a/src/runtime/CL/functions/CLConvolutionLayer.cpp b/src/runtime/CL/functions/CLConvolutionLayer.cpp
index 2c1ddc3e3b..d1153978ff 100644
--- a/src/runtime/CL/functions/CLConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLConvolutionLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -87,7 +87,6 @@ void CLConvolutionLayerReshapeWeights::run()
 {
     _memory_group.acquire();
 
-    cl::CommandQueue q = CLScheduler::get().queue();
     CLScheduler::get().enqueue(_weights_reshape_kernel);
     if(_transpose1xW)
     {
@@ -98,33 +97,49 @@
 }
 
 CLConvolutionLayer::CLConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
-    : _memory_group(memory_manager), _reshape_weights(), _input_im2col_kernel(), _input_interleave_kernel(), _mm_kernel(), _mm_gemmlowp(memory_manager), _gemmlowp_output_stage(), _output_col2im_kernel(),
-      _input_im2col_reshaped(), _input_interleaved_reshaped(), _weights_reshaped(), _weights_transposed(), _gemm_output(), _tmp_output(), _are_weights_reshaped(false), _is_quantized(false),
+    : _memory_group(memory_manager), _reshape_weights(), _im2col_kernel(), _interleave_kernel(), _mm_kernel(), _mm_gemm(memory_manager), _mm_gemmlowp(memory_manager), _gemmlowp_output_stage(),
+      _col2im_kernel(), _im2col_output(), _interleave_output(), _weights_reshaped(), _weights_transposed(), _gemm_output(), _tmp_output(), _are_weights_reshaped(false), _is_quantized(false),
       _is_interleaved_transposed(false)
 {
 }
 
-void CLConvolutionLayer::configure_mm(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output, bool is_interleaved_transposed)
+void CLConvolutionLayer::configure_mm(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output, bool is_interleaved_transposed, bool are_weights_reshaped)
 {
     if(_is_quantized)
     {
-        // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
-        // Extract and negate input and weights offset
-        const QuantizationInfo input_quantization_info   = input->info()->quantization_info();
-        const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();
+        if(are_weights_reshaped)
+        {
+            ARM_COMPUTE_ERROR("Weights already reshaped are not supported with gemmlowp");
+        }
+        else
+        {
+            // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
+            // Extract and negate input and weights offset
+            const QuantizationInfo input_quantization_info   = input->info()->quantization_info();
+            const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();
 
-        input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
-        weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));
+            input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
+            weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));
 
-        _mm_gemmlowp.configure(input, weights, output, GEMMInfo(false, false, true /* Reshape weights only for the first run*/));
+            _mm_gemmlowp.configure(input, weights, output, GEMMInfo(false, false, true /* Reshape weights only for the first run*/));
 
-        // Revert back QuantizatioInfo as input and weights could be used in other convolution layers
-        input->info()->set_quantization_info(input_quantization_info);
-        weights->info()->set_quantization_info(weights_quantization_info);
+            // Revert back QuantizationInfo as input and weights could be used in other convolution layers
+            input->info()->set_quantization_info(input_quantization_info);
+            weights->info()->set_quantization_info(weights_quantization_info);
+        }
     }
     else
     {
-        _mm_kernel.configure(input, weights, output, 1.f, is_interleaved_transposed);
+        if(are_weights_reshaped)
+        {
+            // Configure matrix multiply kernel
+            _mm_kernel.configure(input, weights, output, 1.f, is_interleaved_transposed);
+        }
+        else
+        {
+            // Configure matrix multiply function
+            _mm_gemm.configure(input, weights, nullptr, output, 1.0f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run*/));
+        }
     }
 }
 
@@ -133,6 +148,7 @@ void CLConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weig
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QASYMM8, DataType::QS16, DataType::F16, DataType::F32);
     ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
     ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, weights);
+    ARM_COMPUTE_ERROR_ON(weights_info.are_reshaped() && CLScheduler::get().target() == GPUTarget::BIFROST);
     ARM_COMPUTE_ERROR_ON(!weights_info.are_reshaped() && weights->info()->dimension(2) != input->info()->dimension(2));
     ARM_COMPUTE_ERROR_ON(weights->info()->num_dimensions() > 4);
     ARM_COMPUTE_ERROR_ON(weights_info.are_reshaped() && is_data_type_quantized_asymmetric(input->info()->data_type()));
@@ -158,8 +174,8 @@ void CLConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weig
 
     // Set the GPU target for matrix multiply and im2col and col2im
     _mm_kernel.set_target(CLScheduler::get().target());
-    _input_im2col_kernel.set_target(CLScheduler::get().target());
-    _output_col2im_kernel.set_target(CLScheduler::get().target());
+    _im2col_kernel.set_target(CLScheduler::get().target());
+    _col2im_kernel.set_target(CLScheduler::get().target());
 
     const bool append_bias = (biases != nullptr) && (!_is_quantized);
     _are_weights_reshaped  = weights_info.are_reshaped();
@@ -183,7 +199,7 @@ void CLConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weig
 
     // Check if its a "fully connected" convolution
     const bool is_fully_connected_convolution = ((conv_w == 1) && (conv_h == 1));
-    _is_interleaved_transposed                = (!is_fully_connected_convolution && !_is_quantized);
+    _is_interleaved_transposed                = (!is_fully_connected_convolution) && (!_is_quantized) && (_are_weights_reshaped);
 
     unsigned int mat_weights_cols = weights->info()->dimension(3);
     unsigned int mat_weights_rows = weights->info()->dimension(0) * weights->info()->dimension(1) * weights->info()->dimension(2) + bias_element;
@@ -205,8 +221,9 @@ void CLConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weig
     }
     else
     {
-        // _weights_reshaped will be auto configured in the kernel
-        _reshape_weights.configure(weights, biases_to_use, &_weights_reshaped, _is_interleaved_transposed /* 1xW transpose */);
+        // _weights_reshaped will be auto configured in the kernel.
+        // Just append biases and do not transpose 1xW as it will be reshaped in CLGEMM
+        _reshape_weights.configure(weights, biases_to_use, &_weights_reshaped, false);
         weights = &_weights_reshaped;
     }
 
@@ -221,11 +238,11 @@ void CLConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weig
     // FIXME: input->clone() doesn't work with subtensors for grouped convolutions.
     TensorInfo im2col_reshaped_info(shape_im2col, 1, dt, input->info()->fixed_point_position());
     im2col_reshaped_info.set_quantization_info(input->info()->quantization_info());
-    _input_im2col_reshaped.allocator()->init(im2col_reshaped_info);
-    _memory_group.manage(&_input_im2col_reshaped);
+    _im2col_output.allocator()->init(im2col_reshaped_info);
+    _memory_group.manage(&_im2col_output);
 
     // Create GEMM output tensor
-    TensorShape shape_gemm = _input_im2col_reshaped.info()->tensor_shape();
+    TensorShape shape_gemm = _im2col_output.info()->tensor_shape();
     shape_gemm.set(0, mat_weights_cols);
     shape_gemm.set(1, mat_input_rows);
     const DataType gemm_data_type = _is_quantized ? DataType::S32 : dt;
@@ -237,24 +254,24 @@ void CLConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weig
     _memory_group.manage(&_gemm_output);
 
     // Configure im2col
-    _input_im2col_kernel.configure(input, &_input_im2col_reshaped, Size2D(kernel_width, kernel_height), conv_info, append_bias);
+    _im2col_kernel.configure(input, &_im2col_output, Size2D(kernel_width, kernel_height), conv_info, append_bias);
 
     // Configure matrix multiply
     if(_is_interleaved_transposed)
     {
         // Configure GEMMInterleave4x4. _input_interleaved_reshaped will be auto configured in the kernel
-        _input_interleave_kernel.configure(&_input_im2col_reshaped, &_input_interleaved_reshaped);
-        _memory_group.manage(&_input_interleaved_reshaped);
+        _interleave_kernel.configure(&_im2col_output, &_interleave_output);
+        _memory_group.manage(&_interleave_output);
 
         // Configure GEMM
-        configure_mm(&_input_interleaved_reshaped, weights, &_gemm_output);
-        _input_interleaved_reshaped.allocator()->allocate();
+        configure_mm(&_interleave_output, weights, &_gemm_output, true, _are_weights_reshaped);
+        _interleave_output.allocator()->allocate();
     }
     else
     {
-        configure_mm(&_input_im2col_reshaped, weights, &_gemm_output, false);
+        configure_mm(&_im2col_output, weights, &_gemm_output, false, _are_weights_reshaped);
     }
-    _input_im2col_reshaped.allocator()->allocate();
+    _im2col_output.allocator()->allocate();
 
     // Configure output stage for quantized case
     if(_is_quantized)
@@ -267,7 +284,7 @@ void CLConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weig
     }
 
     // Configure Col2Im
-    _output_col2im_kernel.configure(_is_quantized ? &_tmp_output : &_gemm_output, output, std::make_pair(conv_w, conv_h));
+    _col2im_kernel.configure(_is_quantized ? &_tmp_output : &_gemm_output, output, std::make_pair(conv_w, conv_h));
     if(_is_quantized)
     {
         _tmp_output.allocator()->allocate();
@@ -298,32 +315,39 @@ void CLConvolutionLayer::run()
     _memory_group.acquire();
 
     // Run im2col
-    CLScheduler::get().enqueue(_input_im2col_kernel);
+    CLScheduler::get().enqueue(_im2col_kernel);
 
+    // Note: _is_interleaved_transposed is true only if the weights passed to the function have already been reshaped
+    //       and we do not have QASYMM8 data type. If this flag is true, we need to run the
+    //       gemm kernel instead of the gemm function
     if(_is_interleaved_transposed)
     {
-        // Run interleave4x4
-        CLScheduler::get().enqueue(_input_interleave_kernel);
-    }
+        // Run interleave4x4 kernel
+        CLScheduler::get().enqueue(_interleave_kernel);
 
-    // Runs matrix multiply on reshaped matrices
-    if(_is_quantized)
-    {
-        _mm_gemmlowp.run();
+        // Run matrix multiply kernel
+        CLScheduler::get().enqueue(_mm_kernel);
     }
     else
     {
-        CLScheduler::get().enqueue(_mm_kernel);
-    }
+        // Run the CLGEMM or CLGEMMLowpMatrixMultiplyCore function
+        if(_is_quantized)
+        {
+            // Run gemmlowp
+            _mm_gemmlowp.run();
 
-    // Run output stage for quantized case
-    if(_is_quantized)
-    {
-        _gemmlowp_output_stage.run();
+            // Run output stage
+            _gemmlowp_output_stage.run();
+        }
+        else
+        {
+            // Run gemm
+            _mm_gemm.run();
+        }
     }
 
     // Reshape output matrix
-    CLScheduler::get().enqueue(_output_col2im_kernel, false);
+    CLScheduler::get().enqueue(_col2im_kernel, false);
 
     _memory_group.release();
 }
-- 
cgit v1.2.1
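
For context, a minimal sketch of how a caller drives the convolution path this patch reworks. It is not part of the commit: the tensor shapes and the single-layer setup are illustrative assumptions, and only public Compute Library API calls of this period are used. Passing the default WeightsInfo() means weights_info.are_reshaped() is false, so configure_mm() takes the new CLGEMM function path rather than the CLGEMMMatrixMultiplyKernel path.

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"

using namespace arm_compute;

int main()
{
    // Create the OpenCL context and command queue used by all CL functions
    CLScheduler::get().default_init();

    CLTensor src, weights, biases, dst;

    // Illustrative shapes (WHCN layout): 32x32 3-channel input,
    // 16 filters of 3x3x3, no padding, stride 1 -> 30x30x16 output
    src.allocator()->init(TensorInfo(TensorShape(32U, 32U, 3U), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 3U, 16U), 1, DataType::F32));
    biases.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(30U, 30U, 16U), 1, DataType::F32));

    // Default WeightsInfo(): weights are not pre-reshaped, so the function
    // reshapes them itself and dispatches the matrix multiply to CLGEMM
    CLConvolutionLayer conv;
    conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 0, 0));

    src.allocator()->allocate();
    weights.allocator()->allocate();
    biases.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill src/weights/biases here ...

    conv.run();                // im2col -> CLGEMM -> col2im
    CLScheduler::get().sync(); // Wait for the queued OpenCL work to finish
    return 0;
}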