From 4b90865ab985d571f70c60583cdfb8c7a65f1670 Mon Sep 17 00:00:00 2001
From: Gian Marco Iodice
Date: Thu, 18 Oct 2018 10:21:02 +0100
Subject: COMPMID-1413 - Improve the performance of GEMMLowp with 8 bit dot
 product on OpenCL

COMPMID-1424 - Add dot product support for CLDepthwise QASYMM8 3x3 NHWC
non-unit stride

With this patch we are able to improve the performance of MobileNet
v1-qasymm8 by 37%.

Tried to use the dot product instruction in CLDepthwise QASYMM8 3x3 NHWC
non-unit stride as well, but I have not seen any benefit (maybe because we
have few arithmetic operations and we do not have more load instructions).
However, depthwise convolution has still been improved by 30%.

Change-Id: Id768a99c2e53a04276707e427af5d0ec93419ada
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/155082
Tested-by: bsgcomp
Reviewed-by: Georgios Pinitas
---
 src/runtime/CL/functions/CLFullyConnectedLayer.cpp |   3 +-
 .../CL/functions/CLGEMMConvolutionLayer.cpp        | 236 ++++++++++++---------
 .../CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp  | 107 +++++++---
 src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp |   8 +-
 .../NEON/functions/NEFullyConnectedLayer.cpp       |   3 +-
 .../NEON/functions/NEGEMMConvolutionLayer.cpp      |   4 +-
 .../functions/NEGEMMLowpMatrixMultiplyCore.cpp     |   8 +-
 7 files changed, 235 insertions(+), 134 deletions(-)

(limited to 'src/runtime')
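The heart of the change in the diffs below: when a GEMMLowpOutputStageInfo is supplied, CLGEMMLowpMatrixMultiplyCore no longer writes raw S32 accumulators for a separate CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint pass; the offset contribution and the quantize-down stage run fused in a single kernel, which can also add the new bias tensor. A scalar sketch of the fused arithmetic, assuming simplified rounding (all names here are illustrative, not the kernel's actual symbols):

    #include <algorithm>
    #include <cstdint>

    // Scalar model of the fused offset-contribution + output stage.
    // acc is the raw S32 dot product of a quantized (uint8) A row and B column.
    int32_t fused_offset_contribution_output_stage(int32_t acc,
                                                   int32_t a_offset, int32_t sum_col_b, // offset of A, column sum of B
                                                   int32_t b_offset, int32_t sum_row_a, // offset of B, row sum of A
                                                   int32_t k,                           // accumulation depth
                                                   int32_t bias,
                                                   int32_t gemmlowp_multiplier,         // fixed-point (Q31) multiplier
                                                   int32_t gemmlowp_shift,              // right shift after the multiply
                                                   int32_t gemmlowp_offset,             // output zero point
                                                   int32_t min_bound, int32_t max_bound)
    {
        // Offset contribution: sum_k (qa - a_offset) * (qb - b_offset)
        //                    = acc - a_offset*sum(qb) - b_offset*sum(qa) + k*a_offset*b_offset
        acc = acc - a_offset * sum_col_b - b_offset * sum_row_a + k * a_offset * b_offset + bias;

        // QUANTIZE_DOWN_FIXEDPOINT, ignoring the saturating/rounding details of the real kernel
        int32_t down = static_cast<int32_t>((static_cast<int64_t>(acc) * gemmlowp_multiplier) >> (31 + gemmlowp_shift));
        down += gemmlowp_offset;

        // The fused activation becomes a clamp; the caller finally casts to uint8
        return std::min(std::max(down, min_bound), max_bound);
    }

Fusing this epilogue into the matrix multiply removes a full S32 tensor round trip through memory per GEMM.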
diff --git a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
index 010985db06..c5637dba26 100644
--- a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
+++ b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
@@ -49,6 +49,7 @@ Status validate_mm(const ITensorInfo &input, const ITensorInfo &weights, const I
     // Validate gemmlowp function
     ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyCore::validate(&input.clone()->set_quantization_info(input_quantization_info),
                                                                        &weights.clone()->set_quantization_info(weights_quantization_info),
+                                                                      nullptr,
                                                                        &output));
 }
 else
@@ -91,7 +92,7 @@ void CLFullyConnectedLayer::configure_mm(const ICLTensor *input, const ICLTensor
     weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));

     // Configure gemmlowp function
-    _mm_gemmlowp.configure(input, weights, output);
+    _mm_gemmlowp.configure(input, weights, nullptr, output);

     // Revert back QuantizationInfo as input and weights could be used in other fully connected layers
     input->info()->set_quantization_info(input_quantization_info);
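CLFullyConnectedLayer itself passes nullptr for the new bias argument; the interesting detail in its quantized path is the temporary offset negation around the GEMMLowp configure/validate calls. Since dequantization is real = scale * (q - offset), handing the core a QuantizationInfo with -offset lets the correction be applied as additive terms built from row and column sums. A self-contained numeric check of the identity the reduction and offset-contribution kernels rely on (plain C++, not library code):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main()
    {
        // Toy quantized vectors and offsets
        const std::vector<int32_t> qa = { 12, 250, 3, 77 };
        const std::vector<int32_t> qb = { 9, 31, 200, 5 };
        const int32_t oa = 10, ob = 128;
        const int32_t k = static_cast<int32_t>(qa.size());

        int32_t direct = 0, raw = 0, sum_a = 0, sum_b = 0;
        for(int32_t i = 0; i < k; ++i)
        {
            direct += (qa[i] - oa) * (qb[i] - ob); // what we actually want
            raw    += qa[i] * qb[i];               // what the GEMM kernel computes
            sum_a  += qa[i];                       // row sum of A (CLGEMMLowpMatrixAReductionKernel)
            sum_b  += qb[i];                       // column sum of B (CLGEMMLowpMatrixBReductionKernel)
        }

        // Offset contribution: raw - oa*sum_b - ob*sum_a + k*oa*ob == direct
        assert(raw - oa * sum_b - ob * sum_a + k * oa * ob == direct);
        return 0;
    }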
diff --git a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
index 61180fd5d3..67f55d56e2 100644
--- a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
@@ -91,19 +91,21 @@ void CLConvolutionLayerReshapeWeights::run()
 }

 CLGEMMConvolutionLayer::CLGEMMConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
-    : _memory_group(memory_manager), _reshape_weights(), _im2col_kernel(), _mm_gemm(memory_manager), _mm_gemmlowp(memory_manager), _gemmlowp_output_stage(), _col2im_kernel(), _activationlayer_function(),
-      _add_bias_kernel(), _original_weights(nullptr), _im2col_output(), _weights_reshaped(), _gemm_output(), _tmp_output(), _data_layout(DataLayout::NCHW), _append_bias(false), _skip_im2col(false),
-      _skip_col2im(false), _is_quantized(false), _is_activationlayer_enabled(false), _is_prepared(false)
+    : _memory_group(memory_manager), _reshape_weights(), _im2col_kernel(), _mm_gemm(memory_manager), _mm_gemmlowp(memory_manager), _col2im_kernel(), _activationlayer_function(), _add_bias_kernel(),
+      _original_weights(nullptr), _im2col_output(), _weights_reshaped(), _gemm_output(), _data_layout(DataLayout::NCHW), _append_bias(false), _skip_im2col(false), _skip_col2im(false), _is_quantized(false),
+      _is_activationlayer_enabled(false), _is_prepared(false)
 {
 }

-void CLGEMMConvolutionLayer::configure_mm(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output, int gemm_3d_depth)
+void CLGEMMConvolutionLayer::configure_mm(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const GEMMLowpOutputStageInfo &gemmlowp_output_stage,
+                                          int gemm_3d_depth)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights);
-    ARM_COMPUTE_ERROR_THROW_ON(validate_mm(input->info(), weights->info(), output->info(), gemm_3d_depth, _skip_im2col));
+    ARM_COMPUTE_ERROR_THROW_ON(validate_mm(input->info(), weights->info(), biases != nullptr ? biases->info() : nullptr, output->info(), gemmlowp_output_stage, gemm_3d_depth, _skip_im2col));

     const GEMMInfo &gemm_info = GEMMInfo(false, false, true /* Reshape weights only for the first run */,
-                                         gemm_3d_depth, _skip_im2col /* Reinterpret the input as 3D if im2col is skipped */);
+                                         gemm_3d_depth, _skip_im2col /* Reinterpret the input as 3D if im2col is skipped */,
+                                         false, gemmlowp_output_stage);

     if(_is_quantized)
     {
@@ -115,7 +117,7 @@ void CLGEMMConvolutionLayer::configure_mm(const ICLTensor *input, const ICLTenso
         input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
         weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));

-        _mm_gemmlowp.configure(input, weights, output, gemm_info);
+        _mm_gemmlowp.configure(input, weights, biases, output, gemm_info);

         // Revert back QuantizationInfo as input and weights could be used in other convolution layers
         input->info()->set_quantization_info(input_quantization_info);
@@ -128,12 +130,14 @@ void CLGEMMConvolutionLayer::configure_mm(const ICLTensor *input, const ICLTenso
     }
 }

-Status CLGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, int gemm_3d_depth, bool skip_im2col)
+Status CLGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
+                                           const GEMMLowpOutputStageInfo &gemmlowp_output_stage, int gemm_3d_depth, bool skip_im2col)
 {
     const bool is_quantized = is_data_type_quantized_asymmetric(input->data_type());

     const GEMMInfo &gemm_info = GEMMInfo(false, false, true /* Reshape weights only for the first run */,
-                                         gemm_3d_depth, skip_im2col /* Reinterpret the input as 3D if im2col is skipped */);
+                                         gemm_3d_depth, skip_im2col /* Reinterpret the input as 3D if im2col is skipped */,
+                                         false, gemmlowp_output_stage);

     if(is_quantized)
     {
@@ -148,7 +152,7 @@ Status CLGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITens
         weights_qa->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));

         // Perform validation step on GEMMLowp
-        return CLGEMMLowpMatrixMultiplyCore::validate(input_qa.get(), weights_qa.get(), output, gemm_info);
+        return CLGEMMLowpMatrixMultiplyCore::validate(input_qa.get(), weights_qa.get(), biases, output, gemm_info);
     }
     else
     {
@@ -176,27 +180,26 @@ void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *
     const DataLayout data_layout = input->info()->data_layout();
     const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
     const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
-    const int        idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
     const int        idx_kernels = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

     const unsigned int kernel_width  = weights->info()->dimension(idx_width);
     const unsigned int kernel_height = weights->info()->dimension(idx_height);

-    _is_prepared      = weights_info.retain_internal_weights();
-    _original_weights = weights;
-    _is_quantized     = is_data_type_quantized_asymmetric(input->info()->data_type());
-    _data_layout      = data_layout;
-    _skip_im2col      = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);
-    _skip_col2im      = data_layout == DataLayout::NHWC;
-    _append_bias      = (biases != nullptr) && (!_is_quantized);
+    _is_prepared                = weights_info.retain_internal_weights();
+    _original_weights           = weights;
+    _is_quantized               = is_data_type_quantized_asymmetric(input->info()->data_type());
+    _data_layout                = data_layout;
+    _skip_im2col                = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);
+    _skip_col2im                = data_layout == DataLayout::NHWC;
+    _append_bias                = (biases != nullptr) && (!_is_quantized);
+    _is_activationlayer_enabled = act_info.enabled();

     // Set the GPU target for im2col and col2im
     _im2col_kernel.set_target(CLScheduler::get().target());
     _col2im_kernel.set_target(CLScheduler::get().target());

-    const ICLTensor *gemm_input_to_use         = input;
-    ICLTensor       *gemm_output_to_use        = output;
-    ICLTensor       *gemm_output_staged_to_use = output;
+    const ICLTensor *gemm_input_to_use  = input;
+    ICLTensor       *gemm_output_to_use = output;

     const ICLTensor *biases_to_use = (_append_bias && !_skip_im2col) ? biases : nullptr;

@@ -243,26 +246,17 @@ void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *
     }

     // Create GEMM output tensor
-    if(!_skip_col2im || _is_quantized)
+    if(!_skip_col2im)
     {
         TensorShape shape_gemm;
-        if(_skip_col2im)
-        {
-            shape_gemm = input->info()->tensor_shape();
-            shape_gemm.set(idx_width, conv_w);
-            shape_gemm.set(idx_height, conv_h);
-            shape_gemm.set(idx_channel, mat_weights_cols);
-        }
-        else
-        {
-            shape_gemm = _im2col_output.info()->tensor_shape();
-            shape_gemm.set(0, mat_weights_cols);
-            shape_gemm.set(1, conv_w * conv_h);
-        }
-        // GEMM output should be S32 for acquiring raw integer accumulator without quantized postprocessing for quantized asymmetric input.
-        const DataType gemm_data_type = _is_quantized ? DataType::S32 : data_type;
+
+        // If we cannot skip col2im it means we run im2col as well
+        shape_gemm = _im2col_output.info()->tensor_shape();
+        shape_gemm.set(0, mat_weights_cols);
+        shape_gemm.set(1, conv_w * conv_h);
+
         // FIXME: input->clone() doesn't work with subtensors for grouped convolutions.
-        TensorInfo info_gemm(shape_gemm, 1, gemm_data_type);
+        TensorInfo info_gemm(shape_gemm, 1, data_type);
         info_gemm.set_quantization_info(output->info()->quantization_info()).set_data_layout(input->info()->data_layout());
         _gemm_output.allocator()->init(info_gemm);
         _memory_group.manage(&_gemm_output);
@@ -271,42 +265,64 @@ void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *
         gemm_output_to_use = &_gemm_output;
     }

-    // Configure and tune GEMM
-    configure_mm(gemm_input_to_use, &_weights_reshaped, gemm_output_to_use, (data_layout == DataLayout::NHWC) ? conv_h : 1);
-
-    if(!_skip_im2col)
-    {
-        _im2col_output.allocator()->allocate();
-    }
+    GEMMLowpOutputStageInfo gemmlowp_output_stage;
+    gemmlowp_output_stage.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
+    gemmlowp_output_stage.gemmlowp_offset     = 0;
+    gemmlowp_output_stage.gemmlowp_multiplier = 0;
+    gemmlowp_output_stage.gemmlowp_shift      = 0;

     // Configure output stage for quantized case
     if(_is_quantized)
     {
         const QuantizationInfo output_quant_info = (output->info()->total_size() == 0) ? input->info()->quantization_info() : output->info()->quantization_info();

-        if(!_skip_col2im)
+        float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output_quant_info.scale;
+        int   output_multiplier, output_shift;
+        quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
+
+        int min_activation = 0;
+        int max_activation = 0;
+
+        const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
+                                                                                   ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
+                                                                                 };
+
+        if(_is_activationlayer_enabled && supported_acts.count(act_info.activation()) != 0)
         {
-            _memory_group.manage(&_tmp_output);
-            gemm_output_staged_to_use = &_tmp_output;
+            const int a_const_int = input->info()->quantization_info().quantize(act_info.a(), RoundingPolicy::TO_NEAREST_UP);
+            const int b_const_int = input->info()->quantization_info().quantize(act_info.b(), RoundingPolicy::TO_NEAREST_UP);
+
+            min_activation = act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU ? input->info()->quantization_info().offset : b_const_int;
+            max_activation = act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU ? 255 : a_const_int;
+
+            // If the activation layer is RELU, BOUNDED_RELU or LU_BOUNDED_RELU, we can use the GEMMLowp output stage to perform this operation
+            _is_activationlayer_enabled = false;
         }

-        float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output_quant_info.scale;
-        _gemmlowp_output_stage.configure(gemm_output_to_use, biases, gemm_output_staged_to_use, multiplier, output_quant_info.offset);
+        // Set the GEMMLowp output stage info
+        gemmlowp_output_stage.gemmlowp_offset     = output_quant_info.offset;
+        gemmlowp_output_stage.gemmlowp_multiplier = output_multiplier;
+        gemmlowp_output_stage.gemmlowp_shift      = output_shift;
+        gemmlowp_output_stage.gemmlowp_min_bound  = min_activation;
+        gemmlowp_output_stage.gemmlowp_max_bound  = max_activation;
     }

-    if(!_skip_col2im)
+    // Configure and tune GEMM
+    configure_mm(gemm_input_to_use, &_weights_reshaped, biases, gemm_output_to_use, gemmlowp_output_stage, (data_layout == DataLayout::NHWC) ? conv_h : 1);
+
+    if(!_skip_im2col)
     {
-        // Configure and tune Col2Im
-        _col2im_kernel.configure(_is_quantized ? gemm_output_staged_to_use : gemm_output_to_use, output, Size2D(conv_w, conv_h), num_groups);
-        CLScheduler::get().tune_kernel_static(_col2im_kernel);
+        _im2col_output.allocator()->allocate();
     }

     if(!_skip_col2im)
     {
-        _tmp_output.allocator()->allocate();
+        // Configure and tune Col2Im
+        _col2im_kernel.configure(gemm_output_to_use, output, Size2D(conv_w, conv_h), num_groups);
+        CLScheduler::get().tune_kernel_static(_col2im_kernel);
     }

-    if(!_skip_col2im || _is_quantized)
+    if(!_skip_col2im)
     {
         _gemm_output.allocator()->allocate();
     }
@@ -314,9 +330,6 @@ void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *
     ARM_COMPUTE_ERROR_ON_MSG((output->info()->dimension(idx_width) != conv_w) || (output->info()->dimension(idx_height) != conv_h),
                              "Output shape does not match the expected one");

-    //Configure Activation Layer
-    _is_activationlayer_enabled = act_info.enabled();
-
     if(_is_activationlayer_enabled)
     {
         _activationlayer_function.configure(output, nullptr, act_info);
@@ -347,16 +360,16 @@ Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
     const unsigned int kernel_width  = weights->dimension(idx_width);
     const unsigned int kernel_height = weights->dimension(idx_height);

-    TensorInfo         im2col_reshaped_info, info_gemm, tmp_info, weights_reshaped_info;
-    const ITensorInfo *gemm_input_to_use         = input;
-    const ITensorInfo *gemm_output_to_use        = output;
-    const ITensorInfo *gemm_output_staged_to_use = output;
-    const ITensorInfo *weights_to_use            = weights;
+    TensorInfo         im2col_reshaped_info, info_gemm, weights_reshaped_info;
+    const ITensorInfo *gemm_input_to_use  = input;
+    const ITensorInfo *gemm_output_to_use = output;
+    const ITensorInfo *weights_to_use     = weights;

-    const bool is_quantized = is_data_type_quantized_asymmetric(data_type);
-    const bool append_bias  = (biases != nullptr) && (!is_quantized);
-    const bool skip_im2col  = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);
-    const bool skip_col2im  = data_layout == DataLayout::NHWC;
+    const bool is_quantized               = is_data_type_quantized_asymmetric(data_type);
+    const bool append_bias                = (biases != nullptr) && (!is_quantized);
+    const bool skip_im2col                = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);
+    const bool skip_col2im                = data_layout == DataLayout::NHWC;
+    bool       is_activationlayer_enabled = act_info.enabled();

     ARM_COMPUTE_RETURN_ERROR_ON((weights->dimension(idx_channel) * num_groups) != input->dimension(idx_channel));
     ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);
@@ -418,52 +431,80 @@ Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
     }

     // Create GEMM output tensor
-    if(!skip_col2im || is_quantized)
+    if(!skip_col2im)
     {
-        const DataType gemm_data_type = is_quantized ? DataType::S32 : data_type;
-        TensorShape    shape_gemm;
-        if(skip_col2im)
-        {
-            shape_gemm = input->tensor_shape();
-            shape_gemm.set(idx_width, conv_w);
-            shape_gemm.set(idx_height, conv_h);
-            shape_gemm.set(idx_channel, mat_weights_cols);
-        }
-        else
-        {
-            shape_gemm = gemm_input_to_use->tensor_shape();
-            shape_gemm.set(0, mat_weights_cols);
-            shape_gemm.set(1, conv_w * conv_h);
-        }
-        // GEMM output should be S32 for acquiring raw integer accumulator without quantized postprocessing for quantized asymmetric input.
-        info_gemm = TensorInfo(shape_gemm, 1, gemm_data_type);
+        TensorShape shape_gemm;
+
+        shape_gemm = gemm_input_to_use->tensor_shape();
+        shape_gemm.set(0, mat_weights_cols);
+        shape_gemm.set(1, conv_w * conv_h);
+
+        info_gemm = TensorInfo(shape_gemm, 1, data_type);
         info_gemm.set_quantization_info(output->quantization_info()).set_data_layout(input->data_layout());
         gemm_output_to_use = &info_gemm;
     }

-    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(gemm_input_to_use, weights_to_use, gemm_output_to_use, skip_col2im ? conv_h : 1, skip_im2col));
+    GEMMLowpOutputStageInfo gemmlowp_output_stage;
+    gemmlowp_output_stage.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
+    gemmlowp_output_stage.gemmlowp_offset     = 0;
+    gemmlowp_output_stage.gemmlowp_multiplier = 0;
+    gemmlowp_output_stage.gemmlowp_shift      = 0;

     if(is_quantized)
     {
-        if(!skip_col2im)
+        const QuantizationInfo output_quant_info = (output->total_size() == 0) ? input->quantization_info() : output->quantization_info();
+
+        float multiplier = input->quantization_info().scale * weights->quantization_info().scale / output_quant_info.scale;
+        int   output_multiplier, output_shift;
+        quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
+
+        int min_activation = 0;
+        int max_activation = 0;
+
+        const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
+                                                                                   ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
+                                                                                   ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
+                                                                                 };
+
+        if(is_activationlayer_enabled && supported_acts.count(act_info.activation()) != 0)
         {
-            tmp_info = TensorInfo(gemm_output_to_use->tensor_shape(), 1, DataType::QASYMM8);
-            tmp_info.set_quantization_info(output->quantization_info());
-            gemm_output_staged_to_use = &tmp_info;
+            const int a_const_int = input->quantization_info().quantize(act_info.a(), RoundingPolicy::TO_NEAREST_UP);
+            const int b_const_int = input->quantization_info().quantize(act_info.b(), RoundingPolicy::TO_NEAREST_UP);
+
+            min_activation = b_const_int;
+            max_activation = a_const_int;
+
+            if(act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
+            {
+                min_activation = input->quantization_info().offset;
+            }
+            if(act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU)
+            {
+                max_activation = 255;
+            }
+
+            // If the activation layer is RELU, BOUNDED_RELU or LU_BOUNDED_RELU, we can use the GEMMLowp output stage to perform this operation
+            is_activationlayer_enabled = false;
+
+            // Set the GEMMLowp output stage info
+            gemmlowp_output_stage.gemmlowp_offset     = output_quant_info.offset;
+            gemmlowp_output_stage.gemmlowp_multiplier = output_multiplier;
+            gemmlowp_output_stage.gemmlowp_shift      = output_shift;
+            gemmlowp_output_stage.gemmlowp_min_bound  = min_activation;
+            gemmlowp_output_stage.gemmlowp_max_bound  = max_activation;
         }
-
-        // Validate output stage for quantized case
-        CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(gemm_output_to_use, biases, gemm_output_staged_to_use);
     }

+    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(gemm_input_to_use, weights_to_use, biases, gemm_output_to_use, gemmlowp_output_stage, skip_col2im ? conv_h : 1, skip_im2col));
+
     // Validate Col2Im
     if(!skip_col2im)
     {
-        ARM_COMPUTE_RETURN_ON_ERROR(CLCol2ImKernel::validate(is_quantized ? gemm_output_staged_to_use : gemm_output_to_use, output,
-                                                             Size2D(conv_w, conv_h), num_groups));
+        ARM_COMPUTE_RETURN_ON_ERROR(CLCol2ImKernel::validate(gemm_output_to_use, output, Size2D(conv_w, conv_h), num_groups));
     }

     //Validate Activation Layer
-    if(act_info.enabled())
+    if(is_activationlayer_enabled)
     {
         ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayer::validate(output, nullptr, act_info));
     }
@@ -488,9 +529,6 @@ void CLGEMMConvolutionLayer::run()
     {
         // Run gemmlowp
         _mm_gemmlowp.run();
-
-        // Run output stage
-        _gemmlowp_output_stage.run();
     }
     else
     {
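A notable detail in the convolution changes above: when the activation is RELU, BOUNDED_RELU or LU_BOUNDED_RELU, the layer now folds it into the GEMMLowp clamp bounds instead of running CLActivationLayer afterwards. A standalone sketch of that bound computation, assuming round-half-up quantization q = round(x / scale) + offset (the helper names below are hypothetical):

    #include <cmath>
    #include <cstdint>

    // Hypothetical helper approximating QuantizationInfo::quantize() with a
    // round-half-up policy: q = round(x / scale) + offset
    static int32_t quantize(float x, float scale, int32_t offset)
    {
        return static_cast<int32_t>(std::round(x / scale)) + offset;
    }

    // Fold a ReLU-family activation into the [min_bound, max_bound] clamp of the
    // GEMMLowp output stage, following the logic added to CLGEMMConvolutionLayer.
    // act_a / act_b are the activation's real-valued upper / lower parameters.
    void fold_activation_into_bounds(bool is_relu, bool is_lu_bounded_relu,
                                     float act_a, float act_b,
                                     float scale, int32_t offset,
                                     int32_t &min_bound, int32_t &max_bound)
    {
        // Real value 0 quantizes to 'offset', so a lower bound of 0 becomes 'offset'
        min_bound = is_lu_bounded_relu ? quantize(act_b, scale, offset) : offset;
        // A plain ReLU keeps the full uint8 upper range; bounded variants clamp at quantize(act_a)
        max_bound = is_relu ? 255 : quantize(act_a, scale, offset);
    }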
diff --git a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
index f79fb43073..f2efb3249b 100644
--- a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
@@ -42,7 +42,7 @@ inline bool is_interleaved_transposed(int m, int n, int k, bool reshape_b_only_o
     bool flag = true;

     if(gpu_target_is_in(gpu_target,
-                        GPUTarget::G71, GPUTarget::G72, GPUTarget::G76,
+                        GPUTarget::G71, GPUTarget::G72,
                         GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT,
                         GPUTarget::G52, GPUTarget::G52LIT))
     {
@@ -56,6 +56,10 @@ inline bool is_interleaved_transposed(int m, int n, int k, bool reshape_b_only_o
             flag = false;
         }
     }
+    else
+    {
+        flag = m > 1;
+    }

     return flag;
 }
@@ -69,24 +73,26 @@ CLGEMMLowpMatrixMultiplyCore::CLGEMMLowpMatrixMultiplyCore(std::shared_ptr
 CLGEMMLowpMatrixMultiplyCore::CLGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager)
-    : _memory_group(std::move(memory_manager)), _mm_kernel(), _mtx_a_reshape_kernel(), _mtx_b_reshape_kernel(), _mtx_a_reduction_kernel(), _mtx_b_reduction_kernel(), _offset_contribution_kernel(),
-      _vector_sum_col(), _vector_sum_row(), _tmp_a(), _tmp_b(), _original_b(nullptr), _a_offset(0), _b_offset(0), _is_interleaved_transposed(true), _reshape_b_only_on_first_run(false), _is_prepared(false)
+    : _memory_group(std::move(memory_manager)), _mm_kernel(), _mtx_a_reshape_kernel(), _mtx_b_reshape_kernel(), _mtx_a_reduction_kernel(), _mtx_b_reduction_kernel(), _offset_contribution_kernel(),
+      _offset_contribution_output_stage_kernel(), _vector_sum_col(), _vector_sum_row(), _tmp_a(), _tmp_b(), _mm_result_s32(), _original_b(nullptr), _a_offset(0), _b_offset(0),
+      _is_interleaved_transposed(true), _reshape_b_only_on_first_run(false), _is_prepared(false), _fuse_output_stage(false)
 {
 }

-void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor *b, ICLTensor *output, const GEMMInfo &gemm_info)
+void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *c, ICLTensor *output, const GEMMInfo &gemm_info)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, output);
-    ARM_COMPUTE_ERROR_THROW_ON(CLGEMMLowpMatrixMultiplyCore::validate(a->info(), b->info(), output->info(), gemm_info));
+    ARM_COMPUTE_ERROR_THROW_ON(CLGEMMLowpMatrixMultiplyCore::validate(a->info(), b->info(), c != nullptr ? c->info() : nullptr, output->info(), gemm_info));

     _is_prepared = false;
     _original_b  = b;
@@ -108,6 +114,7 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor
     // If we pass the matrix A and matrix B reshaped to CLGEMMMatrixMultiplyKernel, we need to pass m, n, k, mult_transpose1xW_width and mult_interleave4x4_height to CLGEMMReshapeInfo
     // in order to know how the matrices have been reshaped
     bool          reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d();
+    const bool    unroll_block            = dot8_supported(CLKernelLibrary::get().get_device());
     const int     m                       = reinterpret_input_as_3d ? (a->info()->dimension(1) * a->info()->dimension(2)) : a->info()->dimension(1);
     const int     n                       = b->info()->dimension(0);
     const int     k                       = a->info()->dimension(0);
@@ -133,15 +140,11 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor
         }

         // Configure interleave kernel
-        _mtx_a_reshape_kernel.configure(a, &_tmp_a, mult_interleave4x4_height, gemm_info.reinterpret_input_as_3d());
+        _mtx_a_reshape_kernel.configure(a, &_tmp_a, mult_interleave4x4_height, gemm_info.reinterpret_input_as_3d(), unroll_block);

         // Configure transpose kernel
         _mtx_b_reshape_kernel.configure(b, &_tmp_b, mult_transpose1xW_width);
     }

-    // Configure matrix multiply kernel
-    _mm_kernel.configure(matrix_a, matrix_b, output, _is_interleaved_transposed, GEMMReshapeInfo(m, n, k,
-                                                                                                 mult_transpose1xW_width, mult_interleave4x4_height,
-                                                                                                 depth_output_gemm3d, reinterpret_input_as_3d));

     // Initialize matrix B reduction kernel only if _a_offset is not equal to 0
     if(_a_offset != 0)
@@ -168,8 +171,34 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor
         _mtx_a_reduction_kernel.configure(a, &_vector_sum_row);
     }

-    // Configure offset contribution kernel
-    _offset_contribution_kernel.configure(output, _a_offset == 0 ? nullptr : &_vector_sum_col, _b_offset == 0 ? nullptr : &_vector_sum_row, a->info()->dimension(0), _a_offset, _b_offset);
+    // If GEMMLowpOutputStage != NONE, fuse the offset contribution with the output stage
+    if(gemm_info.gemmlowp_output_stage().type != GEMMLowpOutputStageType::NONE)
+    {
+        _fuse_output_stage = true;
+
+        _memory_group.manage(&_mm_result_s32);
+
+        // Configure matrix multiply kernel
+        _mm_kernel.configure(matrix_a, matrix_b, &_mm_result_s32, _is_interleaved_transposed, GEMMReshapeInfo(m, n, k,
+                                                                                                              mult_transpose1xW_width, mult_interleave4x4_height,
+                                                                                                              depth_output_gemm3d, reinterpret_input_as_3d));
+
+        // Configure offset contribution kernel
+        _offset_contribution_output_stage_kernel.configure(&_mm_result_s32, _a_offset == 0 ? nullptr : &_vector_sum_col, _b_offset == 0 ? nullptr : &_vector_sum_row, c, output, a->info()->dimension(0),
+                                                           _a_offset, _b_offset, gemm_info.gemmlowp_output_stage());
+
+        _mm_result_s32.allocator()->allocate();
+    }
+    else
+    {
+        // Configure matrix multiply kernel
+        _mm_kernel.configure(matrix_a, matrix_b, output, _is_interleaved_transposed, GEMMReshapeInfo(m, n, k,
+                                                                                                     mult_transpose1xW_width, mult_interleave4x4_height,
+                                                                                                     depth_output_gemm3d, reinterpret_input_as_3d));
+
+        // Configure offset contribution kernel
+        _offset_contribution_kernel.configure(output, _a_offset == 0 ? nullptr : &_vector_sum_col, _b_offset == 0 ? nullptr : &_vector_sum_row, c, a->info()->dimension(0), _a_offset, _b_offset);
+    }

     // Allocate tensors
     if(_is_interleaved_transposed)
@@ -192,10 +221,9 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor
     }
 }

-Status CLGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *output, const GEMMInfo &gemm_info)
+Status CLGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, const GEMMInfo &gemm_info)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::QASYMM8);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
     ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(a, b);
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_a_reshaped(), "Matrix A already reshaped is not supported");
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_b_reshaped(), "Matrix B already reshaped is not supported");
@@ -241,9 +269,6 @@ Status CLGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITenso
         ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMTranspose1xWKernel::validate(b, &tmp_b_info, mult_transpose1xW_width));
     }

-    // Validate matrix multiply
-    ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyKernel::validate(matrix_a_info, matrix_b_info, output, reshape_matrices, reshape_info));
-
     TensorInfo info_vector_sum_col, info_vector_sum_row;

     // Validate matrix B reduction kernel only if _a_offset is not equal to 0
@@ -264,11 +289,37 @@ Status CLGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITenso
         ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixAReductionKernel::validate(a, &info_vector_sum_row));
     }

-    // Validate offset contribution kernel
-    ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpOffsetContributionKernel::validate(output,
-                                                                             a_offset == 0 ? nullptr : &info_vector_sum_col,
-                                                                             b_offset == 0 ? nullptr : &info_vector_sum_row,
-                                                                             a_offset, b_offset));
+    if(gemm_info.gemmlowp_output_stage().type != GEMMLowpOutputStageType::NONE)
+    {
+        TensorInfo mm_result_s32_info{};
+
+        // Output tensor auto initialization if not yet initialized
+        auto_init_if_empty(mm_result_s32_info, a->clone()->set_tensor_shape(compute_mm_shape(*matrix_a_info, *matrix_b_info, reshape_matrices, reshape_info)).set_data_type(DataType::S32));
+
+        // Validate matrix multiply
+        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyKernel::validate(matrix_a_info, matrix_b_info, &mm_result_s32_info, reshape_matrices, reshape_info));
+
+        // Validate offset contribution kernel
+        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpOffsetContributionOutputStageKernel::validate(&mm_result_s32_info,
+                                                                                            a_offset == 0 ? nullptr : &info_vector_sum_col,
+                                                                                            b_offset == 0 ? nullptr : &info_vector_sum_row,
+                                                                                            c,
+                                                                                            output,
+                                                                                            a_offset, b_offset,
+                                                                                            gemm_info.gemmlowp_output_stage()));
+    }
+    else
+    {
+        // Validate matrix multiply
+        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyKernel::validate(matrix_a_info, matrix_b_info, output, reshape_matrices, reshape_info));
+
+        // Validate offset contribution kernel
+        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpOffsetContributionKernel::validate(output,
+                                                                                 a_offset == 0 ? nullptr : &info_vector_sum_col,
+                                                                                 b_offset == 0 ? nullptr : &info_vector_sum_row,
+                                                                                 c,
+                                                                                 a_offset, b_offset));
+    }

     return Status{};
 }
@@ -306,8 +357,16 @@ void CLGEMMLowpMatrixMultiplyCore::run()
         CLScheduler::get().enqueue(_mtx_a_reduction_kernel, false);
     }

-    // Run offset contribution kernel
-    CLScheduler::get().enqueue(_offset_contribution_kernel, true);
+    if(_fuse_output_stage)
+    {
+        // Run offset contribution/output stage kernel
+        CLScheduler::get().enqueue(_offset_contribution_output_stage_kernel, true);
+    }
+    else
+    {
+        // Run offset contribution kernel
+        CLScheduler::get().enqueue(_offset_contribution_kernel, true);
+    }

     _memory_group.release();
 }
diff --git a/src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp b/src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp
index f5dc655776..f1c24626dc 100644
--- a/src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp
+++ b/src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp
@@ -45,17 +45,17 @@ Status CLGEMMLowpQuantizeDownInt32ToUint8Scale::validate(const ITensorInfo *inpu

 void CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output,
                                                                     int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift,
-                                                                    int min, int max, unsigned int output_3d_depth)
+                                                                    int min, int max)
 {
     auto k = arm_compute::support::cpp14::make_unique<CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel>();
-    k->configure(input, bias, output, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, output_3d_depth);
+    k->configure(input, bias, output, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);
     _kernel = std::move(k);
 }

 Status CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output,
-                                                                     int min, int max, unsigned int output_3d_depth)
+                                                                     int min, int max)
 {
-    return CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::validate(input, bias, output, min, max, output_3d_depth);
+    return CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::validate(input, bias, output, min, max);
 }

 void CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloat::configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output,
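The result_fixedpoint_multiplier/result_shift pair consumed by the fixed-point output stage comes from decomposing the real rescale factor multiplier = (scale_input * scale_weights) / scale_output, which is less than one, into a Q31 integer multiplier and a right shift. A simplified sketch of what quantization::calculate_quantized_multiplier_less_than_one computes (assuming 0 < multiplier < 1; the library version additionally validates its inputs):

    #include <cmath>
    #include <cstdint>

    // Decompose 0 < multiplier < 1 as:
    //   multiplier ~= (quantized_multiplier / 2^31) * 2^(-right_shift)
    // with quantized_multiplier in [2^30, 2^31).
    void quantize_multiplier_lt_one(float multiplier, int32_t *quantized_multiplier, int32_t *right_shift)
    {
        int   exponent = 0;
        float mantissa = std::frexp(multiplier, &exponent); // multiplier = mantissa * 2^exponent, mantissa in [0.5, 1)

        *right_shift = -exponent; // exponent <= 0 because multiplier < 1

        int64_t q = static_cast<int64_t>(std::round(mantissa * (1ll << 31)));
        if(q == (1ll << 31)) // mantissa rounded up to exactly 1.0
        {
            q /= 2;
            --(*right_shift);
        }
        *quantized_multiplier = static_cast<int32_t>(q);
    }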
diff --git a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
index 60f6294394..45e21b53d1 100644
--- a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
+++ b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
@@ -50,6 +50,7 @@ Status validate_mm(const ITensorInfo &input, const ITensorInfo &weights, const I
     // Validate gemmlowp function
     ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixMultiplyCore::validate(&input.clone()->set_quantization_info(input_quantization_info),
                                                                        &weights.clone()->set_quantization_info(weights_quantization_info),
+                                                                      nullptr,
                                                                        &output));
 }
 else
@@ -93,7 +94,7 @@ void NEFullyConnectedLayer::configure_mm(const ITensor *input, const ITensor *we
     weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));

     // Configure gemmlowp function
-    _mm_gemmlowp.configure(input, weights, output);
+    _mm_gemmlowp.configure(input, weights, nullptr, output);

     // Revert back QuantizationInfo as input and weights could be used in other fully connected layers
     input->info()->set_quantization_info(input_quantization_info);
diff --git a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
index fb6d4a1847..fc65469488 100644
--- a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
@@ -111,7 +111,7 @@ void NEGEMMConvolutionLayer::configure_mm(const ITensor *input, const ITensor *w
     input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
     weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));

-    _mm_gemmlowp.configure(input, weights, output, GEMMInfo(false, false, true /* Reshape weights only for the first run*/));
+    _mm_gemmlowp.configure(input, weights, nullptr, output, GEMMInfo(false, false, true /* Reshape weights only for the first run*/));

     // Revert back QuantizationInfo as input and weights could be used in other convolution layers
     input->info()->set_quantization_info(input_quantization_info);
@@ -143,7 +143,7 @@ Status NEGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITens
     weights_qa->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));

     // Perform validation step on GEMMLowp
-    return NEGEMMLowpMatrixMultiplyCore::validate(input_qa.get(), weights_qa.get(), output, gemm_info);
+    return NEGEMMLowpMatrixMultiplyCore::validate(input_qa.get(), weights_qa.get(), nullptr, output, gemm_info);
 }
 else
 {
diff --git a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
index 828011d019..80f5ab0c93 100644
--- a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
@@ -47,10 +47,11 @@ NEGEMMLowpMatrixMultiplyCore::NEGEMMLowpMatrixMultiplyCore(std::shared_ptr
 {
 }

-void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b, ITensor *output, const GEMMInfo &gemm_info)
+void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *output, const GEMMInfo &gemm_info)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, output);
-    ARM_COMPUTE_ERROR_THROW_ON(NEGEMMLowpMatrixMultiplyCore::validate(a->info(), b->info(), output->info(), gemm_info));
+    ARM_COMPUTE_UNUSED(c);
+    ARM_COMPUTE_ERROR_THROW_ON(NEGEMMLowpMatrixMultiplyCore::validate(a->info(), b->info(), c != nullptr ? c->info() : nullptr, output->info(), gemm_info));

     // Clear state
     _mtx_a_reshape_kernel = nullptr;
@@ -181,11 +182,12 @@
     }
 }

-Status NEGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *output, const GEMMInfo &gemm_info)
+Status NEGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, const GEMMInfo &gemm_info)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::QASYMM8);
     ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
     ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(a, b);
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(c != nullptr, "Bias addition not supported in NEGEMMLowpMatrixMultiplyCore");
     ARM_COMPUTE_RETURN_ERROR_ON_MSG((a)->dimension(0) != (b)->dimension(1),
                                     "The product AB is defined only if the number of columns in A is equal to the number of rows in B");
     ARM_COMPUTE_RETURN_ERROR_ON_MSG((a)->dimension(1) != (output)->dimension(1),
--
cgit v1.2.1
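For callers, the visible API change is the extra bias argument c on both GEMMLowp cores: on NEON it must remain nullptr (validate() above rejects anything else), while on OpenCL it is consumed by the fused offset-contribution/output-stage kernel. A hypothetical CL-side usage sketch; shapes, quantization parameters and the GEMMInfo arguments are illustrative only:

    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"

    using namespace arm_compute;

    void example_fused_gemmlowp()
    {
        CLTensor a, b, bias, output;
        // A: 32x64 (M=32, K=64), B: 64x16 (K=64, N=16), all QASYMM8 except the S32 bias
        a.allocator()->init(TensorInfo(TensorShape(64U, 32U), 1, DataType::QASYMM8, QuantizationInfo(0.05f, 10)));
        b.allocator()->init(TensorInfo(TensorShape(16U, 64U), 1, DataType::QASYMM8, QuantizationInfo(0.03f, 3)));
        bias.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::S32));
        output.allocator()->init(TensorInfo(TensorShape(16U, 32U), 1, DataType::QASYMM8, QuantizationInfo(0.1f, 5)));

        GEMMLowpOutputStageInfo stage;
        stage.type = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
        // gemmlowp_offset/multiplier/shift and the clamp bounds would be filled
        // exactly as CLGEMMConvolutionLayer::configure() does above

        CLGEMMLowpMatrixMultiplyCore gemmlowp;
        gemmlowp.configure(&a, &b, &bias, &output, GEMMInfo(false, false, true, 1, false, false, stage));
        // ... initialise CLScheduler, allocate and fill the tensors, then gemmlowp.run()
    }

With a NONE stage type the core falls back to the old behaviour: an S32 output plus the standalone offset-contribution kernel, requantized by a separate output-stage function.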