From 2d7e683e79c8ad328d4930c1f82a46827313faf4 Mon Sep 17 00:00:00 2001
From: George Wort
Date: Fri, 22 Feb 2019 16:37:41 +0000
Subject: COMPMID-1694: Fuse offset contribution with the output stage when we
 use NEGEMMLowpMatrixMultiplyCore

Change-Id: Ic1a681e4cc03e1eba3bf8485d9cdb17b3e926047
Signed-off-by: giuros01
Reviewed-on: https://review.mlplatform.org/c/561
Reviewed-by: Gian Marco Iodice
Tested-by: Arm Jenkins
---
 .../NEON/functions/NEGEMMConvolutionLayer.cpp      | 241 +++++++++------------
 .../functions/NEGEMMLowpMatrixMultiplyCore.cpp     | 215 +++++++++++-------
 2 files changed, 244 insertions(+), 212 deletions(-)

(limited to 'src/runtime/NEON/functions')

diff --git a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
index be7cc2d0e1..b6c37349c1 100644
--- a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -90,16 +90,17 @@ void NEConvolutionLayerReshapeWeights::run()
 }
 
 NEGEMMConvolutionLayer::NEGEMMConvolutionLayer(const std::shared_ptr<IMemoryManager> &memory_manager)
-    : _memory_group(memory_manager), _reshape_weights(), _im2col_kernel(), _mm_gemm(memory_manager), _mm_gemmlowp(memory_manager), _gemmlowp_output_stage(), _col2im_kernel(), _activationlayer_function(),
-      _add_bias_kernel(), _reshape_layer(), _original_weights(nullptr), _im2col_output(), _weights_reshaped(), _gemm_output(), _tmp_output(), _data_layout(DataLayout::NCHW), _append_bias(false),
-      _skip_im2col(false), _skip_col2im(false), _is_quantized(false), _is_activationlayer_enabled(false), _is_prepared(false)
+    : _memory_group(memory_manager), _reshape_weights(), _im2col_kernel(), _mm_gemm(memory_manager), _mm_gemmlowp(memory_manager), _col2im_kernel(), _activationlayer_function(), _add_bias_kernel(),
+      _reshape_layer(), _original_weights(nullptr), _im2col_output(), _weights_reshaped(), _gemm_output(), _tmp_output(), _data_layout(DataLayout::NCHW), _append_bias(false), _skip_im2col(false),
+      _skip_col2im(false), _is_quantized(false), _is_activationlayer_enabled(false), _is_prepared(false)
 {
 }
 
-void NEGEMMConvolutionLayer::configure_mm(const ITensor *input, const ITensor *weights, ITensor *output, int gemm_3d_depth)
+void NEGEMMConvolutionLayer::configure_mm(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const ActivationLayerInfo &act_info, int gemm_3d_depth)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights);
-    ARM_COMPUTE_ERROR_THROW_ON(validate_mm(input->info(), weights->info(), output->info(), gemm_3d_depth, _skip_im2col));
+    ARM_COMPUTE_ERROR_THROW_ON(validate_mm(input->info(), weights->info(), biases == nullptr ? nullptr : biases->info(), output == nullptr ? nullptr : output->info(), act_info, gemm_3d_depth,
+                                           _skip_im2col));
 
     const GEMMInfo &gemm_info = GEMMInfo(false, false, true /* Reshape weights only for the first run */, gemm_3d_depth, _skip_im2col /* Reinterpret the input as 3D if im2col is skipped */);
 
@@ -114,7 +115,40 @@ void NEGEMMConvolutionLayer::configure_mm(const ITensor *input, const ITensor *w
         input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
         weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));
 
-        _mm_gemmlowp.configure(input, weights, nullptr, output, gemm_info);
+        const QuantizationInfo output_quant_info = (output->info()->total_size() == 0) ? input_quantization_info : output->info()->quantization_info();
+
+        float multiplier = input_quantization_info.scale * weights->info()->quantization_info().scale / output_quant_info.scale;
+        int   output_multiplier, output_shift;
+        quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
+
+        // Merge activation with output stage
+        int min_activation = 0;
+        int max_activation = 0;
+
+        const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
+                                                                                   ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
+                                                                                   ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
+                                                                                 };
+        if(_is_activationlayer_enabled && supported_acts.count(act_info.activation()) != 0)
+        {
+            const int a_const_int = output_quant_info.quantize(act_info.a(), RoundingPolicy::TO_NEAREST_UP);
+            const int b_const_int = output_quant_info.quantize(act_info.b(), RoundingPolicy::TO_NEAREST_UP);
+
+            min_activation = act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU ? output_quant_info.offset : b_const_int;
+            max_activation = act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU ? 255 : a_const_int;
+
+            _is_activationlayer_enabled = false;
+        }
+
+        GEMMLowpOutputStageInfo output_info;
+        output_info.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
+        output_info.gemmlowp_offset     = output_quant_info.offset;
+        output_info.gemmlowp_multiplier = output_multiplier;
+        output_info.gemmlowp_shift      = output_shift;
+        output_info.gemmlowp_min_bound  = min_activation;
+        output_info.gemmlowp_max_bound  = max_activation;
+
+        _mm_gemmlowp.configure(input, weights, biases, output, GEMMInfo(false, false, true, gemm_3d_depth, _skip_im2col, false, output_info));
 
         // Revert back QuantizationInfo as input and weights could be used in other convolution layers
         input->info()->set_quantization_info(input_quantization_info);
@@ -127,9 +161,11 @@ void NEGEMMConvolutionLayer::configure_mm(const ITensor *input, const ITensor *w
     }
 }
 
-Status NEGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, int gemm_3d_depth, bool skip_im2col)
+Status NEGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const ActivationLayerInfo &act_info,
+                                           int gemm_3d_depth, bool skip_im2col)
 {
-    const bool is_quantized = is_data_type_quantized_asymmetric(input->data_type());
+    const bool is_quantized          = is_data_type_quantized_asymmetric(input->data_type());
+    const bool is_activation_enabled = act_info.enabled();
 
     const GEMMInfo &gemm_info = GEMMInfo(false, false, true /* Reshape weights only for the first run */, gemm_3d_depth, skip_im2col /* Reinterpret the input as 3D if im2col is skipped */);
 
@@ -145,8 +181,39 @@ Status NEGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITens
         input_qa->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
         weights_qa->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));
 
+        const QuantizationInfo output_quant_info = (output->total_size() == 0) ? input_quantization_info : output->quantization_info();
+
+        float multiplier = input_quantization_info.scale * weights->quantization_info().scale / output_quant_info.scale;
+        int   output_multiplier, output_shift;
+        quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
+
+        // Merge activation with output stage
+        int min_activation = 0;
+        int max_activation = 0;
+
+        const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
+                                                                                   ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
+                                                                                   ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
+                                                                                 };
+        if(is_activation_enabled && supported_acts.count(act_info.activation()) != 0)
+        {
+            const int a_const_int = output_quant_info.quantize(act_info.a(), RoundingPolicy::TO_NEAREST_UP);
+            const int b_const_int = output_quant_info.quantize(act_info.b(), RoundingPolicy::TO_NEAREST_UP);
+
+            min_activation = act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU ? output_quant_info.offset : b_const_int;
+            max_activation = act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU ? 255 : a_const_int;
+        }
+
+        GEMMLowpOutputStageInfo output_info;
+        output_info.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
+        output_info.gemmlowp_offset     = output_quant_info.offset;
+        output_info.gemmlowp_multiplier = output_multiplier;
+        output_info.gemmlowp_shift      = output_shift;
+        output_info.gemmlowp_min_bound  = min_activation;
+        output_info.gemmlowp_max_bound  = max_activation;
+
         // Perform validation step on GEMMLowp
-        return NEGEMMLowpMatrixMultiplyCore::validate(input_qa.get(), weights_qa.get(), nullptr, output, gemm_info);
+        return NEGEMMLowpMatrixMultiplyCore::validate(input_qa.get(), weights_qa.get(), biases, output, GEMMInfo(false, false, true, gemm_3d_depth, skip_im2col, false, output_info));
     }
     else
     {
@@ -155,19 +222,18 @@ Status NEGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITens
     }
 }
 
-Status NEGEMMConvolutionLayer::validate_gemm3d(DataType data_type, int gemm_3d_depth, bool skip_im2col)
+Status NEGEMMConvolutionLayer::validate_gemm3d(const ITensorInfo *input_info, const ActivationLayerInfo &act_info, int gemm_3d_depth, bool skip_im2col)
 {
-    const bool         is_quantized          = is_data_type_quantized_asymmetric(data_type);
-    const DataType     output_gemm_data_type = is_quantized ? DataType::S32 : data_type;
-    const unsigned int mult_y                = skip_im2col ? 1U : gemm_3d_depth;
-    const unsigned int mult_z                = skip_im2col ? gemm_3d_depth : 1U;
+    const DataType     data_type = input_info->data_type();
+    const unsigned int mult_y    = skip_im2col ? 1U : gemm_3d_depth;
+    const unsigned int mult_z    = skip_im2col ? gemm_3d_depth : 1U;
 
     // Set dummy tensor shapes for the validation
-    const TensorInfo dummy_input_info(TensorShape(4U, 4U * mult_y, 1U * mult_z), 1, data_type);
+    const TensorInfo dummy_input_info(TensorShape(4U, 4U * mult_y, 1U * mult_z), 1, data_type, input_info->quantization_info());
     const TensorInfo dummy_weights_info(TensorShape(4U, 4U), 1, data_type);
-    const TensorInfo dummy_output_info(TensorShape(4U, 4U, gemm_3d_depth), 1, output_gemm_data_type);
+    const TensorInfo dummy_output_info(TensorShape(4U, 4U, gemm_3d_depth), 1, data_type, input_info->quantization_info());
 
-    return validate_mm(&dummy_input_info, &dummy_weights_info, &dummy_output_info, gemm_3d_depth, skip_im2col);
+    return validate_mm(&dummy_input_info, &dummy_weights_info, nullptr, &dummy_output_info, act_info, gemm_3d_depth, skip_im2col);
 }
 
 void NEGEMMConvolutionLayer::configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
@@ -202,9 +268,8 @@ void NEGEMMConvolutionLayer::configure(const ITensor *input, const ITensor *weig
     _append_bias                = (biases != nullptr) && (!_is_quantized);
     _is_activationlayer_enabled = act_info.enabled();
 
-    const ITensor *gemm_input_to_use         = input;
-    ITensor       *gemm_output_to_use        = output;
-    ITensor       *gemm_output_staged_to_use = output;
+    const ITensor *gemm_input_to_use  = input;
+    ITensor       *gemm_output_to_use = output;
 
     // Get convolved dimensions
     unsigned int conv_w = 0;
@@ -219,7 +284,7 @@ void NEGEMMConvolutionLayer::configure(const ITensor *input, const ITensor *weig
         // Check if GEMM3D is supported
         if(data_layout == DataLayout::NHWC)
         {
-            _skip_col2im = bool(validate_gemm3d(input->info()->data_type(), conv_h, true));
+            _skip_col2im = bool(validate_gemm3d(input->info(), act_info, conv_h, true));
             // If not supported, we need to perform im2col and col2im (or reshape layer)
             if(!_skip_col2im)
             {
@@ -262,26 +327,17 @@ void NEGEMMConvolutionLayer::configure(const ITensor *input, const ITensor *weig
     }
 
     // Create temporary GEMM output tensor in case we cannot skip col2im
-    if(!_skip_col2im || _is_quantized)
+    if(!_skip_col2im)
     {
-        // GEMM output should be S32 for acquiring raw integer accumulator without quantized postprocessing for quantized asymmetric input.
-        const DataType gemm_data_type = _is_quantized ? DataType::S32 : data_type;
-        TensorShape    shape_gemm;
+        TensorShape shape_gemm;
 
-        if(_is_quantized && _skip_col2im)
-        {
-            shape_gemm = output->info()->tensor_shape();
-        }
-        else
-        {
-            // Calculate GEMM output shape
-            shape_gemm = _im2col_output.info()->tensor_shape();
-            shape_gemm.set(0, mat_weights_cols);
-            shape_gemm.set(1, conv_w * conv_h);
-        }
+        // Calculate GEMM output shape
+        shape_gemm = _im2col_output.info()->tensor_shape();
+        shape_gemm.set(0, mat_weights_cols);
+        shape_gemm.set(1, conv_w * conv_h);
 
         // FIXME: input->clone() doesn't work with subtensors for grouped convolutions.
-        TensorInfo info_gemm(shape_gemm, 1, gemm_data_type);
+        TensorInfo info_gemm(shape_gemm, 1, data_type);
         info_gemm.set_quantization_info(output->info()->quantization_info()).set_data_layout(input->info()->data_layout());
         _gemm_output.allocator()->init(info_gemm);
         _memory_group.manage(&_gemm_output);
@@ -293,62 +349,24 @@ void NEGEMMConvolutionLayer::configure(const ITensor *input, const ITensor *weig
     // Configure GEMM
     // In case we need to skip col2im, GEMM3D (gemm_3d_depth != 0) must be called in order to avoid reshaping the output matrix
     const unsigned int gemm_3d_depth = _skip_col2im ? conv_h : 0;
-    configure_mm(gemm_input_to_use, &_weights_reshaped, gemm_output_to_use, gemm_3d_depth);
+    configure_mm(gemm_input_to_use, &_weights_reshaped, biases, gemm_output_to_use, act_info, gemm_3d_depth);
 
     if(!_skip_im2col)
     {
         _im2col_output.allocator()->allocate();
     }
 
-    // Configure output stage for quantized case
-    if(_is_quantized)
-    {
-        const QuantizationInfo input_quant_info  = input->info()->quantization_info();
-        const QuantizationInfo output_quant_info = (output->info()->total_size() == 0) ? input_quant_info : output->info()->quantization_info();
-
-        float multiplier = input_quant_info.scale * weights->info()->quantization_info().scale / output_quant_info.scale;
-        int   output_multiplier, output_shift;
-        quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
-
-        if(!_skip_col2im)
-        {
-            _memory_group.manage(&_tmp_output);
-            gemm_output_staged_to_use = &_tmp_output;
-        }
-
-        // Merge activation with output stage
-        int min_activation = 0;
-        int max_activation = 0;
-
-        const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
-                                                                                   ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
-                                                                                   ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
-                                                                                 };
-        if(_is_activationlayer_enabled && supported_acts.count(act_info.activation()) != 0)
-        {
-            const int a_const_int = output_quant_info.quantize(act_info.a(), RoundingPolicy::TO_NEAREST_UP);
-            const int b_const_int = output_quant_info.quantize(act_info.b(), RoundingPolicy::TO_NEAREST_UP);
-
-            min_activation = act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU ? output_quant_info.offset : b_const_int;
-            max_activation = act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU ? 255 : a_const_int;
-
-            _is_activationlayer_enabled = false;
-        }
-
-        _gemmlowp_output_stage.configure(gemm_output_to_use, biases, gemm_output_staged_to_use, output_multiplier, output_shift, output_quant_info.offset, min_activation, max_activation);
-    }
-
     if(!_skip_col2im)
     {
         if(_data_layout == DataLayout::NCHW)
         {
             // Configure col2im
-            _col2im_kernel.configure(_is_quantized ? gemm_output_staged_to_use : gemm_output_to_use, output, Size2D(conv_w, conv_h));
+            _col2im_kernel.configure(gemm_output_to_use, output, Size2D(conv_w, conv_h));
         }
         else
        {
             // Configure reshape layer
-            _reshape_layer.configure(_is_quantized ? gemm_output_staged_to_use : gemm_output_to_use, output);
+            _reshape_layer.configure(gemm_output_to_use, output);
         }
     }
 
@@ -395,10 +413,9 @@ Status NEGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
     const unsigned int kernel_height = weights->dimension(idx_height);
 
     TensorInfo         im2col_reshaped_info, info_gemm, tmp_info, weights_reshaped_info;
-    const ITensorInfo *gemm_input_to_use         = input;
-    const ITensorInfo *gemm_output_to_use        = output;
-    const ITensorInfo *gemm_output_staged_to_use = output;
-    const ITensorInfo *weights_to_use            = weights;
+    const ITensorInfo *gemm_input_to_use  = input;
+    const ITensorInfo *gemm_output_to_use = output;
+    const ITensorInfo *weights_to_use     = weights;
 
     const bool is_quantized = is_data_type_quantized_asymmetric(data_type);
     const bool append_bias  = (biases != nullptr) && (!is_quantized);
@@ -420,7 +437,7 @@ Status NEGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
     bool skip_col2im = false;
     if(data_layout == DataLayout::NHWC)
     {
-        skip_col2im = bool(validate_gemm3d(input->data_type(), conv_h, true));
+        skip_col2im = bool(validate_gemm3d(input, act_info, conv_h, true));
         // If not supported, we need to perform im2col and col2im (or reshape layer)
         if(!skip_col2im)
         {
@@ -431,7 +448,7 @@ Status NEGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
     if(skip_col2im)
     {
         // If not supported, we need to perform im2col and col2im (or reshape layer)
-        if(!bool(validate_gemm3d(input->data_type(), conv_h, skip_im2col)))
+        if(!bool(validate_gemm3d(input, act_info, conv_h, skip_im2col)))
        {
             skip_im2col = false;
             skip_col2im = false;
@@ -495,68 +512,25 @@ Status NEGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
     }
 
     // Create temporary GEMM output tensor in case we cannot skip col2im
-    const DataType gemm_data_type = is_quantized ? DataType::S32 : data_type;
     if(!skip_col2im)
     {
         TensorShape shape_gemm = gemm_input_to_use->tensor_shape();
         shape_gemm.set(0, mat_weights_cols);
         shape_gemm.set(1, conv_w * conv_h);
-        info_gemm = TensorInfo(shape_gemm, 1, gemm_data_type);
+        info_gemm = TensorInfo(shape_gemm, 1, data_type);
     }
     else
     {
-        info_gemm = TensorInfo(output->tensor_shape(), 1, gemm_data_type);
+        info_gemm = TensorInfo(output->tensor_shape(), 1, data_type);
     }
     info_gemm.set_quantization_info(output->quantization_info()).set_data_layout(input->data_layout());
     gemm_output_to_use = &info_gemm;
-
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(gemm_input_to_use, weights_to_use, gemm_output_to_use, skip_col2im ? conv_h : 0, skip_im2col));
-
-    if(is_quantized)
-    {
-        const QuantizationInfo input_quant_info  = input->quantization_info();
-        const QuantizationInfo output_quant_info = (output->total_size() == 0) ? input_quant_info : output->quantization_info();
-        const float            multiplier        = input_quant_info.scale * weights_to_use->quantization_info().scale / output_quant_info.scale;
-        int                    output_multiplier, output_shift;
-        quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
-
-        if(!skip_col2im)
-        {
-            tmp_info = TensorInfo(gemm_output_to_use->tensor_shape(), 1, DataType::QASYMM8);
-            tmp_info.set_quantization_info(output->quantization_info()).set_data_layout(data_layout);
-            gemm_output_staged_to_use = &tmp_info;
-        }
-
-        // Merge activation with output stage
-        int min_activation = 0;
-        int max_activation = 0;
-
-        const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
-                                                                                   ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
-                                                                                   ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
-                                                                                 };
-
-        if(is_activation_enabled && supported_acts.count(act_info.activation()) != 0)
-        {
-            const int a_const_int = output_quant_info.quantize(act_info.a(), RoundingPolicy::TO_NEAREST_UP);
-            const int b_const_int = output_quant_info.quantize(act_info.b(), RoundingPolicy::TO_NEAREST_UP);
-
-            min_activation = act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU ? output_quant_info.offset : b_const_int;
-            max_activation = act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU ? 255 : a_const_int;
-
-            is_activation_enabled = false;
-        }
-
-        // Validate output stage for quantized case
-        NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(gemm_output_to_use, biases, gemm_output_staged_to_use, min_activation, max_activation);
-    }
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(gemm_input_to_use, weights_to_use, biases, gemm_output_to_use, act_info, skip_col2im ? conv_h : 0, skip_im2col));
 
     // Validate Col2Im/ReshapeLayer
     if(!skip_col2im && (data_layout == DataLayout::NCHW))
     {
-        ARM_COMPUTE_RETURN_ON_ERROR(NECol2ImKernel::validate(is_quantized ? gemm_output_staged_to_use : gemm_output_to_use,
-                                                             output,
-                                                             Size2D(conv_w, conv_h)));
+        ARM_COMPUTE_RETURN_ON_ERROR(NECol2ImKernel::validate(gemm_output_to_use, output, Size2D(conv_w, conv_h)));
     }
 
     //Validate Activation Layer
@@ -586,9 +560,6 @@ void NEGEMMConvolutionLayer::run()
     {
         // Run gemmlowp
         _mm_gemmlowp.run();
-
-        // Run output stage
-        _gemmlowp_output_stage.run();
     }
     else
     {
diff --git a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
index 5286f113a5..85e49fd265 100644
--- a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
@@ -42,8 +42,8 @@ using namespace arm_compute::misc::shape_calculator;
 
 NEGEMMLowpMatrixMultiplyCore::NEGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager)
     : _memory_group(memory_manager), _asm_glue(memory_manager), _mm_kernel(nullptr), _mtx_a_reshape_kernel(nullptr), _mtx_b_reshape_kernel(nullptr), _mtx_a_reduction_kernel(), _mtx_b_reduction_kernel(),
-      _offset_contribution_kernel(), _vector_sum_col(), _vector_sum_row(), _tmp_a(), _tmp_b(), _original_b(nullptr), _a_offset(0), _b_offset(0), _run_vector_matrix_multiplication(false),
-      _dot_product_path(false), _reshape_b_only_on_first_run(false), _is_prepared(false)
+      _offset_contribution_kernel(), _offset_contribution_output_stage_kernel(), _vector_sum_col(), _vector_sum_row(), _tmp_a(), _tmp_b(), _mm_result_s32(), _original_b(nullptr), _a_offset(0), _b_offset(0),
+      _run_vector_matrix_multiplication(false), _dot_product_path(false), _reshape_b_only_on_first_run(false), _is_prepared(false), _fuse_output_stage(false)
 {
 }
 
@@ -53,6 +53,9 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
     ARM_COMPUTE_UNUSED(c);
     ARM_COMPUTE_ERROR_THROW_ON(NEGEMMLowpMatrixMultiplyCore::validate(a->info(), b->info(), c != nullptr ? c->info() : nullptr, output->info(), gemm_info));
 
+    const ITensor *matrix_a = a;
+    const ITensor *matrix_b = b;
+
     // Clear state
     _mtx_a_reshape_kernel = nullptr;
     _mtx_b_reshape_kernel = nullptr;
@@ -65,6 +68,18 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
     _is_prepared          = false;
     _original_b           = b;
 
+    // If GEMMLowpOutputStage != NONE, fuse the offset contribution with the output stage
+    if(gemm_info.gemmlowp_output_stage().type != GEMMLowpOutputStageType::NONE)
+    {
+        _fuse_output_stage = true;
+
+        _memory_group.manage(&_mm_result_s32);
+
+        TensorInfo info_mm_result_s32(output->info()->tensor_shape(), 1, DataType::S32);
+
+        _mm_result_s32.allocator()->init(info_mm_result_s32);
+    }
+
 #ifdef __aarch64__
     switch(a->info()->data_type())
     {
@@ -72,7 +87,7 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
         case DataType::U8:
         case DataType::S8:
         {
-            _asm_glue.configure(a, b, output, 1.f, 0.f, _reshape_b_only_on_first_run);
+            _asm_glue.configure(a, b, _fuse_output_stage ? &_mm_result_s32 : output, 1.f, 0.f, _reshape_b_only_on_first_run);
             _dot_product_path = _asm_glue.is_configured();
             break;
         }
@@ -83,51 +98,35 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
         }
     }
 #endif /* __aarch64__ */
-    if(!_dot_product_path)
+    if(!(_dot_product_path || _run_vector_matrix_multiplication))
     {
-        if(_run_vector_matrix_multiplication)
+        matrix_a = &_tmp_a;
+        matrix_b = &_tmp_b;
+
+        // The interleaved output matrix will have the following shape: [ a_height * 4, ceil(a_width / 4.0f) ]
+        TensorInfo a_info(compute_interleaved_shape(*a->info()), 1, a->info()->data_type());
+        // The transpose1xW output matrix will have the following shape: [ b_height * 16, ceil(b_width / 16.0f) ]
+        TensorInfo b_info(compute_transpose1xW_shape(*b->info()), 1, b->info()->data_type());
+        _tmp_a.allocator()->init(a_info);
+        _tmp_b.allocator()->init(b_info);
+        _memory_group.manage(&_tmp_a);
+        if(!_reshape_b_only_on_first_run)
         {
-            // Configure matrix multiply kernel
-            {
-                auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpMatrixMultiplyKernel>();
-                k->configure(a, b, output);
-                _mm_kernel = std::move(k);
-            }
+            _memory_group.manage(&_tmp_b);
         }
-        else
-        {
-            // The interleaved output matrix will have the following shape: [ a_height * 4, ceil(a_width / 4.0f) ]
-            TensorInfo info_a = a->info()->clone()->set_tensor_shape(compute_interleaved_shape(*a->info())).set_is_resizable(true);
-            // The transpose1xW output matrix will have the following shape: [ b_height * 16, ceil(b_width / 16.0f) ]
-            TensorInfo info_b = b->info()->clone()->set_tensor_shape(compute_transpose1xW_shape(*b->info())).set_is_resizable(true);
-            _tmp_a.allocator()->init(info_a);
-            _tmp_b.allocator()->init(info_b);
-            _memory_group.manage(&_tmp_a);
-            if(!_reshape_b_only_on_first_run)
-            {
-                _memory_group.manage(&_tmp_b);
-            }
 
-            // Configure interleave kernel
-            {
-                auto k = arm_compute::support::cpp14::make_unique<NEGEMMInterleave4x4Kernel>();
-                k->configure(a, &_tmp_a);
-                _mtx_a_reshape_kernel = std::move(k);
-            }
+        // Configure interleave kernel
+        {
+            auto k = arm_compute::support::cpp14::make_unique<NEGEMMInterleave4x4Kernel>();
+            k->configure(a, &_tmp_a);
+            _mtx_a_reshape_kernel = std::move(k);
+        }
 
-            // Configure transpose kernel
-            {
-                auto k = arm_compute::support::cpp14::make_unique<NEGEMMTranspose1xWKernel>();
-                k->configure(b, &_tmp_b);
-                _mtx_b_reshape_kernel = std::move(k);
-            }
-
-            // Configure matrix multiply kernel
-            {
-                auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpMatrixMultiplyKernel>();
-                k->configure(&_tmp_a, &_tmp_b, output);
-                _mm_kernel = std::move(k);
-            }
+        // Configure transpose kernel
+        {
+            auto k = arm_compute::support::cpp14::make_unique<NEGEMMTranspose1xWKernel>();
+            k->configure(b, &_tmp_b);
+            _mtx_b_reshape_kernel = std::move(k);
         }
     }
 
@@ -158,8 +157,33 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
         _mtx_a_reduction_kernel.configure(a, &_vector_sum_row, a->info()->dimension(0), false);
     }
 
-    // Configure offset contribution kernel
-    _offset_contribution_kernel.configure(output, _a_offset == 0 ? nullptr : &_vector_sum_col, _b_offset == 0 ? nullptr : &_vector_sum_row, a->info()->dimension(0), _a_offset, _b_offset);
+    if(_fuse_output_stage)
+    {
+        // Configure matrix multiply kernel
+        if(!_dot_product_path)
+        {
+            auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpMatrixMultiplyKernel>();
+            k->configure(matrix_a, matrix_b, &_mm_result_s32);
+            _mm_kernel = std::move(k);
+        }
+
+        _offset_contribution_output_stage_kernel.configure(&_mm_result_s32, _a_offset == 0 ? nullptr : &_vector_sum_col, _b_offset == 0 ? nullptr : &_vector_sum_row, c, output, a->info()->dimension(0),
+                                                           _a_offset, _b_offset, gemm_info.gemmlowp_output_stage());
+
+        _mm_result_s32.allocator()->allocate();
+    }
+    else
+    {
+        // Configure matrix multiply kernel
+        if(!_dot_product_path)
+        {
+            auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpMatrixMultiplyKernel>();
+            k->configure(matrix_a, matrix_b, output);
+            _mm_kernel = std::move(k);
+        }
+        // Configure offset contribution kernel
+        _offset_contribution_kernel.configure(output, _a_offset == 0 ? nullptr : &_vector_sum_col, _b_offset == 0 ? nullptr : &_vector_sum_row, a->info()->dimension(0), _a_offset, _b_offset);
+    }
 
     // Allocate tensors
     if(!_dot_product_path && !_run_vector_matrix_multiplication)
@@ -185,43 +209,53 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
 Status NEGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, const GEMMInfo &gemm_info)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::QASYMM8);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32, DataType::QASYMM8);
     ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(a, b);
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(c != nullptr, "Bias addition not supported in NEGEMMLowpMatrixMultiplyCore");
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(c != nullptr && gemm_info.gemmlowp_output_stage().type == GEMMLowpOutputStageType::NONE, "Bias addition not supported in NEGEMMLowpMatrixMultiplyCore for output S32");
     ARM_COMPUTE_RETURN_ERROR_ON_MSG((a)->dimension(0) != (b)->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B");
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_a_reshaped(), "Matrix A already reshaped is not supported");
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_b_reshaped(), "Matrix B already reshaped is not supported");
 
+    const ITensorInfo *matrix_a_info = a;
+    const ITensorInfo *matrix_b_info = b;
+
+    TensorInfo tmp_a_info{};
+    TensorInfo tmp_b_info{};
+    TensorInfo mm_result_s32_info{};
+
     int32_t a_offset = a->quantization_info().offset;
     int32_t b_offset = b->quantization_info().offset;
 
     const bool reshape_b_only_on_first_run = gemm_info.reshape_b_only_on_first_run();
+    bool       fuse_output_stage           = gemm_info.gemmlowp_output_stage().type != GEMMLowpOutputStageType::NONE;
+
+    if(fuse_output_stage)
+    {
+        auto_init_if_empty(mm_result_s32_info, a->clone()->set_tensor_shape(output->tensor_shape()).set_data_type(DataType::S32));
+    }
 
     // Check if we need to run the optimized assembly kernel
-    const bool run_optimised = bool(NEGEMMAssemblyDispatch::validate(a, b, output, 1.f, 0.f, reshape_b_only_on_first_run));
+    const bool run_optimised = bool(NEGEMMAssemblyDispatch::validate(a, b, fuse_output_stage ? &mm_result_s32_info : output, 1.f, 0.f, reshape_b_only_on_first_run));
 
     if(run_optimised)
     {
-        if(output->total_size() != 0)
+        ARM_COMPUTE_RETURN_ERROR_ON(b->dimension(0) != output->dimension(0));
+        if(gemm_info.depth_output_gemm3d() != 0)
         {
-            ARM_COMPUTE_RETURN_ERROR_ON(b->dimension(0) != output->dimension(0));
-            if(gemm_info.depth_output_gemm3d() != 0)
+            if(gemm_info.reinterpret_input_as_3d())
             {
-                if(gemm_info.reinterpret_input_as_3d())
-                {
-                    ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != output->dimension(1));
-                    ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(2) != output->dimension(2));
-                }
-                else
-                {
-                    ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != output->dimension(1) * output->dimension(2));
-                }
+                ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != output->dimension(1));
+                ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(2) != output->dimension(2));
             }
             else
             {
-                ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != output->dimension(1));
+                ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != output->dimension(1) * output->dimension(2));
             }
         }
+        else
+        {
+            ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != output->dimension(1));
+        }
     }
     else
     {
@@ -231,6 +265,9 @@ Status NEGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITenso
         const bool run_vector_matrix_multiplication = a->dimension(1) < 2;
         if(!run_vector_matrix_multiplication)
         {
+            matrix_a_info = &tmp_a_info;
+            matrix_b_info = &tmp_b_info;
+
             // The interleaved output matrix will have the following shape: [ a_height * 4, ceil(a_width / 4.0f) ]
             TensorShape shape_tmp_a = a->tensor_shape();
             shape_tmp_a.set(0, a->dimension(0) * 4);
@@ -241,16 +278,12 @@ Status NEGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITenso
             shape_tmp_b.set(0, b->dimension(1) * 16);
             shape_tmp_b.set(1, std::ceil(b->dimension(0) / 16.f));
 
-            TensorInfo info_a = a->clone()->set_tensor_shape(shape_tmp_a).set_is_resizable(true);
-            TensorInfo info_b = b->clone()->set_tensor_shape(shape_tmp_b).set_is_resizable(true);
+            // Validate interleave kernel
+            auto_init_if_empty(tmp_a_info, a->clone()->set_tensor_shape(shape_tmp_a));
+            auto_init_if_empty(tmp_b_info, b->clone()->set_tensor_shape(shape_tmp_b));
 
-            ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMInterleave4x4Kernel::validate(a, &info_a));
-            ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMTranspose1xWKernel::validate(b, &info_b));
-            ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixMultiplyKernel::validate(&info_a, &info_b, output));
-        }
-        else
-        {
-            ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixMultiplyKernel::validate(a, b, output));
+            ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMInterleave4x4Kernel::validate(a, &tmp_a_info));
+            ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMTranspose1xWKernel::validate(b, &tmp_b_info));
         }
     }
 
@@ -274,12 +307,32 @@ Status NEGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITenso
         ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixAReductionKernel::validate(a, &info_vector_sum_row, a->dimension(0), false));
     }
 
-    // Validate offset contribution kernel
-    ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpOffsetContributionKernel::validate(output,
-                                                                             a_offset == 0 ? nullptr : &info_vector_sum_col,
-                                                                             b_offset == 0 ? nullptr : &info_vector_sum_row,
-                                                                             a_offset, b_offset));
+    if(fuse_output_stage)
+    {
+        if(!run_optimised)
+        {
+            ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixMultiplyKernel::validate(matrix_a_info, matrix_b_info, &mm_result_s32_info));
+        }
+        // Validate offset contribution kernel
+        ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpOffsetContributionOutputStageKernel::validate(&mm_result_s32_info,
+                                                                                            a_offset == 0 ? nullptr : &info_vector_sum_col,
+                                                                                            b_offset == 0 ? nullptr : &info_vector_sum_row,
+                                                                                            c, output, a_offset, b_offset,
+                                                                                            gemm_info.gemmlowp_output_stage()));
+    }
+    else
+    {
+        if(!run_optimised)
+        {
+            ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixMultiplyKernel::validate(matrix_a_info, matrix_b_info, output));
+        }
+        // Validate offset contribution kernel
+        ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpOffsetContributionKernel::validate(output,
+                                                                                 a_offset == 0 ? nullptr : &info_vector_sum_col,
+                                                                                 b_offset == 0 ? nullptr : &info_vector_sum_row,
+                                                                                 a_offset, b_offset));
+    }
 
     return Status{};
 }
@@ -321,8 +374,16 @@ void NEGEMMLowpMatrixMultiplyCore::run()
         NEScheduler::get().schedule(&_mtx_b_reduction_kernel, Window::DimX);
     }
 
-    // Run offset contribution kernel
-    NEScheduler::get().schedule(&_offset_contribution_kernel, Window::DimY);
+    if(_fuse_output_stage)
+    {
+        // Run fused offset contribution + output stage kernel
+        NEScheduler::get().schedule(&_offset_contribution_output_stage_kernel, Window::DimY);
+    }
+    else
+    {
+        // Run offset contribution kernel
+        NEScheduler::get().schedule(&_offset_contribution_kernel, Window::DimY);
+    }
 
     _memory_group.release();
 }
-- 
cgit v1.2.1
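
Usage sketch (illustrative, not part of the patch): after this change, a caller selects the fused offset-contribution + output-stage path simply by passing a GEMMInfo whose GEMMLowpOutputStageInfo type is not NONE; NEGEMMConvolutionLayer::configure_mm() above is the in-tree example. The helper below condenses that pattern. The helper name and tensor variables are hypothetical, and while the GEMMLowpOutputStageInfo fields and the GEMMInfo argument list mirror the patch, exact signatures may differ in other versions of the library.

#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"

using namespace arm_compute;

// Hypothetical helper: configure a QASYMM8 GEMM whose offset contribution and
// requantization run in the single fused kernel introduced by this patch.
// min_activation/max_activation are the clamping bounds, e.g. the merged
// RELU bounds computed in configure_mm() above.
void configure_fused_gemmlowp(ITensor *a, ITensor *b, ITensor *bias, ITensor *output,
                              NEGEMMLowpMatrixMultiplyCore &gemmlowp,
                              int min_activation, int max_activation)
{
    const QuantizationInfo a_qinfo   = a->info()->quantization_info();
    const QuantizationInfo b_qinfo   = b->info()->quantization_info();
    const QuantizationInfo out_qinfo = output->info()->quantization_info();

    // Fixed-point requantization parameters, computed as in the patch:
    // (scale_a * scale_b) / scale_out expressed as an integer multiplier
    // plus a right shift.
    const float multiplier        = a_qinfo.scale * b_qinfo.scale / out_qinfo.scale;
    int         output_multiplier = 0;
    int         output_shift      = 0;
    quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);

    // Any type other than NONE selects the fused path: the S32 accumulators
    // stay in an internal tensor and NEGEMMLowpOffsetContributionOutputStageKernel
    // applies the offset contribution, the bias and the requantization in one pass.
    GEMMLowpOutputStageInfo output_info;
    output_info.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    output_info.gemmlowp_offset     = out_qinfo.offset;
    output_info.gemmlowp_multiplier = output_multiplier;
    output_info.gemmlowp_shift      = output_shift;
    output_info.gemmlowp_min_bound  = min_activation;
    output_info.gemmlowp_max_bound  = max_activation;

    // GEMMInfo(is_a_reshaped, is_b_reshaped, reshape_b_only_on_first_run,
    //          depth_output_gemm3d, reinterpret_input_as_3d,
    //          retain_internal_weights, gemmlowp_output_stage)
    gemmlowp.configure(a, b, bias, output,
                       GEMMInfo(false, false, true, 0, false, false, output_info));
}

With a default GEMMInfo (output stage NONE) the behaviour is unchanged: the core still writes raw S32 accumulators and the caller applies its own output stage, which is why DataType::S32 remains accepted for the output in validate().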