From de5a1cc7e5c929b19fb1d3ed7d0d8783b9ac6860 Mon Sep 17 00:00:00 2001 From: Georgios Pinitas Date: Fri, 2 Feb 2018 12:52:07 +0000 Subject: COMPMID-856: CL Depthwise Convolution QASYMM8 support Change-Id: Ic6097e7cf160e8b829fb521b7b99d9a57d9799d3 Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/118774 Tested-by: Jenkins Reviewed-by: Anthony Barbier --- .../CL/functions/CLDepthwiseConvolutionLayer.cpp | 61 +++++++++++++++------- 1 file changed, 43 insertions(+), 18 deletions(-) (limited to 'src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp') diff --git a/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp b/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp index 02273fe08b..fcf3969515 100644 --- a/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp +++ b/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 ARM Limited. + * Copyright (c) 2017-2018 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -25,10 +25,12 @@ #include "arm_compute/core/CL/ICLTensor.h" #include "arm_compute/core/PixelValue.h" +#include "arm_compute/core/utils/quantization/AsymmHelpers.h" #include "arm_compute/runtime/CL/CLScheduler.h" #include "support/ToolchainSupport.h" using namespace arm_compute; +using namespace arm_compute::misc; CLDepthwiseConvolutionLayer3x3::CLDepthwiseConvolutionLayer3x3() : _kernel(), _border_handler() @@ -59,14 +61,14 @@ void CLDepthwiseConvolutionLayer3x3::run() } CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayer() - : _im2col_kernel(), _weights_reshape_kernel(), _v2mm_kernel(), _vector_to_tensor_kernel(), _v2mm_input_fill_border(), _v2mm_weights_fill_border(), _input_reshaped(), _weights_reshaped(), - _v2mm_output() + : _im2col_kernel(), _weights_reshape_kernel(), _v2mm_kernel(), _vector_to_tensor_kernel(), _output_stage_kernel(), _v2mm_input_fill_border(), _v2mm_weights_fill_border(), _input_reshaped(), + _weights_reshaped(), _v2mm_output(), _output_reshaped(), _is_quantized(false) { } void 
CLDepthwiseConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info) { - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32); + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F32); ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights); ARM_COMPUTE_ERROR_ON(input->info()->dimension(2) != weights->info()->dimension(2)); @@ -74,15 +76,17 @@ void CLDepthwiseConvolutionLayer::configure(ICLTensor *input, const ICLTensor *w const size_t weights_h = weights->info()->dimension(1); const size_t weights_z = weights->info()->dimension(2); - const bool has_bias = (biases != nullptr); - const GPUTarget gpu_target = CLScheduler::get().target(); + _is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type()); + + bool append_bias = (biases != nullptr) && !_is_quantized; + const GPUTarget gpu_target = CLScheduler::get().target(); unsigned int conv_w = 0; unsigned int conv_h = 0; std::tie(conv_w, conv_h) = scaled_dimensions(input->info()->dimension(0), input->info()->dimension(1), weights_w, weights_h, conv_info); // Set up intermediate tensors - const size_t patch_size = weights_w * weights_h + ((has_bias) ? 1 : 0); + const size_t patch_size = weights_w * weights_h + ((append_bias) ? 
1 : 0); const size_t conv_size = conv_w * conv_h; // Im2Col configuration @@ -90,33 +94,49 @@ void CLDepthwiseConvolutionLayer::configure(ICLTensor *input, const ICLTensor *w shape_im2col.set(0, patch_size); shape_im2col.set(1, conv_size); shape_im2col.set(2, weights_z); - const TensorInfo info_im2col(shape_im2col, 1, input->info()->data_type(), input->info()->fixed_point_position()); - _input_reshaped.allocator()->init(info_im2col); + _input_reshaped.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_im2col)); _im2col_kernel.set_target(gpu_target); - _im2col_kernel.configure(input, &_input_reshaped, Size2D(weights_w, weights_h), conv_info, has_bias); + _im2col_kernel.configure(input, &_input_reshaped, Size2D(weights_w, weights_h), conv_info, append_bias); // Weights reshape configuration const TensorShape shape_weights_reshape(patch_size, weights_z); - const TensorInfo info_weights_reshape(shape_weights_reshape, 1, weights->info()->data_type(), weights->info()->fixed_point_position()); - _weights_reshaped.allocator()->init(info_weights_reshape); - _weights_reshape_kernel.configure(weights, &_weights_reshaped, biases); + _weights_reshaped.allocator()->init(weights->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_weights_reshape)); + _weights_reshape_kernel.configure(weights, &_weights_reshaped, append_bias ? biases : nullptr); // GEMV configuration + DataType v2mm_dt = (input->info()->data_type() == DataType::QASYMM8) ? 
DataType::S32 : input->info()->data_type(); TensorShape shape_v2mm_out = input->info()->tensor_shape(); shape_v2mm_out.set(0, conv_size * weights_z); shape_v2mm_out.set(1, 1); shape_v2mm_out.set(2, 1); - const TensorInfo info_v2mm_out(shape_v2mm_out, 1, input->info()->data_type(), input->info()->fixed_point_position()); - _v2mm_output.allocator()->init(info_v2mm_out); + _v2mm_output.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_data_type(v2mm_dt).set_tensor_shape(shape_v2mm_out)); _v2mm_kernel.set_target(gpu_target); _v2mm_kernel.configure(&_input_reshaped, &_weights_reshaped, &_v2mm_output); - _vector_to_tensor_kernel.configure(&_v2mm_output, output, conv_w, conv_h); + _vector_to_tensor_kernel.configure(&_v2mm_output, (_is_quantized) ? &_output_reshaped : output, conv_w, conv_h); + + // Output staged configuration + if(_is_quantized) + { + float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output->info()->quantization_info().scale; + int output_multiplier, output_shift; + quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift); + _output_stage_kernel.configure(&_output_reshaped, biases, output, output_multiplier, output_shift, output->info()->quantization_info().offset); + _output_reshaped.allocator()->allocate(); + } + // Fill borders on inputs + PixelValue zero_in(static_cast<int32_t>(0)); + PixelValue zero_w(static_cast<int32_t>(0)); + if(_is_quantized) + { + zero_in = PixelValue(static_cast<int32_t>(input->info()->quantization_info().offset)); + zero_w = PixelValue(static_cast<int32_t>(weights->info()->quantization_info().offset)); + } BorderSize border_size = _v2mm_kernel.border_size(); - _v2mm_input_fill_border.configure(&_input_reshaped, border_size, BorderMode::CONSTANT, PixelValue(0)); + _v2mm_input_fill_border.configure(&_input_reshaped, border_size, BorderMode::CONSTANT, zero_in); border_size.bottom = 0; - 
_v2mm_weights_fill_border.configure(&_weights_reshaped, border_size, BorderMode::CONSTANT, PixelValue(0)); + _v2mm_weights_fill_border.configure(&_weights_reshaped, border_size, BorderMode::CONSTANT, zero_w); // Allocate intermediate tensors _input_reshaped.allocator()->allocate(); @@ -135,4 +155,9 @@ void CLDepthwiseConvolutionLayer::run() CLScheduler::get().enqueue(_v2mm_kernel); CLScheduler::get().enqueue(_vector_to_tensor_kernel); + + if(_is_quantized) + { + CLScheduler::get().enqueue(_output_stage_kernel); + } } -- cgit v1.2.1