Diffstat (limited to 'src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp')
-rw-r--r-- | src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp | 327 |
1 files changed, 327 insertions, 0 deletions
diff --git a/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
index 43288ec4c6..45cc2d2762 100644
--- a/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
@@ -363,6 +363,333 @@ void NEDepthwiseConvolutionLayer3x3::prepare()
     }
 }
 
+NEDepthwiseConvolutionLayerOptimized::NEDepthwiseConvolutionLayerOptimized(std::shared_ptr<IMemoryManager> memory_manager)
+    : _memory_group(memory_manager), _dwc_kernel(), _dwc_optimized_func(memory_manager), _output_stage_kernel(), _border_handler(), _permute_input(), _permute_weights(), _permute_output(),
+      _activationlayer_function(), _accumulator(), _permuted_input(), _permuted_weights(), _permuted_output(), _original_weights(nullptr), _has_bias(false), _is_quantized(false), _is_optimized(false),
+      _is_nchw(true), _permute(false), _is_activationlayer_enabled(false), _is_prepared(false)
+{
+}
+
+void NEDepthwiseConvolutionLayerOptimized::configure_generic(ITensor                   *input,
+                                                             const ITensor             *weights,
+                                                             const ITensor             *biases,
+                                                             ITensor                   *output,
+                                                             const PadStrideInfo       &conv_info,
+                                                             unsigned int               depth_multiplier,
+                                                             const ActivationLayerInfo &act_info,
+                                                             const Size2D              &dilation)
+{
+    ARM_COMPUTE_UNUSED(act_info);
+
+    PixelValue zero_value(0.f);
+
+    // Initialize the intermediate accumulator tensor in case of quantized input
+    if(_is_quantized)
+    {
+        TensorShape accum_shape  = output->info()->tensor_shape();
+        DataLayout  accum_layout = output->info()->data_layout();
+        if(!_is_nchw)
+        {
+            permute(accum_shape, PermutationVector(1U, 2U, 0U));
+            accum_layout = DataLayout::NCHW;
+        }
+
+        _memory_group.manage(&_accumulator);
+        _accumulator.allocator()->init(TensorInfo(accum_shape, 1, DataType::S32, output->info()->quantization_info()));
+        _accumulator.info()->set_data_layout(accum_layout);
+        zero_value = PixelValue(static_cast<uint32_t>(input->info()->quantization_info().uniform().offset));
+    }
+
+    if(!_is_nchw)
+    {
+        _memory_group.manage(&_permuted_input);
+        _memory_group.manage(&_permuted_output);
+
+        // Configure the function to transform the input tensor from NHWC -> NCHW
+        _permute_input.configure(input, &_permuted_input, PermutationVector(1U, 2U, 0U));
+        _permuted_input.info()->set_data_layout(DataLayout::NCHW);
+
+        // Configure the function to transform the weights tensor from HWI -> IHW
+        _permute_weights.configure(weights, &_permuted_weights, PermutationVector(1U, 2U, 0U));
+        _permuted_weights.info()->set_data_layout(DataLayout::NCHW);
+        _permuted_output.info()->set_quantization_info(output->info()->quantization_info());
+
+        // Configure depthwise
+        _dwc_kernel.configure(&_permuted_input, &_permuted_weights, (_is_quantized) ? &_accumulator : &_permuted_output, conv_info, depth_multiplier, dilation);
+
+        // Configure border handler
+        _border_handler.configure(&_permuted_input, _dwc_kernel.border_size(), BorderMode::CONSTANT, zero_value);
+
+        // Allocate tensors
+        _permuted_input.allocator()->allocate();
+    }
+    else
+    {
+        // Configure depthwise convolution kernel
+        _dwc_kernel.configure(input, weights, (_is_quantized) ? &_accumulator : output, conv_info, depth_multiplier, dilation);
+
+        // Configure border handler
+        _border_handler.configure(input, _dwc_kernel.border_size(), BorderMode::CONSTANT, zero_value);
+    }
+
+    // Configure biases accumulation
+    if(_is_quantized)
+    {
+        const UniformQuantizationInfo iq_info = input->info()->quantization_info().uniform();
+        const UniformQuantizationInfo wq_info = weights->info()->quantization_info().uniform();
+        const UniformQuantizationInfo oq_info = (output->info()->total_size() == 0) ? iq_info : output->info()->quantization_info().uniform();
+
+        float multiplier = (iq_info.scale * wq_info.scale) / oq_info.scale;
+        int   output_multiplier;
+        int   output_shift;
+        quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
+        _output_stage_kernel.configure(&_accumulator, biases, _is_nchw ? output : &_permuted_output, output_multiplier, output_shift, oq_info.offset);
+        _accumulator.allocator()->allocate();
+    }
+    else if(_has_bias)
+    {
+        _output_stage_kernel.configure(_is_nchw ? output : &_permuted_output, biases);
+    }
+
+    // Permute output
+    if(!_is_nchw)
+    {
+        // Configure the function to transform the convoluted output to NHWC
+        _permute_output.configure(&_permuted_output, output, PermutationVector(2U, 0U, 1U));
+        _permuted_output.allocator()->allocate();
+    }
+}
+
+void NEDepthwiseConvolutionLayerOptimized::configure_optimized(const ITensor             *input,
+                                                               const ITensor             *weights,
+                                                               const ITensor             *biases,
+                                                               ITensor                   *output,
+                                                               const PadStrideInfo       &conv_info,
+                                                               unsigned int               depth_multiplier,
+                                                               const ActivationLayerInfo &act_info,
+                                                               const Size2D              &dilation)
+{
+    ActivationLayerInfo act_info_to_use = ActivationLayerInfo();
+    const bool          is_relu         = arm_compute::utils::info_helpers::is_relu(act_info);
+    const bool          is_relu6        = arm_compute::utils::info_helpers::is_relu6(act_info);
+    _is_activationlayer_enabled         = act_info.enabled() && !(is_relu || is_relu6);
+    if(!_is_activationlayer_enabled)
+    {
+        act_info_to_use = act_info;
+    }
+
+    if(_is_nchw)
+    {
+        _memory_group.manage(&_permuted_input);
+        _memory_group.manage(&_permuted_output);
+
+        // Configure the function to transform the input tensor from NCHW -> NHWC
+        _permute_input.configure(input, &_permuted_input, PermutationVector(2U, 0U, 1U));
+        _permuted_input.info()->set_data_layout(DataLayout::NHWC);
+
+        // Configure the function to transform the weights tensor from IHW -> HWI
+        _permute_weights.configure(weights, &_permuted_weights, PermutationVector(2U, 0U, 1U));
+        _permuted_weights.info()->set_data_layout(DataLayout::NHWC);
+
+        _permuted_output.info()->set_data_layout(DataLayout::NHWC);
+        _permuted_output.info()->set_quantization_info(output->info()->quantization_info());
+
+        // Configure optimized depthwise
+        _dwc_optimized_func.configure(&_permuted_input, &_permuted_weights, biases, &_permuted_output, conv_info, depth_multiplier, act_info_to_use, dilation);
+
+        // Configure the function to transform the convoluted output to ACL's native ordering format NCHW
+        _permuted_output.info()->set_data_layout(DataLayout::NHWC);
+        _permute_output.configure(&_permuted_output, output, PermutationVector(1U, 2U, 0U));
+
+        // Allocate tensors
+        _permuted_input.allocator()->allocate();
+        _permuted_output.allocator()->allocate();
+    }
+    else
+    {
+        _dwc_optimized_func.configure(input, weights, biases, output, conv_info, depth_multiplier, act_info_to_use, dilation);
+    }
+}
+
+void NEDepthwiseConvolutionLayerOptimized::configure(ITensor       *input,
+                                                     const ITensor *weights,
+                                                     const ITensor *biases,
+                                                     ITensor *output, const PadStrideInfo &conv_info,
+                                                     unsigned int               depth_multiplier,
+                                                     const ActivationLayerInfo &act_info,
+                                                     const Size2D              &dilation)
+{
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
+
+    // idx_w and idx_h only used for validation
+    const size_t idx_w = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::WIDTH);
+    const size_t idx_h = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::HEIGHT);
+    ARM_COMPUTE_UNUSED(idx_w);
+    ARM_COMPUTE_UNUSED(idx_h);
+
+    ARM_COMPUTE_ERROR_ON(weights->info()->dimension(idx_w) + (weights->info()->dimension(idx_w) - 1) * (dilation.x() - 1) > input->info()->dimension(idx_w) + conv_info.pad_left() + conv_info.pad_right());
+    ARM_COMPUTE_ERROR_ON(weights->info()->dimension(idx_h) + (weights->info()->dimension(idx_h) - 1) * (dilation.y() - 1) > input->info()->dimension(idx_h) + conv_info.pad_top() + conv_info.pad_bottom());
+
+    _original_weights = weights;
+    _is_quantized     = is_data_type_quantized_asymmetric(input->info()->data_type());
+    _has_bias         = biases != nullptr;
+    _is_optimized     = NEDepthwiseConvolutionAssemblyDispatch::is_optimized_supported(input->info(),
+                                                                                       weights->info(),
+                                                                                       conv_info,
+                                                                                       depth_multiplier,
+                                                                                       dilation);
+    _is_nchw                    = input->info()->data_layout() == DataLayout::NCHW;
+    _permute                    = _is_optimized == _is_nchw;
+    _is_prepared                = false;
+    _is_activationlayer_enabled = act_info.enabled();
+
+    // Configure appropriate pipeline
+    if(_is_optimized)
+    {
+        configure_optimized(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
+    }
+    else
+    {
+        configure_generic(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
+    }
+
+    // Configure activation
+    if(_is_activationlayer_enabled)
+    {
+        _activationlayer_function.configure(output, nullptr, act_info);
+    }
+}
+
+Status NEDepthwiseConvolutionLayerOptimized::validate(const ITensorInfo         *input,
+                                                      const ITensorInfo         *weights,
+                                                      const ITensorInfo         *biases,
+                                                      const ITensorInfo         *output,
+                                                      const PadStrideInfo       &conv_info,
+                                                      unsigned int               depth_multiplier,
+                                                      const ActivationLayerInfo &act_info,
+                                                      const Size2D              &dilation)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
+    ARM_COMPUTE_RETURN_ERROR_ON(input->data_layout() == DataLayout::UNKNOWN);
+    ARM_COMPUTE_RETURN_ERROR_ON(dilation.x() < 1 || dilation.y() < 1);
+    const size_t idx_w = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
+    const size_t idx_h = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
+    ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_w) + (weights->dimension(idx_w) - 1) * (dilation.x() - 1) > input->dimension(idx_w) + conv_info.pad_left() + conv_info.pad_right());
+    ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_h) + (weights->dimension(idx_h) - 1) * (dilation.y() - 1) > input->dimension(idx_h) + conv_info.pad_top() + conv_info.pad_bottom());
+
+    if(biases != nullptr)
+    {
+        const unsigned int channel_idx = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL);
+        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
+        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(channel_idx));
+    }
+
+    if(!NEDepthwiseConvolutionAssemblyDispatch::is_optimized_supported(input, weights, conv_info, depth_multiplier, dilation))
+    {
+        const bool is_quantized = is_data_type_quantized_asymmetric(input->data_type());
+        TensorInfo accumulator  = TensorInfo(output->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32));
+        ARM_COMPUTE_RETURN_ON_ERROR(NEDepthwiseConvolutionLayer3x3Kernel::validate(input, weights, is_quantized ? &accumulator : output, conv_info, depth_multiplier, dilation));
+
+        if(is_quantized)
+        {
+            ARM_COMPUTE_RETURN_ON_ERROR(NEDirectConvolutionLayerOutputStageKernel::validate(&accumulator, biases, output));
+        }
+    }
+    else
+    {
+        ARM_COMPUTE_RETURN_ON_ERROR(NEDepthwiseConvolutionAssemblyDispatch::validate(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation));
+    }
+
+    //Validate Activation Layer
+    if(act_info.enabled())
+    {
+        ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayer::validate(output, nullptr, act_info));
+    }
+
+    return Status{};
+}
+
+void NEDepthwiseConvolutionLayerOptimized::run_generic()
+{
+    // Fill border
+    NEScheduler::get().schedule(&_border_handler, Window::DimX);
+
+    // Execute depthwise convolution
+    NEScheduler::get().schedule(&_dwc_kernel, Window::DimX);
+
+    // Add biases
+    if(_has_bias || _is_quantized)
+    {
+        NEScheduler::get().schedule(&_output_stage_kernel, Window::DimX);
+    }
+
+    // Permute output
+    if(!_is_nchw)
+    {
+        _permute_output.run();
+    }
+}
+
+void NEDepthwiseConvolutionLayerOptimized::run_optimized()
+{
+    // Run assembly function
+    _dwc_optimized_func.run();
+
+    // Permute output
+    if(_is_nchw)
+    {
+        _permute_output.run();
+    }
+}
+
+void NEDepthwiseConvolutionLayerOptimized::run()
+{
+    prepare();
+
+    MemoryGroupResourceScope scope_mg(_memory_group);
+
+    // Permute input
+    if(_permute)
+    {
+        _permute_input.run();
+    }
+
+    _is_optimized ? run_optimized() : run_generic();
+
+    // Run activation
+    if(_is_activationlayer_enabled)
+    {
+        _activationlayer_function.run();
+    }
+}
+
+void NEDepthwiseConvolutionLayerOptimized::prepare()
+{
+    if(!_is_prepared)
+    {
+        // Permute weights
+        if(_permute)
+        {
+            _permuted_weights.allocator()->allocate();
+            _permute_weights.run();
+            _original_weights->mark_as_unused();
+        }
+
+        // Prepare optimized function
+        if(_is_optimized)
+        {
+            _dwc_optimized_func.prepare();
+            if(!_permuted_weights.is_used())
+            {
+                _permuted_weights.allocator()->free();
+            }
+        }
+
+        _is_prepared = true;
+    }
+}
+
 NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayer()
     : _im2col_kernel(), _weights_reshape_kernel(), _v2mm_kernel(), _vector_to_tensor_kernel(), _output_stage_kernel(), _v2mm_input_fill_border(), _v2mm_weights_fill_border(), _permute_input(),
       _permute_weights(), _permute_output(), _activationlayer_function(), _input_reshaped(), _weights_reshaped(), _v2mm_output(), _output_reshaped(), _permuted_input(), _permuted_weights(),
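
For reference, a minimal usage sketch of the NEDepthwiseConvolutionLayerOptimized function added by this patch. The include paths, tensor shapes, stride/padding values, and fused ReLU are illustrative assumptions and not part of the patch; the configure()/run() signatures follow the diff above, and the class is assumed to be declared in the corresponding NEDepthwiseConvolutionLayer.h header.

    #include "arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        // Illustrative NCHW shapes: 32x32 input with 16 channels, 3x3 depthwise weights, per-channel biases
        Tensor input{}, weights{}, biases{}, output{};
        input.allocator()->init(TensorInfo(TensorShape(32U, 32U, 16U), 1, DataType::F32));
        weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 16U), 1, DataType::F32));
        biases.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::F32));
        output.allocator()->init(TensorInfo(TensorShape(32U, 32U, 16U), 1, DataType::F32));

        // Stride 1, 1-pixel padding, depth multiplier 1, fused ReLU, no dilation (assumed values)
        NEDepthwiseConvolutionLayerOptimized depthwise_conv;
        depthwise_conv.configure(&input, &weights, &biases, &output,
                                 PadStrideInfo(1, 1, 1, 1),
                                 1,
                                 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
                                 Size2D(1U, 1U));

        // Allocate backing memory only after configuration
        input.allocator()->allocate();
        weights.allocator()->allocate();
        biases.allocator()->allocate();
        output.allocator()->allocate();

        // ... fill input/weights/biases with data ...

        // prepare() is invoked internally on the first run(); it permutes the weights
        // and releases the original weight tensor when the assembly path is used
        depthwise_conv.run();
        return 0;
    }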