From 814bddfd92c568cfb0bcfc23537b7658f29628c4 Mon Sep 17 00:00:00 2001
From: Gunes Bayir
Date: Wed, 1 Sep 2021 16:20:54 +0100
Subject: Fuse pad layer with subsequent convolution layer

Fusing occurs only if
 - the padding is only for height/width
 - padding pixel value is 0
 - padding node output has no accessors

Resolves: COMPMID-4702
Signed-off-by: Gunes Bayir
Change-Id: I0755d5fb0bd3a55d9f10b32ce9da44e7c5a25279
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6189
Tested-by: Arm Jenkins
Reviewed-by: Pablo Marquez Tello
Reviewed-by: Georgios Pinitas
Comments-Addressed: Georgios Pinitas
---
 src/graph/Utils.cpp                               | 20 +++++++
 src/graph/mutators/NodeFusionMutator.cpp          | 70 +++++++++++++++++++++++
 src/graph/nodes/ConvolutionLayerNode.cpp          |  7 ++-
 src/graph/nodes/DepthwiseConvolutionLayerNode.cpp |  7 ++-
 4 files changed, 102 insertions(+), 2 deletions(-)

(limited to 'src')

diff --git a/src/graph/Utils.cpp b/src/graph/Utils.cpp
index 7309737bd7..7db06b9c70 100644
--- a/src/graph/Utils.cpp
+++ b/src/graph/Utils.cpp
@@ -194,6 +194,26 @@ std::vector<NodeIdxPair> get_driving_nodes(const INode &node)
     return driving_nodes;
 }
 
+std::vector<NodeIdxPair> get_driver_nodes(const INode &node)
+{
+    std::vector<NodeIdxPair> driver_nodes;
+
+    const Graph *g = node.graph();
+    ARM_COMPUTE_ERROR_ON(g == nullptr);
+
+    for(auto &input_edge_id : node.input_edges())
+    {
+        auto input_edge = g->edge(input_edge_id);
+        if(input_edge != nullptr)
+        {
+            ARM_COMPUTE_ERROR_ON(input_edge->producer() == nullptr);
+            driver_nodes.push_back({ input_edge->producer_id(), input_edge->producer_idx() });
+        }
+    }
+
+    return driver_nodes;
+}
+
 void configure_tensor(Tensor *tensor)
 {
     if(tensor != nullptr && tensor->handle() == nullptr)
diff --git a/src/graph/mutators/NodeFusionMutator.cpp b/src/graph/mutators/NodeFusionMutator.cpp
index 5a696f8386..b530fb0c00 100644
--- a/src/graph/mutators/NodeFusionMutator.cpp
+++ b/src/graph/mutators/NodeFusionMutator.cpp
@@ -265,6 +265,74 @@ void fuse_node_with_activation(Graph &g, const Edge *output_edge, const std::set
     }
 }
 
+bool check_padding_info(const DataLayout &layout, const PaddingList &padding_list, PaddingInfo &pad_w, PaddingInfo &pad_h)
+{
+    if(layout == DataLayout::NCHW || layout == DataLayout::NHWC)
+    {
+        const PaddingInfo zero_padding(0, 0);
+
+        const unsigned int height_index = get_dimension_idx(layout, DataLayoutDimension::HEIGHT);
+        const unsigned int width_index  = get_dimension_idx(layout, DataLayoutDimension::WIDTH);
+
+        pad_w = width_index < padding_list.size() ? padding_list[width_index] : zero_padding;
+        pad_h = height_index < padding_list.size() ? padding_list[height_index] : zero_padding;
+
+        for(unsigned int i = 0; i < padding_list.size(); i++)
+        {
+            if(i != height_index && i != width_index && padding_list[i] != zero_padding)
+            {
+                // if the index is not either height or width, don't fuse
+                return false;
+            }
+        }
+
+        return true;
+    }
+
+    return false;
+}
+
+template <typename N>
+void fuse_pad_with_convolution(Graph &g, const Edge *output_edge)
+{
+    auto *pad_node  = arm_compute::utils::cast::polymorphic_downcast<PadLayerNode *>(output_edge->producer());
+    auto *conv_node = arm_compute::utils::cast::polymorphic_downcast<N *>(output_edge->consumer());
+
+    const Edge *input_edge = pad_node->input_edge(0);
+    if(input_edge != nullptr && input_edge->tensor() != nullptr && pad_node->output(0)->accessor() == nullptr
+       && pad_node->pad_value().get<float>() == 0.0)
+    {
+        const DataLayout  layout       = input_edge->tensor()->desc().layout;
+        const PaddingList padding_list = pad_node->padding();
+        PaddingInfo       pad_w, pad_h;
+
+        if(check_padding_info(layout, padding_list, pad_w, pad_h))
+        {
+            // Add paddings to the convolution node
+            const PadStrideInfo conv_info = conv_node->convolution_info();
+            const PadStrideInfo new_conv_info(
+                conv_info.stride().first,
+                conv_info.stride().second,
+                conv_info.pad_left() + pad_w.first,
+                conv_info.pad_right() + pad_w.second,
+                conv_info.pad_top() + pad_h.first,
+                conv_info.pad_bottom() + pad_h.second,
+                conv_info.round());
+            conv_node->set_convolution_info(new_conv_info);
+
+            // Update drivers of the convolution node
+            std::vector<NodeIdxPair> pad_driver_nodes = get_driver_nodes(*pad_node);
+            g.remove_node(pad_node->id());
+
+            // Update fused node inputs
+            for(auto &driver_node : pad_driver_nodes)
+            {
+                g.add_connection(driver_node.node_id, driver_node.index, conv_node->id(), 0);
+            }
+        }
+    }
+}
+
 template <typename N1, typename N2, typename F, typename... Args>
 void fuse_layer(Graph &g, std::function<bool(INode &)> const &prec, const F fuse_fcn, Args &&... optional_arguments)
 {
@@ -333,6 +401,8 @@ void NodeFusionMutator::mutate(Graph &g)
     };
 
     // Fusion mutations
+    detail::fuse_layer<PadLayerNode, ConvolutionLayerNode>(g, empty_prec, detail::fuse_pad_with_convolution<ConvolutionLayerNode>);
+    detail::fuse_layer<PadLayerNode, DepthwiseConvolutionLayerNode>(g, empty_prec, detail::fuse_pad_with_convolution<DepthwiseConvolutionLayerNode>);
     detail::fuse_layer(g, empty_prec, detail::fuse_node_with_activation, supported_fused_activations);
     detail::fuse_layer(g, empty_prec, detail::fuse_node_with_activation, supported_fused_activations);
     detail::fuse_layer(g, qs8_prec, detail::fuse_node_with_activation, supported_fused_activations);
diff --git a/src/graph/nodes/ConvolutionLayerNode.cpp b/src/graph/nodes/ConvolutionLayerNode.cpp
index a9825702ce..ee9dde91d5 100644
--- a/src/graph/nodes/ConvolutionLayerNode.cpp
+++ b/src/graph/nodes/ConvolutionLayerNode.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019 Arm Limited.
+ * Copyright (c) 2018-2019, 2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -83,6 +83,11 @@ void ConvolutionLayerNode::set_fused_activation(ActivationLayerInfo fused_activa
     _fused_activation = fused_activation;
 }
 
+void ConvolutionLayerNode::set_convolution_info(PadStrideInfo info)
+{
+    _info = info;
+}
+
 TensorDescriptor ConvolutionLayerNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
                                                                  const TensorDescriptor &weights_descriptor,
                                                                  const PadStrideInfo    &info)
diff --git a/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp b/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
index 42fb0fd6da..7de20165cb 100644
--- a/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
+++ b/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019 Arm Limited.
+ * Copyright (c) 2018-2019, 2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -70,6 +70,11 @@ void DepthwiseConvolutionLayerNode::set_fused_activation(ActivationLayerInfo fus
     _fused_activation = fused_activation;
 }
 
+void DepthwiseConvolutionLayerNode::set_convolution_info(PadStrideInfo info)
+{
+    _info = info;
+}
+
 TensorDescriptor DepthwiseConvolutionLayerNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
                                                                           const TensorDescriptor &weights_descriptor,
                                                                           const PadStrideInfo    &info,
-- 
cgit v1.2.1
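
Note (not part of the patch): the following is a minimal standalone C++ sketch of the padding-folding arithmetic that fuse_pad_with_convolution performs, i.e. the pad layer's width/height (before, after) padding is added onto the convolution's existing left/right/top/bottom padding while strides and rounding are left untouched. The names used here (SimpleConvInfo, fold_padding) are made up for illustration and are not part of the Compute Library API.

// fold_padding_sketch.cpp -- illustration only; simplified stand-in for PadStrideInfo.
#include <cstdio>
#include <utility>

struct SimpleConvInfo
{
    unsigned int stride_x{ 1 }, stride_y{ 1 };
    unsigned int pad_left{ 0 }, pad_right{ 0 }, pad_top{ 0 }, pad_bottom{ 0 };
};

// Fold a pad layer's (before, after) padding for width and height into the
// convolution's own padding, mirroring what the mutator writes into the new
// PadStrideInfo before removing the pad node.
SimpleConvInfo fold_padding(SimpleConvInfo conv,
                            std::pair<unsigned int, unsigned int> pad_w,
                            std::pair<unsigned int, unsigned int> pad_h)
{
    conv.pad_left   += pad_w.first;
    conv.pad_right  += pad_w.second;
    conv.pad_top    += pad_h.first;
    conv.pad_bottom += pad_h.second;
    return conv;
}

int main()
{
    // A pad layer adding 1 pixel on each side of W and H, in front of a
    // convolution that has no padding of its own.
    const SimpleConvInfo fused = fold_padding(SimpleConvInfo{}, { 1, 1 }, { 1, 1 });
    std::printf("fused padding L/R/T/B = %u/%u/%u/%u\n",
                fused.pad_left, fused.pad_right, fused.pad_top, fused.pad_bottom);
    return 0;
}

Running the sketch prints left/right/top/bottom padding of 1/1/1/1, matching what the fused convolution node ends up with once the zero-value pad node is removed and its driver nodes are reconnected directly to the convolution.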