From 2a2db590fd179dcb8e1a575293cd2b887e2dc246 Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Wed, 15 Aug 2018 12:14:46 +0100
Subject: COMPMID-1505: Add native grouping support at graph level

Change-Id: Iedc91b0aee743b59af5140c8acb8124548da3163
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/144362
Tested-by: Jenkins
Reviewed-by: Giorgio Arena
Reviewed-by: Michele DiGiorgio
---
 src/graph/mutators/DepthConcatSubTensorMutator.cpp |  19 ++-
 src/graph/mutators/GroupedConvolutionMutator.cpp   | 186 +++++++++++++++++++++
 src/graph/mutators/NodeExecutionMethodMutator.cpp  |  97 +++++++++++
 src/graph/mutators/NodeFusionMutator.cpp           |  13 +-
 src/graph/mutators/SplitLayerSubTensorMutator.cpp  |  17 +-
 5 files changed, 314 insertions(+), 18 deletions(-)
 create mode 100644 src/graph/mutators/GroupedConvolutionMutator.cpp
 create mode 100644 src/graph/mutators/NodeExecutionMethodMutator.cpp

diff --git a/src/graph/mutators/DepthConcatSubTensorMutator.cpp b/src/graph/mutators/DepthConcatSubTensorMutator.cpp
index 241c07b367..937528d143 100644
--- a/src/graph/mutators/DepthConcatSubTensorMutator.cpp
+++ b/src/graph/mutators/DepthConcatSubTensorMutator.cpp
@@ -26,6 +26,7 @@
 #include "arm_compute/graph/Graph.h"
 #include "arm_compute/graph/Logger.h"
 #include "arm_compute/graph/Utils.h"
+#include "arm_compute/graph/algorithms/TopologicalSort.h"
 #include "arm_compute/graph/backends/BackendRegistry.h"
 #include "arm_compute/graph/nodes/ConcatenateLayerNode.h"

@@ -43,16 +44,26 @@ const char *DepthConcatSubTensorMutator::name()

 void DepthConcatSubTensorMutator::mutate(Graph &g)
 {
+    // Early exit if no Concatenation layers exist in graph
+    if(g.nodes(NodeType::ConcatenateLayer).empty())
+    {
+        return;
+    }
+
+    // Perform topological sort
+    std::vector<NodeID> topological_sorted_node_ids = dfs(g);
+
     // Should be in reverse order of execution
-    for(auto &node : arm_compute::utils::iterable::reverse_iterate(g.nodes()))
+    for(auto &node_id : arm_compute::utils::iterable::reverse_iterate(topological_sorted_node_ids))
     {
-        if(node && node->type() == NodeType::ConcatenateLayer && node->output(0) != nullptr)
+        INode *node = g.node(node_id);
+        if(node != nullptr && node->type() == NodeType::ConcatenateLayer && node->output(0) != nullptr)
         {
             // Get output tensor
             auto output_tensor = node->output(0);

             // Check concatenation axis (sub-tensor optimization is supported for concatenation axis >= 2)
-            auto *concat_node = arm_compute::utils::cast::polymorphic_downcast<ConcatenateLayerNode *>(node.get());
+            auto *concat_node = arm_compute::utils::cast::polymorphic_downcast<ConcatenateLayerNode *>(node);
             if(output_tensor == nullptr || get_dimension_idx(output_tensor->desc(), concat_node->concatenation_axis()) < 2)
             {
                 continue;
@@ -84,7 +95,7 @@ void DepthConcatSubTensorMutator::mutate(Graph &g)
                 depth += input_shape.z();
             }

-            auto *dc_node = arm_compute::utils::cast::polymorphic_downcast<ConcatenateLayerNode *>(node.get());
+            auto *dc_node = arm_compute::utils::cast::polymorphic_downcast<ConcatenateLayerNode *>(node);
             dc_node->set_enabled(false);
         }
     }
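Aside (not part of the patch): the mutator must encounter each ConcatenateLayer node before the nodes that feed it, so that the concatenated output tensor is known before its inputs are re-bound as sub-tensor views of it. Below is a minimal, self-contained C++ sketch of that ordering; TinyGraph and reverse_execution_order are illustrative names, and unlike the library's dfs(), which returns a forward topological order that the mutator then reverse-iterates, this sketch emits the reversed order directly via post-order DFS.

// Standalone sketch: post-order DFS emits a node only after everything it
// feeds, which is exactly the "reverse order of execution" the mutator needs.
#include <cstddef>
#include <functional>
#include <iostream>
#include <vector>

struct TinyGraph
{
    std::vector<std::vector<std::size_t>> consumers; // consumers[i]: nodes fed by node i
};

std::vector<std::size_t> reverse_execution_order(const TinyGraph &g)
{
    std::vector<std::size_t> order;
    std::vector<bool>        visited(g.consumers.size(), false);

    std::function<void(std::size_t)> visit = [&](std::size_t n)
    {
        visited[n] = true;
        for(std::size_t c : g.consumers[n])
        {
            if(!visited[c])
            {
                visit(c);
            }
        }
        order.push_back(n); // emitted after all of its consumers
    };

    for(std::size_t n = 0; n < g.consumers.size(); ++n)
    {
        if(!visited[n])
        {
            visit(n);
        }
    }
    return order;
}

int main()
{
    TinyGraph g{ { { 2 }, { 2 }, {} } }; // nodes 0 and 1 both feed concat node 2
    for(std::size_t n : reverse_execution_order(g))
    {
        std::cout << n << ' '; // prints: 2 0 1 (concat seen before its producers)
    }
    std::cout << '\n';
}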
diff --git a/src/graph/mutators/GroupedConvolutionMutator.cpp b/src/graph/mutators/GroupedConvolutionMutator.cpp
new file mode 100644
index 0000000000..d2643d5428
--- /dev/null
+++ b/src/graph/mutators/GroupedConvolutionMutator.cpp
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/mutators/GroupedConvolutionMutator.h"
+
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/GraphBuilder.h"
+#include "arm_compute/graph/Logger.h"
+#include "arm_compute/graph/Utils.h"
+#include "arm_compute/graph/backends/BackendRegistry.h"
+#include "arm_compute/graph/nodes/Nodes.h"
+
+#include "arm_compute/core/utils/misc/Cast.h"
+
+#include <algorithm>
+
+namespace arm_compute
+{
+namespace graph
+{
+namespace
+{
+NodeID create_grouped_convolution(Graph &g, const NodeParams &params, NodeIdxPair input, NodeID weights, NodeID bias,
+                                  PadStrideInfo conv_info, ConvolutionMethod method, FastMathHint fast_math_hint, unsigned int num_groups)
+{
+    bool has_bias = (bias != EmptyNodeID);
+
+    // Split input
+    const TensorDescriptor input_tensor_desc = get_tensor_descriptor(g, g.node(input.node_id)->outputs()[0]);
+    const unsigned int     input_idx         = get_dimension_idx(input_tensor_desc, DataLayoutDimension::CHANNEL);
+    NodeID                 input_split       = GraphBuilder::add_split_node(g, params, input, num_groups, input_idx);
+
+    // Split weights
+    const TensorDescriptor weights_tensor_desc = get_tensor_descriptor(g, g.node(weights)->outputs()[0]);
+    const unsigned int     batch_idx           = get_dimension_idx(weights_tensor_desc, DataLayoutDimension::BATCHES);
+    NodeID                 weights_split       = GraphBuilder::add_split_node(g, params, { weights, 0 }, num_groups, batch_idx);
+
+    // Split bias
+    NodeID bias_split = EmptyNodeID;
+    if(has_bias)
+    {
+        bias_split = GraphBuilder::add_split_node(g, params, { bias, 0 }, num_groups, 0);
+    }
+
+    std::vector<NodeIdxPair> convolution_outputs;
+    for(unsigned int i = 0; i < num_groups; ++i)
+    {
+        NodeParams group_params = params;
+        NodeID     conv_nid     = g.add_node<ConvolutionLayerNode>(conv_info, 1, method, fast_math_hint);
+        g.add_connection(input_split, i, conv_nid, 0);
+        g.add_connection(weights_split, i, conv_nid, 1);
+        if(has_bias)
+        {
+            g.add_connection(bias_split, i, conv_nid, 2);
+        }
+
+        // Add group name
+        if(!group_params.name.empty())
+        {
+            group_params.name.append("_g" + arm_compute::support::cpp11::to_string(i));
+        }
+
+        // Set node parameters
+        INode *node = g.node(conv_nid);
+        ARM_COMPUTE_ERROR_ON(node == nullptr);
+        node->set_common_node_parameters(group_params);
+
+        convolution_outputs.push_back({ conv_nid, 0 });
+    }
+
+    // Depth concatenate output
+    return GraphBuilder::add_concatenate_node(g, params, convolution_outputs, DataLayoutDimension::CHANNEL);
+}
+} // namespace
+
+const char *GroupedConvolutionMutator::name()
+{
+    return "GroupedConvolutionMutator";
+}
+
+void GroupedConvolutionMutator::mutate(Graph &g)
+{
+    // Early exit if no Convolution layers exist in graph
+    if(g.nodes(NodeType::ConvolutionLayer).empty())
+    {
+        return;
+    }
+
+    // Total nodes
+    size_t total_nodes = g.nodes().size();
+
+    // Iterate over convolution nodes
+    for(unsigned int i = 0; i < total_nodes; ++i)
+    {
+        INode *node = g.node(i);
+        if(node != nullptr && node->type() == NodeType::ConvolutionLayer && arm_compute::utils::cast::polymorphic_downcast<ConvolutionLayerNode *>(node)->num_groups() != 1)
+        {
+            // Validate node
+            backends::IDeviceBackend *backend = backends::BackendRegistry::get().find_backend(node->assigned_target());
+            Status                    status  = backend->validate_node(*node);
+
+            // If grouped convolution is not supported
+            if(!bool(status))
+            {
+                // Down-cast node
+                auto *conv_node = arm_compute::utils::cast::polymorphic_downcast<ConvolutionLayerNode *>(node);
+
+                // Get internal convolution info
+                // TODO (geopin01) : Create a descriptor
+                const PadStrideInfo     conv_info       = conv_node->convolution_info();
+                const ConvolutionMethod conv_method     = conv_node->convolution_method();
+                const FastMathHint      fast_math_hint  = conv_node->fast_math_hint();
+                const unsigned int      num_groups      = conv_node->num_groups();
+                const NodeParams        params          = conv_node->common_node_params();
+                const Target            assigned_target = conv_node->assigned_target();
+
+                // Extract node ids
+                const NodeID input_id   = conv_node->input_id(0);
+                const NodeID weights_id = conv_node->input_id(1);
+                const NodeID bias_id    = conv_node->input_id(2);
+
+                // Get driving nodes
+                std::vector<NodeIdxPair> driving_nodes = get_driving_nodes(*node);
+
+                // Extract output accessor, if any
+                auto node_accessor = conv_node->output(0)->extract_accessor();
+
+                // Current max tensor and node id
+                TensorID latest_tid = g.tensors().size();
+                NodeID   latest_nid = g.nodes().size();
+
+                // Create grouped convolution node
+                NodeID grouped_conv_id = create_grouped_convolution(g, params, { input_id, 0 }, weights_id, bias_id,
+                                                                    conv_info, conv_method, fast_math_hint, num_groups);
+
+                // Remove the original convolution node
+                g.remove_node(node->id());
+
+                // Reconnect the driving nodes to the grouped convolution output
+                for(auto &driving_node : driving_nodes)
+                {
+                    g.add_connection(grouped_conv_id, 0, driving_node.node_id, driving_node.index);
+                }
+
+                // Transfer the extracted accessor to the grouped convolution output
+                g.node(grouped_conv_id)->output(0)->set_accessor(std::move(node_accessor));
+
+                // Configure new tensors and nodes
+                std::for_each(g.tensors().begin() + latest_tid, g.tensors().end(), [](std::unique_ptr<Tensor> &t)
+                {
+                    configure_tensor(t.get());
+                });
+                std::for_each(g.nodes().begin() + latest_nid, g.nodes().end(), [&assigned_target](std::unique_ptr<INode> &n)
+                {
+                    if(n != nullptr)
+                    {
+                        n->set_assigned_target(assigned_target);
+                    }
+                });
+            }
+        }
+    }
+}
+} // namespace graph
+} // namespace arm_compute
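Aside (not part of the patch): create_grouped_convolution() rewrites one grouped convolution as num_groups parallel convolutions, splitting the input along its channel dimension and the weights along their batch dimension, then concatenating the per-group outputs back along channels. The sketch below shows only the shape arithmetic this rewrite relies on; GroupShapes and its field names are illustrative, not arm_compute types.

// Standalone sketch of the per-group shape arithmetic behind the rewrite.
#include <cassert>
#include <cstdio>

struct GroupShapes
{
    unsigned int in_channels_per_group; // channel slice fed to each group
    unsigned int kernels_per_group;     // weight "batches" given to each group
    unsigned int out_channels;          // channels after concatenation
};

GroupShapes grouped_conv_shapes(unsigned int in_channels, unsigned int num_kernels, unsigned int num_groups)
{
    // Both counts must divide evenly, otherwise the split nodes
    // cannot produce equal-sized sub-tensors.
    assert(in_channels % num_groups == 0);
    assert(num_kernels % num_groups == 0);
    return { in_channels / num_groups, num_kernels / num_groups, num_kernels };
}

int main()
{
    // e.g. a 256-channel input convolved by 512 kernels with 32 groups
    GroupShapes s = grouped_conv_shapes(256, 512, 32);
    std::printf("%u in-channels and %u kernels per group, %u out-channels\n",
                s.in_channels_per_group, s.kernels_per_group, s.out_channels);
    return 0;
}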
diff --git a/src/graph/mutators/NodeExecutionMethodMutator.cpp b/src/graph/mutators/NodeExecutionMethodMutator.cpp
new file mode 100644
index 0000000000..896bf0742c
--- /dev/null
+++ b/src/graph/mutators/NodeExecutionMethodMutator.cpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/mutators/NodeExecutionMethodMutator.h"
+
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/Logger.h"
+#include "arm_compute/graph/Utils.h"
+#include "arm_compute/graph/backends/BackendRegistry.h"
+#include "arm_compute/graph/nodes/Nodes.h"
+
+#include "arm_compute/core/utils/misc/Cast.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+namespace
+{
+/** Runs a default setter function on all nodes of a given type
+ *
+ * @tparam Setter Setter function type
+ *
+ * @param[in, out] g         Graph to extract the nodes from
+ * @param[in]      node_type Node type
+ * @param[in]      setter    Setter function
+ */
+template <typename Setter>
+void set_default_on_invalid_method(Graph &g, NodeType node_type, Setter &&setter)
+{
+    const std::vector<NodeID> &node_ids = g.nodes(node_type);
+    for(auto &node_id : node_ids)
+    {
+        INode *node = g.node(node_id);
+        if(node != nullptr)
+        {
+            // Validate node
+            backends::IDeviceBackend *backend = backends::BackendRegistry::get().find_backend(node->assigned_target());
+            Status                    status  = backend->validate_node(*node);
+
+            // Set default execution method in case of failure
+            if(!bool(status))
+            {
+                setter(node);
+            }
+        }
+    }
+}
+} // namespace
+
+const char *NodeExecutionMethodMutator::name()
+{
+    return "NodeExecutionMethodMutator";
+}
+
+void NodeExecutionMethodMutator::mutate(Graph &g)
+{
+    // Convolution Layer
+    set_default_on_invalid_method(g, NodeType::ConvolutionLayer, [](INode * n)
+    {
+        ARM_COMPUTE_LOG_GRAPH_INFO("Switched ConvolutionLayer method of node with ID : "
+                                   << n->id() << " and Name: " << n->name() << std::endl);
+        auto *casted_node = arm_compute::utils::cast::polymorphic_downcast<ConvolutionLayerNode *>(n);
+        casted_node->set_convolution_method(ConvolutionMethod::Default);
+    });
+
+    // Depthwise Convolution Layer
+    set_default_on_invalid_method(g, NodeType::DepthwiseConvolutionLayer, [](INode * n)
+    {
+        ARM_COMPUTE_LOG_GRAPH_INFO("Switched Depthwise ConvolutionLayer method of node with ID : "
+                                   << n->id() << " and Name: " << n->name() << std::endl);
+        auto *casted_node = arm_compute::utils::cast::polymorphic_downcast<DepthwiseConvolutionLayerNode *>(n);
+        casted_node->set_depthwise_convolution_method(DepthwiseConvolutionMethod::Default);
+    });
+}
+} // namespace graph
+} // namespace arm_compute
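Aside (not part of the patch): GroupedConvolutionMutator and NodeExecutionMethodMutator share a validate-then-fall-back idiom: ask the assigned backend to validate a node and, on failure, degrade to a configuration that is always supported rather than failing at run time. A toy sketch of the idiom follows; Method, FakeNode, and validate() are simplified stand-ins, not arm_compute types.

// Standalone sketch of the validate-then-fall-back pattern.
#include <iostream>
#include <string>
#include <vector>

enum class Method { Winograd, Direct, Default };

struct FakeNode
{
    std::string name;
    Method      method;
};

// Pretend backend check: only Default is "supported" in this toy example
bool validate(const FakeNode &n)
{
    return n.method == Method::Default;
}

void set_default_on_invalid(std::vector<FakeNode> &nodes)
{
    for(auto &n : nodes)
    {
        if(!validate(n))
        {
            std::cout << "Switching method of node " << n.name << " to Default\n";
            n.method = Method::Default; // fall back instead of failing later
        }
    }
}

int main()
{
    std::vector<FakeNode> nodes{ { "conv1", Method::Winograd }, { "conv2", Method::Default } };
    set_default_on_invalid(nodes); // only conv1 is switched
}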
diff --git a/src/graph/mutators/NodeFusionMutator.cpp b/src/graph/mutators/NodeFusionMutator.cpp
index 6677330cec..82bfe25a3e 100644
--- a/src/graph/mutators/NodeFusionMutator.cpp
+++ b/src/graph/mutators/NodeFusionMutator.cpp
@@ -25,6 +25,7 @@

 #include "arm_compute/graph/Graph.h"
 #include "arm_compute/graph/Logger.h"
+#include "arm_compute/graph/Utils.h"
 #include "arm_compute/graph/nodes/Nodes.h"

 #include "arm_compute/core/utils/misc/Cast.h"
@@ -71,17 +72,7 @@ void fuse_batch_norm_with_activation(Graph &g)
             if(bn_node->output(0)->accessor() == nullptr)
             {
                 // Get driving nodes of activation node
-                std::vector<NodeIdxPair> act_driving_nodes;
-                for(auto &act_output_edge_id : act_node->output_edges())
-                {
-                    auto act_output_edge = g.edge(act_output_edge_id);
-                    if(act_output_edge != nullptr)
-                    {
-                        ARM_COMPUTE_ERROR_ON(act_output_edge->consumer() == nullptr);
-                        act_driving_nodes.push_back(
-                        { act_output_edge->consumer_id(), act_output_edge->consumer_idx() });
-                    }
-                }
+                std::vector<NodeIdxPair> act_driving_nodes = get_driving_nodes(*act_node);

                 // Set activation info to batch normalization
                 bn_node->set_fused_activation(act_node->activation_info());
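Aside (not part of the patch): this hunk replaces a hand-rolled walk over the activation node's output edges with the shared get_driving_nodes() helper also used by GroupedConvolutionMutator. A simplified standalone sketch of what such a helper collects; Edge and NodeIdxPair here are stand-ins for the arm_compute types.

// Standalone sketch: collect every (consumer node, consumer input index)
// pair reachable over a node's output edges.
#include <cstdio>
#include <vector>

struct NodeIdxPair
{
    unsigned int node_id;
    unsigned int index;
};

struct Edge
{
    unsigned int consumer_id;  // node driven by this edge
    unsigned int consumer_idx; // input slot on the consumer
};

std::vector<NodeIdxPair> get_driving_nodes(const std::vector<Edge> &output_edges)
{
    std::vector<NodeIdxPair> driving_nodes;
    for(const auto &e : output_edges)
    {
        driving_nodes.push_back({ e.consumer_id, e.consumer_idx });
    }
    return driving_nodes;
}

int main()
{
    std::vector<Edge> edges{ { 4, 0 }, { 7, 1 } };
    for(const auto &dn : get_driving_nodes(edges))
    {
        std::printf("drives node %u at input %u\n", dn.node_id, dn.index);
    }
}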
diff --git a/src/graph/mutators/SplitLayerSubTensorMutator.cpp b/src/graph/mutators/SplitLayerSubTensorMutator.cpp
index 2a8c029843..5f1c9c3186 100644
--- a/src/graph/mutators/SplitLayerSubTensorMutator.cpp
+++ b/src/graph/mutators/SplitLayerSubTensorMutator.cpp
@@ -25,6 +25,7 @@

 #include "arm_compute/graph/Graph.h"
 #include "arm_compute/graph/Logger.h"
+#include "arm_compute/graph/algorithms/TopologicalSort.h"
 #include "arm_compute/graph/backends/BackendRegistry.h"
 #include "arm_compute/graph/nodes/SplitLayerNode.h"

@@ -42,10 +43,20 @@ const char *SplitLayerSubTensorMutator::name()

 void SplitLayerSubTensorMutator::mutate(Graph &g)
 {
+    // Early exit if no Split layers exist in graph
+    if(g.nodes(NodeType::SplitLayer).empty())
+    {
+        return;
+    }
+
+    // Perform topological sort
+    std::vector<NodeID> topological_sorted_node_ids = dfs(g);
+
     // Should be in reverse order of execution
-    for(auto &node : arm_compute::utils::iterable::reverse_iterate(g.nodes()))
+    for(auto &node_id : arm_compute::utils::iterable::reverse_iterate(topological_sorted_node_ids))
     {
-        if(node && node->type() == NodeType::SplitLayer && node->input(0) != nullptr)
+        INode *node = g.node(node_id);
+        if(node != nullptr && node->type() == NodeType::SplitLayer && node->input(0) != nullptr)
         {
             // Get input tensor
             Tensor *input_tensor = node->input(0);
@@ -63,7 +74,7 @@ void SplitLayerSubTensorMutator::mutate(Graph &g)
             ARM_COMPUTE_LOG_GRAPH_VERBOSE("Using sub-tensors for the node with ID : "
                                           << node->id() << " and name : " << node->name() << std::endl);

-            auto *split_node = arm_compute::utils::cast::polymorphic_downcast<SplitLayerNode *>(node.get());
+            auto *split_node = arm_compute::utils::cast::polymorphic_downcast<SplitLayerNode *>(node);

             const unsigned int axis       = split_node->axis();
             const unsigned int num_splits = split_node->num_splits();
--
cgit v1.2.1
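Aside (not part of the patch): the split-to-sub-tensor rewrite turns each output of a SplitLayer node into a view into the parent tensor that begins at i * (dim / num_splits) along the split axis. A small sketch of that arithmetic, with simplified stand-in types (SubTensorView and split_view are illustrative, not arm_compute APIs).

// Standalone sketch of the start-coordinate arithmetic for equal splits.
#include <cassert>
#include <cstdio>

struct SubTensorView
{
    unsigned int start; // first element along the split axis
    unsigned int size;  // extent along the split axis
};

SubTensorView split_view(unsigned int axis_dim, unsigned int num_splits, unsigned int i)
{
    assert(axis_dim % num_splits == 0); // equal-sized splits only
    const unsigned int split_size = axis_dim / num_splits;
    return { i * split_size, split_size };
}

int main()
{
    // Splitting a 96-element axis into 3 sub-tensor views
    for(unsigned int i = 0; i < 3; ++i)
    {
        SubTensorView v = split_view(96, 3, i);
        std::printf("split %u: start=%u size=%u\n", i, v.start, v.size);
    }
}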