about summary refs log tree commit diff
path: root/src/graph/mutators/NodeFusionMutator.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/graph/mutators/NodeFusionMutator.cpp')
-rw-r--r-- src/graph/mutators/NodeFusionMutator.cpp | 30
1 file changed, 16 insertions(+), 14 deletions(-)
diff --git a/src/graph/mutators/NodeFusionMutator.cpp b/src/graph/mutators/NodeFusionMutator.cpp
index 82bfe25a3e..7e66ce0757 100644
--- a/src/graph/mutators/NodeFusionMutator.cpp
+++ b/src/graph/mutators/NodeFusionMutator.cpp
@@ -38,26 +38,24 @@ namespace graph
{
namespace detail
{
-void fuse_batch_norm_with_activation(Graph &g)
+template <typename N>
+void fuse_node_with_activation(Graph &g, const std::set<Activation> &supported_fused_activations)
{
- // Supported activations when fusing
- const std::set<Activation> supported_fused_activations = { Activation::RELU, Activation::BOUNDED_RELU, Activation::LU_BOUNDED_RELU };
-
// Not interested in the order of nodes
for(auto &node : g.nodes())
{
-        // Check if the node is batch norm and not a branching node
+        // Check if the node is of the fused type N and not a branching node
- if(node && node->type() == NodeType::BatchNormalizationLayer && node->output_edges().size() == 1)
+ if(node && node->type() == N::node_type && node->output_edges().size() == 1)
{
auto output_edge_id = *node->output_edges().begin();
auto output_edge = g.edge(output_edge_id);
// Check if following node is an activation layer node
if((output_edge != nullptr) && (output_edge->consumer() != nullptr) && (output_edge->consumer()->type() == NodeType::ActivationLayer))
{
- auto *bn_node = arm_compute::utils::cast::polymorphic_downcast<BatchNormalizationLayerNode *>(output_edge->producer());
+ auto *n_node = arm_compute::utils::cast::polymorphic_downcast<N *>(output_edge->producer());
auto *act_node = arm_compute::utils::cast::polymorphic_downcast<ActivationLayerNode *>(output_edge->consumer());
- ARM_COMPUTE_ERROR_ON(act_node->output(0) == nullptr || bn_node->output(0) == nullptr);
+ ARM_COMPUTE_ERROR_ON(act_node->output(0) == nullptr || n_node->output(0) == nullptr);
// Check if activation is supported for fusion
if(supported_fused_activations.count(act_node->activation_info().activation()) == 0)
@@ -65,17 +63,17 @@ void fuse_batch_norm_with_activation(Graph &g)
continue;
}
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Fusing Batch Normalization node with ID : " << output_edge->producer_id()
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE("Fusing node with ID : " << output_edge->producer_id()
<< " with Activation Layer node with ID : " << output_edge->consumer_id() << std::endl);
-                // Prevent fusion if batch normalization node has an output accessor
+                // Prevent fusion if the node has an output accessor
- if(bn_node->output(0)->accessor() == nullptr)
+ if(n_node->output(0)->accessor() == nullptr)
{
// Get driving nodes of activation node
std::vector<NodeIdxPair> act_driving_nodes = get_driving_nodes(*act_node);
-                    // Set activation info to batch normalization
+                    // Set fused activation info on the node
- bn_node->set_fused_activation(act_node->activation_info());
+ n_node->set_fused_activation(act_node->activation_info());
// Extract activation node accessor if any
auto act_node_accessor = act_node->output(0)->extract_accessor();
@@ -86,15 +84,15 @@ void fuse_batch_norm_with_activation(Graph &g)
-                    // Update batch normalization node outputs
+                    // Update the fused node's output connections
for(auto &driving_node : act_driving_nodes)
{
- g.add_connection(bn_node->id(), 0, driving_node.node_id, driving_node.index);
+ g.add_connection(n_node->id(), 0, driving_node.node_id, driving_node.index);
}
-                    // Update accessor to batch normalization node
+                    // Transfer the activation node's accessor to the fused node
- bn_node->output(0)->set_accessor(std::move(act_node_accessor));
+ n_node->output(0)->set_accessor(std::move(act_node_accessor));
}
else
{
- ARM_COMPUTE_LOG_GRAPH_VERBOSE("Prevented fusion as batch normalization node has an output accessor\n");
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE("Prevented fusion of node with activation due to the presence of an output accessor\n");
}
}
}
@@ -109,7 +107,11 @@ const char *NodeFusionMutator::name()
void NodeFusionMutator::mutate(Graph &g)
{
- detail::fuse_batch_norm_with_activation(g);
+ // Supported activations when fusing
+ const std::set<Activation> supported_fused_activations = { Activation::RELU, Activation::BOUNDED_RELU, Activation::LU_BOUNDED_RELU };
+
+ detail::fuse_node_with_activation<BatchNormalizationLayerNode>(g, supported_fused_activations);
+ detail::fuse_node_with_activation<ConvolutionLayerNode>(g, supported_fused_activations);
}
} // namespace graph
} // namespace arm_compute