From 749021ae84dcc1221917338f288b0eedc36da093 Mon Sep 17 00:00:00 2001
From: giuros01
Date: Thu, 14 Mar 2019 16:19:41 +0000
Subject: COMPMID-1995: Revert fusing convolution to batch norm due to
 performance regressions

Change-Id: I1476727e8e22f03d7dc7d79956ac02f16c06e814
Signed-off-by: giuros01
Reviewed-on: https://review.mlplatform.org/c/857
Reviewed-by: Gian Marco Iodice
Tested-by: Arm Jenkins
---
 src/graph/mutators/NodeFusionMutator.cpp | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/graph/mutators/NodeFusionMutator.cpp b/src/graph/mutators/NodeFusionMutator.cpp
index 445748caf7..5927a597bb 100644
--- a/src/graph/mutators/NodeFusionMutator.cpp
+++ b/src/graph/mutators/NodeFusionMutator.cpp
@@ -221,7 +221,9 @@ void NodeFusionMutator::mutate(Graph &g)
     detail::fuse_layer(g, empty_prec, detail::fuse_node_with_activation, supported_fused_activations);
     detail::fuse_layer(g, empty_prec, detail::fuse_node_with_activation, supported_fused_activations);
     detail::fuse_layer(g, qs8_prec, detail::fuse_node_with_activation, supported_fused_activations);
-    detail::fuse_layer(g, empty_prec, detail::fuse_convolution_with_batch_normalization);
+
+    // TODO (COMPMID-2055): re-enable once we fuse bias and activations to convolution
+    // detail::fuse_layer(g, empty_prec, detail::fuse_convolution_with_batch_normalization);
 }
 } // namespace graph
 } // namespace arm_compute
--
cgit v1.2.1
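
Note on the disabled pass: fusing a convolution with a following batch normalization node amounts to folding the normalization parameters into the convolution's weights and bias, so the two nodes collapse into one. The sketch below is a minimal, stand-alone illustration of that folding under an assumed per-output-channel parameter layout; ConvParams, BatchNormParams and fold_batch_norm_into_conv are hypothetical names and are not the Arm Compute Library's fuse_convolution_with_batch_normalization implementation.

#include <cmath>
#include <cstddef>
#include <vector>

// Hypothetical per-output-channel parameter containers (illustration only).
struct ConvParams
{
    std::vector<std::vector<float>> weights; // weights[c] = flattened kernel of output channel c
    std::vector<float>              bias;    // bias[c]
};

struct BatchNormParams
{
    std::vector<float> mean, var, gamma, beta; // one entry per output channel
    float              epsilon{ 0.001f };
};

// Fold batch normalization into the preceding convolution:
//   y = gamma * (conv(x) - mean) / sqrt(var + eps) + beta
// becomes a single convolution with rescaled weights and an adjusted bias.
void fold_batch_norm_into_conv(ConvParams &conv, const BatchNormParams &bn)
{
    for(std::size_t c = 0; c < conv.weights.size(); ++c)
    {
        const float scale = bn.gamma[c] / std::sqrt(bn.var[c] + bn.epsilon);
        for(float &w : conv.weights[c])
        {
            w *= scale; // w' = w * gamma / sqrt(var + eps)
        }
        conv.bias[c] = (conv.bias[c] - bn.mean[c]) * scale + bn.beta[c]; // b' = (b - mean) * scale + beta
    }
}

As the TODO in the patch notes, the graph-level pass stays commented out until bias and activations can also be fused into the convolution node (COMPMID-2055), since without that the folded path regressed performance.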