diff options
author | Dwight Lidman <dwight.lidman@arm.com> | 2020-11-05 10:34:41 +0100 |
---|---|---|
committer | patrik.gustavsson <patrik.gustavsson@arm.com> | 2020-11-10 13:40:57 +0000 |
commit | 73320a48dfa711f5938b0e3d8e03b9858558b899 (patch) | |
tree | 9f1b43f86304ce59681a35304401034e4dfa152d /ethosu/vela/graph_optimiser.py | |
parent | 6ae0e4212abf1b92506fcbb180f647a953a37d89 (diff) | |
download | ethos-u-vela-73320a48dfa711f5938b0e3d8e03b9858558b899.tar.gz |
MLBEDSW-3377: fixup_stridedslice_output may silently change CPU ops
This commit removes the constraint on all tensor
shapes matching the OFM shape.
The motivation is that this constraint essentially
only checks that the fixup function has run.
As a consequence, the fixup function could never
run after the supported operator check, which
effectively meant that any StridedSlice operator
destined for the CPU was still modified by the
fixup function.
Because the fixup function is moved to after the
supported operators check, some unreachable cases
are removed from the fixup function.
Signed-off-by: Dwight Lidman <dwight.lidman@arm.com>
Change-Id: I7a82126b7de73bd67873b4e6daf53a6767e33d16
Diffstat (limited to 'ethosu/vela/graph_optimiser.py')
-rw-r--r-- | ethosu/vela/graph_optimiser.py | 16 |
1 file changed, 4 insertions, 12 deletions
diff --git a/ethosu/vela/graph_optimiser.py b/ethosu/vela/graph_optimiser.py index 32f97d2f..e31348b5 100644 --- a/ethosu/vela/graph_optimiser.py +++ b/ethosu/vela/graph_optimiser.py @@ -422,19 +422,12 @@ def unfuse_activation_function(op, arch, nng): def fixup_stridedslice_output(tens, arch, nng): op = tens.ops[0] - if op.type == Op.StridedSlice: + if op.run_on_npu and op.type == Op.StridedSlice: reshape_input_shape = tens.shape new_axis_mask = op.attrs["new_axis_mask"] shrink_axis_mask = op.attrs["shrink_axis_mask"] - ellipsis_mask = op.attrs["ellipsis_mask"] - if (new_axis_mask != 0 and shrink_axis_mask != 0) or ellipsis_mask != 0: - # Not supported, will be put on CPU - return tens - if shrink_axis_mask == 0 and new_axis_mask == 0: - # Equal Rank StridedSlice, no need to insert reshape - return tens - elif shrink_axis_mask != 0: + if shrink_axis_mask != 0: n = 0 axis = 0 while shrink_axis_mask: @@ -446,7 +439,6 @@ def fixup_stridedslice_output(tens, arch, nng): assert len(tens.shape) == (len(op.inputs[0].shape) - n) op.attrs["shrink_axis_mask"] = 0 - elif new_axis_mask != 0: n = 0 axis = 0 @@ -1092,7 +1084,7 @@ def optimise_graph_a(nng, arch, verbose_graph=False): for idx, sg in enumerate(nng.subgraphs): # rewrite graph pass nng.subgraphs[idx] = rewrite_graph.rewrite_graph_pre_order( - nng, sg, arch, [fixup_stridedslice_output], op_rewrite_list, rewrite_unsupported=False, + nng, sg, arch, [], op_rewrite_list, rewrite_unsupported=False, ) for idx, sg in enumerate(nng.subgraphs): @@ -1113,7 +1105,7 @@ def optimise_graph_b(nng, arch, verbose_graph=False): for idx, sg in enumerate(nng.subgraphs): # combined rewrite graph pass nng.subgraphs[idx] = rewrite_graph.rewrite_graph_pre_order( - nng, sg, arch, [fixup_unpack_output, rewrite_concat, rewrite_split], [] + nng, sg, arch, [fixup_unpack_output, fixup_stridedslice_output, rewrite_concat, rewrite_split], [] ) if verbose_graph: |