From c509d33b81d0c2fadd938ec9445d0616db86e5fb Mon Sep 17 00:00:00 2001 From: Patrik Gustavsson Date: Tue, 22 Dec 2020 13:53:52 +0100 Subject: MLBEDSW-3654 Fix setting op ifm ofm for LeakyRelu Added op.set_ifm_ofm_shapes to the conversion functions Signed-off-by: Patrik Gustavsson Change-Id: I727d4cf34395bc0997863df1ac89537f84f9c7c8 --- ethosu/vela/graph_optimiser.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ethosu/vela/graph_optimiser.py b/ethosu/vela/graph_optimiser.py index e468f6df..6d2696c4 100644 --- a/ethosu/vela/graph_optimiser.py +++ b/ethosu/vela/graph_optimiser.py @@ -893,6 +893,7 @@ def convert_mul_max_to_abs_or_lrelu(op, arch, nng): op.name = op.name.replace("Maximum", new_op.name) op.outputs[0].name = op.outputs[0].name.replace("Maximum", new_op.name) op.inputs = [shared_in] + op.set_ifm_ofm_shapes() # Record optimisation in debug database DebugDatabase.add_optimised(op, op) @@ -953,6 +954,7 @@ def convert_lrelu_to_mul_max(op, arch): ifm.consumer_list.remove(op) op.add_input_tensor(fm_alpha) op.add_input_tensor(fm_id) + op.set_ifm_ofm_shapes() DebugDatabase.add_optimised(op, op) return op @@ -982,6 +984,7 @@ def convert_to_lut(op, lut_values, lut_name): op.forced_output_quantization = ifm.quantization lut_tensor = lut.create_lut_tensor(op.name + "_values", lut_values, DataType.int8) op.set_activation_lut(lut_tensor) + op.set_ifm_ofm_shapes() return op -- cgit v1.2.1