From 2349d429d926e258e9a61d34c7fd97660ab9fb98 Mon Sep 17 00:00:00 2001
From: Patrik Gustavsson
Date: Tue, 1 Dec 2020 16:02:29 +0100
Subject: MLBEDSW-3654 Add/use op ifm/ofm shapes

Add ifm/ofm shapes to op
Changed to rely on these shapes

Signed-off-by: Patrik Gustavsson
Change-Id: I571535a1dcadc2bdb04a3c727a8e1c49703b174d
---
 ethosu/vela/softmax.py | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

(limited to 'ethosu/vela/softmax.py')

diff --git a/ethosu/vela/softmax.py b/ethosu/vela/softmax.py
index 8b061297..98496539 100644
--- a/ethosu/vela/softmax.py
+++ b/ethosu/vela/softmax.py
@@ -213,7 +213,7 @@ class SoftMax:
         ofm = self.op.outputs[0]

         # Reshape ifm/ofm (if needed)
-        full_shape = ifm.get_full_shape()
+        full_shape = self.op.ifm_shapes[0]
         if full_shape[0] > 1:
             full_shape[1] *= full_shape[0]
             full_shape[0] = 1
@@ -230,9 +230,6 @@ class SoftMax:

     def get_graph_8bit(self, ifm, ofm):
         exp_lut = self.generate_exp_table(self.op.attrs.get("beta", 1.0), ifm.quantization.scale_f32)
-        ifm = create_reshape_tensor(ifm, ifm.get_full_shape())
-        DebugDatabase.add_optimised(self.op, ifm.ops[0])
-        ofm = create_reshape_tensor(ofm, ofm.get_full_shape(), False)
        no_scale_quant = ifm.quantization.clone()
        no_scale_quant.scale_f32 = None
        no_scale_quant.zero_point = 0
--
cgit v1.2.1
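
For reference, the first hunk keeps the existing batch-flattening logic and only changes where the shape comes from: the operator's stored shape (self.op.ifm_shapes[0]) rather than recomputing it from the tensor with ifm.get_full_shape(). Below is a minimal Python sketch of that flattening step, assuming a 4-element NHWC shape held as a plain list; the helper name flatten_batch_into_height is hypothetical and not part of the vela code, and the copy it makes is a sketch-level choice (the patched code operates on the stored shape directly).

# Minimal sketch (not the vela implementation): fold the batch dimension of an
# NHWC shape into height, mirroring the logic the patch leaves in place.
def flatten_batch_into_height(ifm_shape):
    # ifm_shape is assumed to be a 4-element NHWC list, e.g. [2, 8, 8, 16].
    full_shape = list(ifm_shape)        # copy so the caller's shape is not mutated
    if full_shape[0] > 1:               # batch > 1: collapse N into H
        full_shape[1] *= full_shape[0]  # H' = N * H
        full_shape[0] = 1               # N' = 1
    return full_shape

# Example: a batch of 2 becomes a single batch with doubled height.
assert flatten_batch_into_height([2, 8, 8, 16]) == [1, 16, 8, 16]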