From 7c0607e142456ebd3577c756b419a3b551cdeafb Mon Sep 17 00:00:00 2001
From: Fredrik Svedberg
Date: Tue, 12 Oct 2021 15:01:53 +0200
Subject: MLBEDSW-5315 MLCE: Vela to handle skip Tensor

Removed a graph optimisation that is no longer needed and that caused
problems in Vela when the output of FullyConnected operators running
on the CPU was consumed by elementwise operators.

Signed-off-by: Fredrik Svedberg
Change-Id: Ic7e66141ccd5e9aa8f0022c5ab9e7fd1ba3f6786
---
 ethosu/vela/tflite_graph_optimiser.py | 22 ----------------------
 1 file changed, 22 deletions(-)

diff --git a/ethosu/vela/tflite_graph_optimiser.py b/ethosu/vela/tflite_graph_optimiser.py
index cf211de4..f59eddef 100644
--- a/ethosu/vela/tflite_graph_optimiser.py
+++ b/ethosu/vela/tflite_graph_optimiser.py
@@ -40,7 +40,6 @@ from .graph_optimiser_util import needed_total_padding
 from .graph_optimiser_util import set_ifm_ofm_op_shapes
 from .graph_optimiser_util import set_tensor_equivalence
 from .numeric_util import clamp_sigmoid
-from .numeric_util import full_shape
 from .numeric_util import round_away_zero
 from .operation import create_activation_function
 from .operation import ExplicitScaling
@@ -623,26 +622,6 @@ def fixup_relus_with_differing_ifm_ofm_scaling(op, arch, nng):
     return op
 
 
-def fixup_elementwise_with_scalars(op, arch, nng):
-    if op.type.is_binary_elementwise_op():
-        ifm_tensor, ifm2_tensor, _, _ = op.get_ifm_ifm2_weights_ofm()
-        if ifm2_tensor.shape != [] and ifm_tensor.shape != []:
-            diff = len(ifm_tensor.shape) - len(ifm2_tensor.shape)
-            if diff > 0:
-                ifm2_tensor.shape = full_shape(len(ifm_tensor.shape), ifm2_tensor.shape, 1)
-            elif diff < 0:
-                ifm_tensor.shape = full_shape(len(ifm2_tensor.shape), ifm_tensor.shape, 1)
-        elif ifm_tensor.shape == [] and ifm_tensor.values is None:
-            # IFM is marked as a scalar, but is a result of an operation; change it to a shape of size 1
-            ifm_tensor.shape = len(ifm2_tensor.shape) * [1]
-            ifm_tensor.storage_shape = ifm_tensor.shape
-        elif ifm2_tensor.shape == [] and ifm2_tensor.values is None:
-            # IFM2 is marked as a scalar, but is a result of an operation; change it to a shape of size 1
-            ifm2_tensor.shape = len(ifm_tensor.shape) * [1]
-            ifm2_tensor.storage_shape = ifm2_tensor.shape
-    return op
-
-
 def convert_softmax(op, arch, nng):
     if op.type == Op.Softmax and op.run_on_npu:
         softmax = SoftMax(op)
@@ -1423,7 +1402,6 @@ def tflite_optimise_graph(nng, arch):
         convert_batched_fc_shape,
         fixup_conv2d_backprop,
         fixup_relus_with_differing_ifm_ofm_scaling,
-        fixup_elementwise_with_scalars,
         reorder_depthwise_weights,
         fixup_resizebilinear,
         fixup_bias_tensors,
-- 
cgit v1.2.1
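
For reference, the removed pass relied on the full_shape helper from
numeric_util (whose import is also dropped above) to pad the lower-rank
input of a binary elementwise operator up to the rank of the other
input. Below is a minimal sketch of that behaviour, assuming, from the
call sites in the removed code, that full_shape(ndim, shape, value)
left-pads shape with value up to ndim dimensions; the real helper lives
in ethosu/vela/numeric_util.py:

    # Sketch of the helper the removed pass depended on; behaviour is
    # inferred from its call sites, not copied from numeric_util.
    def full_shape(ndim, shape, value):
        """Left-pad shape with value so it has at least ndim dimensions."""
        return [value] * (ndim - len(shape)) + shape

    # Example: align a rank-1 second input with a rank-4 feature map,
    # mirroring what fixup_elementwise_with_scalars did for binary
    # elementwise operators before this patch removed it.
    ifm_shape = [1, 8, 8, 16]   # rank 4 feature map
    ifm2_shape = [16]           # rank 1 second input

    diff = len(ifm_shape) - len(ifm2_shape)
    if diff > 0:
        ifm2_shape = full_shape(len(ifm_shape), ifm2_shape, 1)

    assert ifm2_shape == [1, 1, 1, 16]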