about summary refs log tree commit diff
diff options
context:
space:
mode:
authorFredrik Svedberg <fredrik.svedberg@arm.com>2021-10-12 15:01:53 +0200
committerFredrik Svedberg <fredrik.svedberg@arm.com>2021-10-13 12:15:26 +0000
commit7c0607e142456ebd3577c756b419a3b551cdeafb (patch)
tree7ffeab631bb92d1b5da8019d3070788c898e0442
parent62cdfe52b20bcf6817bf1e1b543689cf5f90fdec (diff)
downloadethos-u-vela-7c0607e142456ebd3577c756b419a3b551cdeafb.tar.gz
MLBEDSW-5315 MLCE: Vela to handle skip Tensor
Removed graph optimizations that are no longer needed and that caused problems with FullyConnected operators running on CPU being consumed by elementwise operators in Vela.

Signed-off-by: Fredrik Svedberg <fredrik.svedberg@arm.com>
Change-Id: Ic7e66141ccd5e9aa8f0022c5ab9e7fd1ba3f6786
-rw-r--r--ethosu/vela/tflite_graph_optimiser.py22
1 files changed, 0 insertions, 22 deletions
diff --git a/ethosu/vela/tflite_graph_optimiser.py b/ethosu/vela/tflite_graph_optimiser.py
index cf211de..f59edde 100644
--- a/ethosu/vela/tflite_graph_optimiser.py
+++ b/ethosu/vela/tflite_graph_optimiser.py
@@ -40,7 +40,6 @@ from .graph_optimiser_util import needed_total_padding
from .graph_optimiser_util import set_ifm_ofm_op_shapes
from .graph_optimiser_util import set_tensor_equivalence
from .numeric_util import clamp_sigmoid
-from .numeric_util import full_shape
from .numeric_util import round_away_zero
from .operation import create_activation_function
from .operation import ExplicitScaling
@@ -623,26 +622,6 @@ def fixup_relus_with_differing_ifm_ofm_scaling(op, arch, nng):
return op
-def fixup_elementwise_with_scalars(op, arch, nng):
- if op.type.is_binary_elementwise_op():
- ifm_tensor, ifm2_tensor, _, _ = op.get_ifm_ifm2_weights_ofm()
- if ifm2_tensor.shape != [] and ifm_tensor.shape != []:
- diff = len(ifm_tensor.shape) - len(ifm2_tensor.shape)
- if diff > 0:
- ifm2_tensor.shape = full_shape(len(ifm_tensor.shape), ifm2_tensor.shape, 1)
- elif diff < 0:
- ifm_tensor.shape = full_shape(len(ifm2_tensor.shape), ifm_tensor.shape, 1)
- elif ifm_tensor.shape == [] and ifm_tensor.values is None:
- # IFM is marked as a scalar, but is a result of an operation; change it to a shape of size 1
- ifm_tensor.shape = len(ifm2_tensor.shape) * [1]
- ifm_tensor.storage_shape = ifm_tensor.shape
- elif ifm2_tensor.shape == [] and ifm2_tensor.values is None:
- # IFM2 is marked as a scalar, but is a result of an operation; change it to a shape of size 1
- ifm2_tensor.shape = len(ifm_tensor.shape) * [1]
- ifm2_tensor.storage_shape = ifm2_tensor.shape
- return op
-
-
def convert_softmax(op, arch, nng):
if op.type == Op.Softmax and op.run_on_npu:
softmax = SoftMax(op)
@@ -1423,7 +1402,6 @@ def tflite_optimise_graph(nng, arch):
convert_batched_fc_shape,
fixup_conv2d_backprop,
fixup_relus_with_differing_ifm_ofm_scaling,
- fixup_elementwise_with_scalars,
reorder_depthwise_weights,
fixup_resizebilinear,
fixup_bias_tensors,