path: root/ethosu/vela/tflite_reader.py
author     Fredrik Svedberg <fredrik.svedberg@arm.com>        2020-09-29 10:00:39 +0200
committer  patrik.gustavsson <patrik.gustavsson@arm.com>      2020-09-30 07:52:39 +0000
commit     0f98b361288c71fca327969346db32de098c797b (patch)
tree       8b2905a6e763832a0029179d655c481b14e0a8a1 /ethosu/vela/tflite_reader.py
parent     0265f402c7ae1e875470298b4130fcc2f7ab4e23 (diff)
download   ethos-u-vela-0f98b361288c71fca327969346db32de098c797b.tar.gz
[MLBEDSW-2802] Fix 5D tensor crash
Fixed crash in networks with 5D tensors.
Fixed crash for (int32) tensors without quantization.
Added validity checks for concatenation.
Moved unfusing of activation function from tflite_reader to graph_optimiser.

Signed-off-by: Fredrik Svedberg <fredrik.svedberg@arm.com>
Change-Id: Ib9ba8891dc95ef5491e15d0feedef44331a26393
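Editor's note: per the message above, the unfusing logic removed from tflite_reader.py in this diff is re-homed in graph_optimiser. As orientation for readers of this commit, the following is a minimal sketch of what that rewrite could look like on the graph_optimiser side, assuming it mirrors the deleted reader code; the function name unfuse_activation_function and its (op, arch) rewrite signature are assumptions, not taken from this commit.

# Hedged sketch (not the committed code): a graph_optimiser-style rewrite
# that splits a fused activation out of a ConcatTFLite op, mirroring the
# block deleted from tflite_reader.py in this change.
from .operation import Operation


def unfuse_activation_function(op, arch):  # name and signature are assumptions
    act = op.attrs.get("fused_activation_function")
    if op.type == "ConcatTFLite" and act is not None:
        # Drop the fused attribute and insert a stand-alone activation op
        # between the concat and its original output tensor.
        del op.attrs["fused_activation_function"]
        act_op = Operation(act, op.name + act)
        out_tens = op.outputs[0]
        intermediate_tens = out_tens.clone("_act_intermediate")
        act_op.set_output_tensor(out_tens)
        intermediate_tens.ops = [op]
        op.outputs[0] = intermediate_tens
        act_op.inputs = [intermediate_tens]
    return op

Doing this as a graph rewrite rather than in the reader keeps tflite_reader.py a plain translation of the flatbuffer and lets the activation split run alongside the other per-op fix-ups in graph_optimiser.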
Diffstat (limited to 'ethosu/vela/tflite_reader.py')
-rw-r--r--  ethosu/vela/tflite_reader.py  |  17 -----------------
1 files changed, 0 insertions, 17 deletions
diff --git a/ethosu/vela/tflite_reader.py b/ethosu/vela/tflite_reader.py
index 7458b907..77cc7963 100644
--- a/ethosu/vela/tflite_reader.py
+++ b/ethosu/vela/tflite_reader.py
@@ -149,8 +149,6 @@ class TFLiteSubgraph:
         for out in op.outputs:
             out.ops = [op]
 
-        activation_function_to_split_out = None
-
         if op_type.startswith("DepthwiseConv2d") or op_type.startswith("Conv2D"):
             if inputs[1].values is not None:
                 inputs[1] = clone_and_reshape_tensor(inputs[1], (1, 2, 3, 0))
@@ -192,21 +190,6 @@ class TFLiteSubgraph:
             if "depth_multiplier" in op.attrs:
                 op.attrs["channel_multiplier"] = op.attrs["depth_multiplier"]
 
-            if "fused_activation_function" in op.attrs:
-                if op_type in set(("ConcatTFLite",)):
-                    act = op.attrs["fused_activation_function"]
-                    del op.attrs["fused_activation_function"]
-                    if act is not None:
-                        activation_function_to_split_out = act
-
-        if activation_function_to_split_out is not None:
-            act_op = Operation(activation_function_to_split_out, name + activation_function_to_split_out)
-            out_tens = op.outputs[0]
-            intermediate_tens = out_tens.clone("_act_intermediate")
-            act_op.set_output_tensor(out_tens)
-            intermediate_tens.ops = [op]
-            op.outputs[0] = intermediate_tens
-            act_op.inputs = [intermediate_tens]
 
     @staticmethod
     def len1_array_to_scalar(arr):