commit 67e0d8f24fcb86115e834acd19dc57027b03ea4f
tree   748a85cc9aca976b74e18d1e4bead38344c32922
parent 1575b9413de2569de25bb2520b898a91f24ad3b0
author    Jacob Bohlin <jacob.bohlin@arm.com>    2020-08-20 10:53:02 +0200
committer tim.hall <tim.hall@arm.com>            2020-08-21 15:30:36 +0000

MLBEDSW-2663: Handle optional tensors

Includes a number of changes:
 * Handle non-existing optional inputs
 * Handle disabled optional inputs (-1 indexed)
 * Add unit tests for parsing operators
 * Add a bias tensor to the different Convolutions + FullyConnected if it is missing

Signed-off-by: Jacob Bohlin <jacob.bohlin@arm.com>
Change-Id: Ib88d2b610314b1c886fc0aef4f9da87430ce6ae5
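
For context: in the TFLite flatbuffer, an optional operator input can either be left out of the inputs array entirely or be disabled with the sentinel index -1. A minimal sketch of the mapping this commit introduces in tflite_reader.py (the tensor names and indices below are made up for illustration):

    # Hypothetical tensor table and operator input indices.
    tensors = ["ifm", "weights", "bias"]
    input_indices = [0, 1, -1]  # -1 marks a disabled optional input

    # Map -1 to None instead of (wrongly) indexing the last tensor.
    inputs = [tensors[idx] if idx != -1 else None for idx in input_indices]
    assert inputs == ["ifm", "weights", None]
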
-rw-r--r--  ethosu/vela/graph_optimiser.py          24
-rw-r--r--  ethosu/vela/nn_graph.py                  4
-rw-r--r--  ethosu/vela/operation.py                 9
-rw-r--r--  ethosu/vela/rewrite_graph.py            11
-rw-r--r--  ethosu/vela/supported_operators.py      22
-rw-r--r--  ethosu/vela/test/test_tflite_reader.py  43
-rw-r--r--  ethosu/vela/tflite_reader.py            17
7 files changed, 104 insertions, 26 deletions
diff --git a/ethosu/vela/graph_optimiser.py b/ethosu/vela/graph_optimiser.py
index 26ee73c2..78c0dcd4 100644
--- a/ethosu/vela/graph_optimiser.py
+++ b/ethosu/vela/graph_optimiser.py
@@ -178,17 +178,6 @@ def fixup_conv2d_backprop(op, arch):
         # flip the inputs
         op.inputs[0], op.inputs[2] = op.inputs[2], op.inputs[0]
         op.type = "Conv2DBackpropInputSwitchedBias"
-        weight_shape = op.inputs[1].shape
-        weight_sets = weight_shape[3]
-
-        if len(op.inputs) < 4:
-            # Add bias/scale tensor filled with zeros
-            scale_tens = Tensor([weight_sets], DataType.int32, op.name + "_bias_tens")
-            scale_tens.values = [0] * weight_sets
-            scale_tens.quant_values = [0] * weight_sets
-            scale_op = Operation("Const", op.name + "_bias")
-            scale_op.set_output_tensor(scale_tens)
-            op.add_input_tensor(scale_tens)
 
         # Update strides
         op.attrs.update({"stride_w": 1, "stride_h": 1, "strides": (1, 1, 1, 1)})
@@ -649,6 +638,18 @@ def add_attrs_to_resizebilinear(op, arch):
     return op
 
 
+def add_bias_tensor(op, arch):
+    if ("Conv2d" in op.type or op.type.startswith("FullyConnected")) and not op.inputs[-1]:
+        # Add bias/scale tensor filled with zeros
+        weight_shape = op.inputs[1].shape
+        weight_sets = weight_shape[-1]
+        bias_values = [0] * weight_sets
+        scale_tens = create_const_tensor(op.name + "_bias", [weight_sets], DataType.int32, bias_values)
+        op.set_input_tensor(scale_tens, -1)
+
+    return op
+
+
 def supported_operator_check(op, arch):
     op.run_on_npu = arch.supported_operators.is_operator_supported(op)
     return op
@@ -677,6 +678,7 @@ def optimise_graph_a(nng, arch, verbose_graph=False):
         fixup_elementwise_with_scalars,
         reorder_depthwise_weights,
         fixup_resizebilinear,
+        add_bias_tensor,
         # convert_mul_max_to_abs_or_lrelu # TODO: enable optimisation once quantisation issues are resolved
     ]
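
The new add_bias_tensor pass generalises the zero-bias logic that the first hunk removes from fixup_conv2d_backprop: whenever a convolution or fully connected op ends up with a missing (None) bias input, a zero-filled int32 constant with one value per weight set is substituted. A rough standalone illustration of the shape logic, using a made-up weight shape and plain lists instead of vela's Tensor class:

    # Hypothetical weight shape; the last dimension is the number of
    # weight sets (output channels), cf. weight_shape[-1] above.
    weight_shape = [1, 3, 3, 8]
    weight_sets = weight_shape[-1]

    # Zero-filled bias, one int32 value per weight set, matching what
    # create_const_tensor is handed in add_bias_tensor.
    bias_values = [0] * weight_sets
    assert len(bias_values) == 8
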
diff --git a/ethosu/vela/nn_graph.py b/ethosu/vela/nn_graph.py
index bfab2270..b495828e 100644
--- a/ethosu/vela/nn_graph.py
+++ b/ethosu/vela/nn_graph.py
@@ -167,6 +167,8 @@ class Subgraph:
             visit_op_set.add(op)
 
             for inp in op.inputs:
+                if not inp:
+                    continue
                 if print_visit:
                     print(inp, "adding consumer", op)
                 visit_tensor(inp)
@@ -190,6 +192,8 @@ class Subgraph:
         for ps in self.passes:
             for tens in ps.outputs + ps.inputs:
+                if not tens:
+                    continue
                 tens.consumer_list = []  # reset unvisited tensors to start with
 
         for tens in self.output_tensors:
diff --git a/ethosu/vela/operation.py b/ethosu/vela/operation.py
index 3b34fe81..c1ca3f81 100644
--- a/ethosu/vela/operation.py
+++ b/ethosu/vela/operation.py
@@ -312,6 +312,15 @@ input and output tensors, as well as an attribute dictionary."""
         if self not in tens.consumer_list:
             tens.consumer_list.append(self)
 
+    def set_input_tensor(self, tens, idx):
+        tens_to_remove = self.inputs[idx]
+        if tens_to_remove in tens.consumer_list:
+            tens.consumer_list.remove(tens_to_remove)
+
+        self.inputs[idx] = tens
+        if self not in tens.consumer_list:
+            tens.consumer_list.append(self)
+
     def set_output_tensor(self, tens):
         tens.ops = [self]
         self.outputs = [tens]
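
set_input_tensor complements the existing add_input_tensor: rather than appending, it overwrites the input slot at idx and registers the op as a consumer of the new tensor, which is what lets add_bias_tensor replace a trailing None placeholder in place. A simplified stand-in sketch of the idea (stub classes, not the real vela Tensor/Operation; the tens_to_remove handling is omitted for brevity):

    class Tens:
        """Minimal stand-in for vela's Tensor (consumer bookkeeping only)."""
        def __init__(self, name):
            self.name = name
            self.consumer_list = []

    class Op:
        """Minimal stand-in for vela's Operation."""
        def __init__(self, inputs):
            self.inputs = inputs

        def set_input_tensor(self, tens, idx):
            # Overwrite the input slot and register self as a consumer.
            self.inputs[idx] = tens
            if self not in tens.consumer_list:
                tens.consumer_list.append(self)

    op = Op([Tens("ifm"), Tens("weights"), None])
    bias = Tens("bias")
    op.set_input_tensor(bias, -1)  # fill the disabled-bias placeholder
    assert op.inputs[-1] is bias and op in bias.consumer_list
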
diff --git a/ethosu/vela/rewrite_graph.py b/ethosu/vela/rewrite_graph.py
index 4f0d0107..e76e9617 100644
--- a/ethosu/vela/rewrite_graph.py
+++ b/ethosu/vela/rewrite_graph.py
@@ -69,10 +69,11 @@ def rewrite_graph_pre_order(sg, arch, tensor_rewrite_list, op_rewrite_list, rewr
         tens_visit_dict[tens] = res
         tens_visit_dict[res] = res
 
-        ops = res.ops
-        res.ops = []
-        for op in ops:
-            res.ops.append(visit_op(op))
+        if res:
+            ops = res.ops
+            res.ops = []
+            for op in ops:
+                res.ops.append(visit_op(op))
         return res
 
     sg.output_tensors = [visit_tens(tens) for tens in sg.output_tensors]
@@ -142,6 +143,8 @@ def verify_subgraph_health(sg):
         op_visit_dict[op] = op
 
         for tens in op.inputs:
+            if not tens:
+                continue
             assert op in tens.consumers()
             visit_tens(tens)
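
Both the consumer refresh in nn_graph.py and the rewrite/verification traversals here need the same guard: inputs parsed as None (disabled optional tensors) must be skipped before any attribute access. A minimal illustration of the failure mode the guards avoid (stub data standing in for tensors, not vela classes):

    class OpStub:
        inputs = ["ifm", "weights", None]  # disabled optional bias

    for inp in OpStub.inputs:
        if not inp:
            continue  # without this, inp.upper() would raise AttributeError on None
        print(inp.upper())
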
diff --git a/ethosu/vela/supported_operators.py b/ethosu/vela/supported_operators.py
index 9e415b51..e6aaca31 100644
--- a/ethosu/vela/supported_operators.py
+++ b/ethosu/vela/supported_operators.py
@@ -131,22 +131,32 @@ class SupportedOperators:
     def check_generic_restrictions(self, op):
         # check fully defined shapes
         for t in op.inputs:
+            if not t:
+                continue
             if not t.has_fully_defined_shape():
                 print("Warning:", op.type, "has input(s) of undefined shape, placing on CPU")
                 return False
             if t.shape == [] and op.type not in self.binary_elem_wise_main_ops:
-                print("Warning:", op.type, "has input(s) of shape [].",
-                      "Scalar input or broadcasting is not supported for this operator,",
-                      "placing on CPU")
+                print(
+                    "Warning:",
+                    op.type,
+                    "has input(s) of shape [].",
+                    "Scalar input or broadcasting is not supported for this operator,",
+                    "placing on CPU",
+                )
                 return False
         for t in op.outputs:
             if not t.has_fully_defined_shape():
                 print("Warning:", op.type, "has output(s) of undefined shape, placing on CPU")
                 return False
             if t.shape == []:
-                print("Warning:", op.type, "has output(s) of shape [].",
-                      "Scalar input or broadcasting is not supported for this operator,",
-                      "placing on CPU")
+                print(
+                    "Warning:",
+                    op.type,
+                    "has output(s) of shape [].",
+                    "Scalar input or broadcasting is not supported for this operator,",
+                    "placing on CPU",
+                )
                 return False
 
         # check data type
diff --git a/ethosu/vela/test/test_tflite_reader.py b/ethosu/vela/test/test_tflite_reader.py
index 1ba07423..d63c0007 100644
--- a/ethosu/vela/test/test_tflite_reader.py
+++ b/ethosu/vela/test/test_tflite_reader.py
@@ -15,6 +15,9 @@
 # limitations under the License.
 # Description:
 # Contains unit tests for tflite_reader
+from unittest.mock import MagicMock
+from unittest.mock import patch
+
 import pytest
 
 from ethosu.vela.tflite_reader import TFLiteSubgraph
@@ -35,3 +38,43 @@ class TestTFLiteSubgraph:
     def test_len1_array_to_scalar(self, test_input, expected):
         output = TFLiteSubgraph.len1_array_to_scalar(test_input)
         assert output == expected
+
+    parse_op_testdata = [
+        # op_type, opt_serializer, inputs, output, expected
+        ("FullyConnected", None, [0, 1, 2], 3, 3),  # FC
+        ("FullyConnected", None, [0, 1, -1], 3, 3),  # FC disabled Bias
+        ("FullyConnected", None, [0, 1], 3, 3),  # FC no Bias
+        ("Conv2D", None, [2, 1, 3], 0, 3),  # Conv2D
+        ("Conv2DBackprop", None, [0, 1, 2, 3], 4, 4),  # TransposeConv
+        ("Conv2DBackprop", None, [0, 1, 2], 4, 4),  # TransposeConv no Bias
+        pytest.param("Conv2D", None, [0, -1, 1], 3, 3, marks=pytest.mark.xfail),  # Conv2D no Weights
+    ]
+
+    @pytest.mark.parametrize("op_type, opt_serializer, inputs, output, expected", parse_op_testdata)
+    def test_parse_operator(self, op_type, opt_serializer, inputs, output, expected):
+        with patch.object(TFLiteSubgraph, "__init__", lambda self, graph, subgraph: None):
+            # Mock a TFLiteSubGraph
+            sg = TFLiteSubgraph(None, None)
+            sg.graph = MagicMock()
+            sg.graph.operator_codes = [(op_type, opt_serializer)]
+
+            # Mock a couple of tensors
+            sg.tensors = [MagicMock() for _ in range(5)]
+            for i, tens in enumerate(sg.tensors):
+                tens.name = "tensor_{}".format(i)
+                tens.ops = []
+
+            # Mock op data
+            op_data = MagicMock()
+            op_data.OpcodeIndex.return_value = 0
+            op_data.InputsAsNumpy.return_value = inputs
+            op_data.OutputsAsNumpy.return_value = [output]
+
+            sg.parse_operator(0, op_data)
+
+            # Verify the created Operation
+            created_op = sg.tensors[output].ops[0]
+            assert created_op.type == op_type
+            assert len(created_op.inputs) == expected
+            assert created_op.outputs[0].name == "tensor_{}".format(output)
+            assert inputs[-1] != -1 or not created_op.inputs[-1]
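
The closing assertion is an implication: if the last input index was -1 (disabled), the parsed operation must hold a falsy placeholder in that slot. Spelled out with illustrative (raw index, parsed input) pairs:

    # Mirrors `assert inputs[-1] != -1 or not created_op.inputs[-1]`.
    for raw_idx, parsed in [(2, "tensor_2"), (-1, None)]:
        assert raw_idx != -1 or not parsed  # disabled inputs must parse to None
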
diff --git a/ethosu/vela/tflite_reader.py b/ethosu/vela/tflite_reader.py
index daa208f1..a2f744d3 100644
--- a/ethosu/vela/tflite_reader.py
+++ b/ethosu/vela/tflite_reader.py
@@ -137,8 +137,8 @@ class TFLiteSubgraph:
     def parse_operator(self, op_index, op_data):
         op_type, opt_serializer = self.graph.operator_codes[op_data.OpcodeIndex()]
-        inputs = [self.tensors[idx] for idx in op_data.InputsAsNumpy()]
-        outputs = [self.tensors[idx] for idx in op_data.OutputsAsNumpy()]
+        inputs = [self.tensors[idx] if idx != -1 else None for idx in op_data.InputsAsNumpy()]
+        outputs = [self.tensors[idx] if idx != -1 else None for idx in op_data.OutputsAsNumpy()]
         name = "unknown_op_name"
         if len(outputs):
             name = outputs[0].name
@@ -153,12 +153,19 @@ class TFLiteSubgraph:
         if op_type.startswith("DepthwiseConv2d") or op_type.startswith("Conv2D"):
             inputs[1] = clone_and_reshape_tensor(inputs[1], (1, 2, 3, 0))
-            if not op.type.endswith("BackpropInput"):
-                inputs[2] = clone_and_reshape_tensor(inputs[2], (0,))
+            if len(inputs) < 3 or (len(inputs) < 4 and "Backprop" in op_type):
+                # No Bias tensor
+                inputs.append(None)
+            if inputs[-1]:
+                inputs[-1] = clone_and_reshape_tensor(inputs[-1], (0,))
 
         if op_type.startswith("FullyConnected"):
             inputs[1] = clone_and_reshape_tensor(inputs[1], (1, 0))
-            inputs[2] = clone_and_reshape_tensor(inputs[2], (0,))
+            if len(inputs) < 3:
+                # No Bias tensor
+                inputs.append(None)
+            if inputs[-1]:
+                inputs[-1] = clone_and_reshape_tensor(inputs[-1], (0,))
 
         if opt_serializer is not None:
             op.attrs = opt_serializer.deserialize(op_data)
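
Taken together, the reader changes normalise the three ways a bias can arrive: present, disabled (-1, already mapped to None by the earlier hunk), or absent (inputs list too short). A condensed sketch of that normalisation outside the real class (op_type strings and input lists are illustrative; TransposeConv carries its bias at index 3, the others at index 2):

    def normalise_bias(op_type, inputs):
        # Append an explicit None placeholder when the bias is absent,
        # matching the len(inputs) checks in parse_operator above.
        expected = 4 if "Backprop" in op_type else 3
        if len(inputs) < expected:
            inputs.append(None)
        return inputs

    assert normalise_bias("Conv2D", ["ifm", "w"]) == ["ifm", "w", None]
    assert normalise_bias("Conv2DBackprop", ["oshape", "w", "ifm"]) == ["oshape", "w", "ifm", None]
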