author    | Jacob Bohlin <jacob.bohlin@arm.com> | 2020-08-20 10:53:02 +0200
committer | tim.hall <tim.hall@arm.com> | 2020-08-21 15:30:36 +0000
commit    | 67e0d8f24fcb86115e834acd19dc57027b03ea4f (patch)
tree      | 748a85cc9aca976b74e18d1e4bead38344c32922 /ethosu/vela/graph_optimiser.py
parent    | 1575b9413de2569de25bb2520b898a91f24ad3b0 (diff)
download  | ethos-u-vela-67e0d8f24fcb86115e834acd19dc57027b03ea4f.tar.gz
MLBEDSW-2663: Handle optional tensors
Includes a number of changes:
* Handle non-existing optional inputs
* Handle disabled optional inputs (-1 indexed; see the sketch below)
* Add unit tests for parsing operators
* Add a bias tensor to the different Convolution ops and to FullyConnected
  if it is missing
Signed-off-by: Jacob Bohlin <jacob.bohlin@arm.com>
Change-Id: Ib88d2b610314b1c886fc0aef4f9da87430ce6ae5
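
For background on the first two bullets: in a TFLite flatbuffer an operator can disable an optional input by giving it tensor index -1, or omit it from its input list entirely. The following is a minimal sketch of normalising both cases; `resolve_inputs` is a hypothetical helper for illustration, not the Vela parser API.

```python
# Illustrative sketch only: `resolve_inputs` is a hypothetical helper, not
# part of ethos-u-vela. A disabled optional input is encoded as tensor
# index -1, while a non-existing optional input is simply absent from the
# operator's index list; disabled inputs become None here so later passes
# can test for a missing tensor uniformly.
def resolve_inputs(input_indices, tensors):
    return [tensors[idx] if idx >= 0 else None for idx in input_indices]


tensors = ["ifm", "weights", "bias"]        # stand-ins for parsed tensor objects
print(resolve_inputs([0, 1, -1], tensors))  # ['ifm', 'weights', None]  (disabled bias)
print(resolve_inputs([0, 1], tensors))      # ['ifm', 'weights']        (non-existing bias)
```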
Diffstat (limited to 'ethosu/vela/graph_optimiser.py')
-rw-r--r-- | ethosu/vela/graph_optimiser.py | 24 |
1 file changed, 13 insertions, 11 deletions
diff --git a/ethosu/vela/graph_optimiser.py b/ethosu/vela/graph_optimiser.py
index 26ee73c2..78c0dcd4 100644
--- a/ethosu/vela/graph_optimiser.py
+++ b/ethosu/vela/graph_optimiser.py
@@ -178,17 +178,6 @@ def fixup_conv2d_backprop(op, arch):
         # flip the inputs
         op.inputs[0], op.inputs[2] = op.inputs[2], op.inputs[0]
         op.type = "Conv2DBackpropInputSwitchedBias"
-        weight_shape = op.inputs[1].shape
-        weight_sets = weight_shape[3]
-
-        if len(op.inputs) < 4:
-            # Add bias/scale tensor filled with zeros
-            scale_tens = Tensor([weight_sets], DataType.int32, op.name + "_bias_tens")
-            scale_tens.values = [0] * weight_sets
-            scale_tens.quant_values = [0] * weight_sets
-            scale_op = Operation("Const", op.name + "_bias")
-            scale_op.set_output_tensor(scale_tens)
-            op.add_input_tensor(scale_tens)

         # Update strides
         op.attrs.update({"stride_w": 1, "stride_h": 1, "strides": (1, 1, 1, 1)})
@@ -649,6 +638,18 @@ def add_attrs_to_resizebilinear(op, arch):
     return op


+def add_bias_tensor(op, arch):
+    if ("Conv2d" in op.type or op.type.startswith("FullyConnected")) and not op.inputs[-1]:
+        # Add bias/scale tensor filled with zeros
+        weight_shape = op.inputs[1].shape
+        weight_sets = weight_shape[-1]
+        bias_values = [0] * weight_sets
+        scale_tens = create_const_tensor(op.name + "_bias", [weight_sets], DataType.int32, bias_values)
+        op.set_input_tensor(scale_tens, -1)
+
+    return op
+
+
 def supported_operator_check(op, arch):
     op.run_on_npu = arch.supported_operators.is_operator_supported(op)
     return op
@@ -677,6 +678,7 @@ def optimise_graph_a(nng, arch, verbose_graph=False):
         fixup_elementwise_with_scalars,
         reorder_depthwise_weights,
         fixup_resizebilinear,
+        add_bias_tensor,
         # convert_mul_max_to_abs_or_lrelu # TODO: enable optimisation once quantisation issues are resolved
     ]
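
To make the new pass's effect concrete, here is a self-contained approximation of `add_bias_tensor` that runs without Vela. `FakeTensor`, `FakeOp`, and the `create_const_tensor` stub are illustrative stand-ins for the real ethosu.vela classes; only the shape and zero-fill logic mirrors the diff above.

```python
# Standalone approximation of the add_bias_tensor pass. FakeOp/FakeTensor and
# this create_const_tensor stub are illustrative stand-ins, not the real
# ethosu.vela classes.
class FakeTensor:
    def __init__(self, name, shape, values=None):
        self.name, self.shape, self.values = name, shape, values


class FakeOp:
    def __init__(self, type_, name, inputs):
        self.type, self.name, self.inputs = type_, name, inputs

    def set_input_tensor(self, tens, idx):
        self.inputs[idx] = tens


def create_const_tensor(name, shape, dtype, values):
    # Stand-in for Vela's create_const_tensor helper used in the diff.
    return FakeTensor(name, shape, values)


def add_bias_tensor(op, arch=None):
    # Mirrors the pass above: append a zero-filled bias when the optional
    # bias input of a Conv2d/FullyConnected op is missing (parsed as None).
    if ("Conv2d" in op.type or op.type.startswith("FullyConnected")) and not op.inputs[-1]:
        # Bias length follows the last axis of the weight shape, as in the diff.
        weight_sets = op.inputs[1].shape[-1]
        bias = create_const_tensor(op.name + "_bias", [weight_sets], "int32", [0] * weight_sets)
        op.set_input_tensor(bias, -1)
    return op


# A FullyConnected op whose disabled bias input was parsed as None:
op = FakeOp("FullyConnected", "fc1", [FakeTensor("ifm", [1, 16]), FakeTensor("weights", [16, 8]), None])
add_bias_tensor(op)
print(op.inputs[-1].values)  # [0, 0, 0, 0, 0, 0, 0, 0]
```

Note how this complements the first hunk: the special-cased zero-bias insertion inside fixup_conv2d_backprop is removed, and the generic pass registered in op_rewrite_list now covers Conv2D variants and FullyConnected alike.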