aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorFredrik Svedberg <fredrik.svedberg@arm.com>2020-11-18 11:30:21 +0100
committerpatrik.gustavsson <patrik.gustavsson@arm.com>2020-11-19 09:29:25 +0000
commitbdf09f9eca75fab14298a94a5402efc15f698e68 (patch)
tree6a894b819de7b28320c1f0386839926ce142b99b
parent4bb989b0e0f82ef2bd25c6848cebf51d253e22ae (diff)
downloadethos-u-vela-bdf09f9eca75fab14298a94a5402efc15f698e68.tar.gz
[MLBEDSW-3300] Fix DepthwiseConv2D failure when bias tensor quant_values are None
Fixed DepthwiseConv2D failing when the bias tensor's quant_values are None. Also fixed DepthwiseConv2D failing with an implicit depth multiplier. Signed-off-by: Fredrik Svedberg <fredrik.svedberg@arm.com> Change-Id: I799a565eefa498ccf7ac626fcd472b8cbd908931
-rw-r--r--ethosu/vela/supported_operators.py2
-rw-r--r--ethosu/vela/tflite_reader.py6
2 files changed, 7 insertions, 1 deletion
diff --git a/ethosu/vela/supported_operators.py b/ethosu/vela/supported_operators.py
index 4429238..91fcb5a 100644
--- a/ethosu/vela/supported_operators.py
+++ b/ethosu/vela/supported_operators.py
@@ -490,7 +490,7 @@ class SupportedOperators:
def constraint_bias_40bit(op):
"Optional Bias tensor values must fit within 40-bits"
bias = op.bias
- if bias and bias.dtype == DataType.int64:
+ if bias and bias.dtype == DataType.int64 and bias.quant_values is not None:
valid = all(len(bin(quant_value)[2:]) <= 40 for quant_value in bias.quant_values)
return valid, f"Tensor '{bias.name}' has values larger than 40-bits"
return True, "Op has no bias tensor, or it fits in 40-bit"
diff --git a/ethosu/vela/tflite_reader.py b/ethosu/vela/tflite_reader.py
index c190f7e..9e20215 100644
--- a/ethosu/vela/tflite_reader.py
+++ b/ethosu/vela/tflite_reader.py
@@ -188,6 +188,12 @@ class TFLiteSubgraph:
if "depth_multiplier" in op.attrs:
op.attrs["channel_multiplier"] = op.attrs["depth_multiplier"]
+ if op_type == Op.DepthwiseConv2DBias and op.attrs["depth_multiplier"] == 0:
+ # The depth multiplier is implicit and is calculated as weight channels / ifm channels
+ # Note however that the weights have been reshaped above.
+ # The original value is cached above in channel_multiplier
+ op.attrs["depth_multiplier"] = op.weights.shape[2] // op.ifm.shape[-1]
+
faf = op.attrs.pop("fused_activation_function", None)
if faf is not None:
op.activation = create_activation_function(faf)