aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTim Hall <tim.hall@arm.com>2023-03-10 18:11:34 +0000
committerTim Hall <tim.hall@arm.com>2023-04-21 18:23:03 +0100
commit2180a172c31f27899d3bf77bfecccc1768667737 (patch)
tree2147c32ae3226599ba19077cd5f3ccf3ffb6a734
parent7b3008a905d2a5122e21f945db7d2a2132473c53 (diff)
downloadethos-u-vela-2180a172c31f27899d3bf77bfecccc1768667737.tar.gz
MLBEDSW-7408: MLCE: Crash when serialising model LSTM
- Added checking and reporting of missing operator attributes when reading and writing TFLite file - Added a TFLite semantic check to ensure that all required attribute fields of builtin operators are read - Added some sanity checks for RESHAPE operators that run on the Ethos-U - Stopped CPU operators from having their attributes modified Change-Id: I05700681acdb09554f5945819717c08a9457295c Signed-off-by: Tim Hall <tim.hall@arm.com>
-rw-r--r--ethosu/vela/operation.py9
-rw-r--r--ethosu/vela/tflite_graph_optimiser.py64
-rw-r--r--ethosu/vela/tflite_mapping.py22
-rw-r--r--ethosu/vela/tflite_model_semantic.py11
-rw-r--r--ethosu/vela/tflite_reader.py17
-rw-r--r--ethosu/vela/tflite_supported_operators.py16
-rw-r--r--ethosu/vela/tflite_writer.py44
7 files changed, 142 insertions, 41 deletions
diff --git a/ethosu/vela/operation.py b/ethosu/vela/operation.py
index d1670536..eafe3bd1 100644
--- a/ethosu/vela/operation.py
+++ b/ethosu/vela/operation.py
@@ -854,15 +854,6 @@ class Operation:
ifm_tensor, ifm2_tensor, ofm_tensor = self.get_ifm_ifm2_ofm()
- if self.type == Op.Reshape:
- # Set ofm shape
- if len(self.inputs) > 1 and self.inputs[1].values is not None:
- ofm_tensor.shape = self.inputs[1].values.flatten().tolist()
- ofm_elements = ofm_tensor.elements()
- # Stretch dimension
- if ofm_elements < 0:
- ofm_tensor.shape[ofm_tensor.shape.index(-1)] = int(ifm_tensor.elements() / abs(ofm_elements))
-
# set all shapes to op, as 4D
if self.type == Op.FullyConnected:
if len(self.ifm.shape) == 2:
diff --git a/ethosu/vela/tflite_graph_optimiser.py b/ethosu/vela/tflite_graph_optimiser.py
index 393a8323..6297fca2 100644
--- a/ethosu/vela/tflite_graph_optimiser.py
+++ b/ethosu/vela/tflite_graph_optimiser.py
@@ -2080,6 +2080,68 @@ def fixup_dilation_gt2(op, arch, nng):
return op
+def fixup_reshape(op, arch, nng):
+ def _get_explicit_shape(implicit_shape, total_size):
+ # the explicit shape is a copy of the implicit shape but with the special -1 (remaining size) value converted to
+ # the appropriate value
+ if implicit_shape is None:
+ return None
+
+ explicit_shape = list(implicit_shape)
+ if -1 in explicit_shape:
+ explicit_shape[explicit_shape.index(-1)] = int(total_size / abs(np.prod(implicit_shape)))
+
+ return explicit_shape
+
+ if op.type == Op.Reshape:
+ ifm_tensor, _, ofm_tensor = op.get_ifm_ifm2_ofm()
+ ifm_size = ifm_tensor.elements()
+ ofm_shape = ofm_tensor.shape
+
+ new_shape_tensor_shape = op.inputs[1].values.flatten() if len(op.inputs) > 1 else None
+ new_shape_tensor_shape = _get_explicit_shape(new_shape_tensor_shape, ifm_size)
+
+ new_shape_attribute = op.attrs.get("new_shape", None)
+ new_shape_attribute = _get_explicit_shape(new_shape_attribute, ifm_size)
+
+ # if present the new shape tensor overrides the new_shape attribute
+ if new_shape_tensor_shape is not None:
+ # check tensor
+ if not np.array_equal(new_shape_tensor_shape, ofm_shape):
+ print(
+ f"Warning: {optype_to_builtintype(op.type)} '{op.name}' has new shape tensor"
+ f" ({new_shape_tensor_shape}) that does not match output tensor shape {ofm_shape}. Will use output"
+ f" tensor shape."
+ )
+ elif new_shape_attribute is not None:
+ # check attribute
+ if not np.array_equal(new_shape_attribute, ofm_shape):
+ print(
+ f"Warning: {optype_to_builtintype(op.type)} '{op.name}' has new_shape attribute"
+ f" ({new_shape_attribute}) that does not match output tensor shape {ofm_shape}. Will use output"
+ f" tensor shape."
+ )
+ else:
+ print(
+ f"Warning: {optype_to_builtintype(op.type)} '{op.name}' does not have a new shape tensor or a new_shape"
+ f" attribute. Will use output tensor shape {ofm_shape}."
+ )
+
+ # force new shape tensor to output shape
+ new_shape_tensor = create_const_tensor(
+ op.name + "_new_shape", [len(ofm_shape)], DataType.int32, np.array(ofm_shape, np.int32)
+ )
+ if len(op.inputs) > 1:
+ op.set_input_tensor(new_shape_tensor, 1)
+ else:
+ op.add_input_tensor(new_shape_tensor)
+
+ # force new_shape attribute to output shape
+ op.attrs["new_shape"] = ofm_shape
+
+ return op
+
+
def supported_operator_check(op, arch, nng):
op.run_on_npu = arch.tflite_supported_operators.is_operator_supported(op)
return op
@@ -2104,7 +2166,7 @@ def tflite_optimise_graph(nng, arch, force_symmetric_int_weights):
)
# Pre-processing step
- pre_process_list = [supported_operator_check, set_ifm_ofm_op_shapes]
+ pre_process_list = [supported_operator_check, set_ifm_ofm_op_shapes, fixup_reshape]
for idx, sg in enumerate(nng.subgraphs):
nng.subgraphs[idx] = rewrite_graph.rewrite_graph_pre_order(
diff --git a/ethosu/vela/tflite_mapping.py b/ethosu/vela/tflite_mapping.py
index 98fe287d..14052ce5 100644
--- a/ethosu/vela/tflite_mapping.py
+++ b/ethosu/vela/tflite_mapping.py
@@ -418,10 +418,11 @@ class OptionsSerializer:
def deserialize(self, op_data):
builtin_options = op_data.BuiltinOptions()
attrs = {}
+ attrs["attribute_read_error"] = [] # list of attributes that couldn't be read, empty indicates no error
if builtin_options:
tfattrs = self.cls()
tfattrs.Init(builtin_options.Bytes, builtin_options.Pos)
- for underscore_mem, camelcase_mem, deserialize, serialize, is_vector in self.members:
+ for underscore_mem, camelcase_mem, deserialize, _, is_vector in self.members:
fun = camelcase_mem
if is_vector:
fun += "AsNumpy"
@@ -430,15 +431,22 @@ class OptionsSerializer:
try:
attrs[underscore_mem] = deserialize(attr)
except TypeError:
- print("Warning: {0} could not read attribute '{1}'.".format(self.name, underscore_mem))
+ attrs["attribute_read_error"].append(underscore_mem)
+ else:
+ # all builtin operators should have an options field
+ attrs["attribute_read_error"] = [underscore_mem for underscore_mem, *_ in self.members]
return attrs
def serialize(self, builder, attrs):
ser_attrs = []
- for underscore_mem, camelcase_mem, deserialize, serialize, is_vector in self.members:
- a = serialize(builder, attrs[underscore_mem])
- ser_attrs.append((camelcase_mem, a))
+ attrs["attribute_write_error"] = [] # list of attributes that couldn't be written, empty indicates no error
+ for underscore_mem, camelcase_mem, _, serialize, _ in self.members:
+ try:
+ a = serialize(builder, attrs[underscore_mem])
+ ser_attrs.append((camelcase_mem, a))
+ except KeyError:
+ attrs["attribute_write_error"].append(underscore_mem)
getattr(self.module, self.name + "Start")(builder)
@@ -457,6 +465,8 @@ class CustomOptionsSerializer:
def deserialize(self, op_data):
attrs = {}
+ attrs["attribute_read_error"] = [] # list of attributes that couldn't be read, empty indicates no error
+
custom_options = op_data.CustomOptionsAsNumpy()
attrs["custom_options"] = custom_options
attrs["custom_options_format"] = op_data.CustomOptionsFormat()
@@ -467,6 +477,8 @@ class CustomOptionsSerializer:
return attrs
def serialize(self, builder, attrs):
+ attrs["attribute_write_error"] = [] # list of attributes that couldn't be written, empty indicates no error
+
custom_type = attrs.get("custom_type", CustomType.ThirdPartyOp)
self.custom_opt_format = attrs.get("custom_options_format", self.CUSTOM_OPTIONS_FORMAT_DEFAULT)
diff --git a/ethosu/vela/tflite_model_semantic.py b/ethosu/vela/tflite_model_semantic.py
index 6ba7b835..66770487 100644
--- a/ethosu/vela/tflite_model_semantic.py
+++ b/ethosu/vela/tflite_model_semantic.py
@@ -92,6 +92,7 @@ class TFLiteSemantic:
def __init__(self):
# Setup the generic constraints. Note: the order matters
self.generic_constraints = []
+ self.generic_constraints.append(TFLiteSemantic.constraint_attributes_specified)
self.generic_constraints.append(TFLiteSemantic.constraint_tens_no_dynamic)
self.generic_constraints.append(TFLiteSemantic.constraint_tens_defined_shape)
self.generic_constraints.append(TFLiteSemantic.constraint_tens_output_scalar)
@@ -258,6 +259,16 @@ class TFLiteSemantic:
return valid, f"Unexpected None value for constant tensor: {extra}"
@staticmethod
+ def constraint_attributes_specified(op):
+ "All required operator attributes must be specified"
+ # operators that have been created internally (i.e. not created as part of reading an input network) may not
+ # have the read error attribute
+ attribute_read_error = op.attrs.get("attribute_read_error", [])
+ valid = len(attribute_read_error) == 0
+ extra = ", ".join(attribute_read_error)
+ return valid, f"Op has missing attributes: {extra}"
+
+ @staticmethod
def constraint_tens_no_dynamic(op):
"Input(s) and Output tensors must not be dynamic"
valid = True
diff --git a/ethosu/vela/tflite_reader.py b/ethosu/vela/tflite_reader.py
index 2325ff65..2f3192b7 100644
--- a/ethosu/vela/tflite_reader.py
+++ b/ethosu/vela/tflite_reader.py
@@ -41,6 +41,7 @@ from .tflite_mapping import builtin_operator_map
from .tflite_mapping import DataType
from .tflite_mapping import datatype_map
from .tflite_mapping import datatype_map_numpy
+from .tflite_mapping import optype_to_builtintype
class TFLiteSubgraph:
@@ -180,9 +181,11 @@ class TFLiteSubgraph:
init_subgraph_index = op.attrs["init_subgraph_index"]
op.attrs["subgraph"] = (self.graph.nng.subgraphs[init_subgraph_index],)
- if op_type == Op.Reshape and "new_shape" not in op.attrs:
- # Reshape should have an attrib "new_shape" but if it is missing, add it based on the output shape
- op.attrs["new_shape"] = outputs[0].shape
+ if op_type == Op.Reshape:
+ if "new_shape" in op.attrs["attribute_read_error"] and len(inputs) > 1:
+ # the "new_shape" attribute is optional if the new_shape tensor (inputs[1]) is specified. therefore,
+ # remove the attribute read error
+ op.attrs["attribute_read_error"].remove("new_shape")
if op_type == Op.Cast:
# Cast op should have "in/out_data_type" attribs add if missing
@@ -212,6 +215,14 @@ class TFLiteSubgraph:
if custom_code is not None:
op.attrs["custom_code"] = custom_code
+ # finally, report any missing attributes that could not be read during deserialize()
+ attribute_read_error = op.attrs["attribute_read_error"]
+ if len(attribute_read_error) != 0:
+ print(
+ f"Warning: Could not read the following attributes from {optype_to_builtintype(op.type)}"
+ f" '{op.name}' {opt_serializer.name} field: {', '.join(attribute_read_error)}"
+ )
+
@staticmethod
def len1_array_to_scalar(arr):
# The following flatbuffer quantisation fields all return a scalar value of 0 if they are not defined in
diff --git a/ethosu/vela/tflite_supported_operators.py b/ethosu/vela/tflite_supported_operators.py
index 9ace3219..95c7de33 100644
--- a/ethosu/vela/tflite_supported_operators.py
+++ b/ethosu/vela/tflite_supported_operators.py
@@ -847,13 +847,15 @@ class TFLiteSupportedOperators:
valid = True
extra = []
- reshape_tens = op.inputs[1]
- if reshape_tens is not None:
- # constant inputs have either no driving operator or a const one
- # create a list of non-constant inputs
- if not (len(reshape_tens.ops) == 0 or reshape_tens.ops[0].type == Op.Const):
- valid = False
- extra.append(reshape_tens.name)
+ # if a reshape tensor is specified then it must be constant
+ if len(op.inputs) > 1:
+ reshape_tens = op.inputs[1]
+ if reshape_tens is not None:
+ # constant inputs have either no driving operator or a const one
+ # create a list of non-constant inputs
+ if not (len(reshape_tens.ops) == 0 or reshape_tens.ops[0].type == Op.Const):
+ valid = False
+ extra.append(reshape_tens.name)
extra = ", ".join(extra)
return valid, f"Op has non-const input(s): {extra}"
diff --git a/ethosu/vela/tflite_writer.py b/ethosu/vela/tflite_writer.py
index 32982298..8d44774b 100644
--- a/ethosu/vela/tflite_writer.py
+++ b/ethosu/vela/tflite_writer.py
@@ -40,6 +40,7 @@ from .tflite import Tensor
from .tflite_mapping import builtin_operator_inv_map
from .tflite_mapping import BuiltinOperator
from .tflite_mapping import datatype_inv_map
+from .tflite_mapping import optype_to_builtintype
# the python flatbuffer interface is missing a method to add in file identifier. patching it in here:
@@ -310,25 +311,36 @@ class TFLiteSerialiser:
custom_opt_offset = None
if opt_serializer is not None:
attrs = dict(op.attrs)
- if "strides" in attrs:
- attrs["stride_h"] = attrs["strides"][1]
- attrs["stride_w"] = attrs["strides"][2]
- if "ksize" in attrs:
- attrs["filter_height"] = attrs["ksize"][1]
- attrs["filter_width"] = attrs["ksize"][2]
- if "dilation" in attrs:
- attrs["dilation_h_factor"] = attrs["dilation"][1]
- attrs["dilation_w_factor"] = attrs["dilation"][2]
- if "channel_multiplier" in attrs:
- attrs["depth_multiplier"] = attrs["channel_multiplier"]
- if "container" in attrs:
- attrs["container"] = builder.CreateString(attrs["container"])
- if "shared_name" in attrs:
- attrs["shared_name"] = builder.CreateString(attrs["shared_name"])
- attrs["fused_activation_function"] = op.activation.op_type if op.activation is not None else None
+ if op.run_on_npu:
+ if "strides" in attrs:
+ attrs["stride_h"] = attrs["strides"][1]
+ attrs["stride_w"] = attrs["strides"][2]
+ if "ksize" in attrs:
+ attrs["filter_height"] = attrs["ksize"][1]
+ attrs["filter_width"] = attrs["ksize"][2]
+ if "dilation" in attrs:
+ attrs["dilation_h_factor"] = attrs["dilation"][1]
+ attrs["dilation_w_factor"] = attrs["dilation"][2]
+ if "channel_multiplier" in attrs:
+ attrs["depth_multiplier"] = attrs["channel_multiplier"]
+ if "container" in attrs:
+ attrs["container"] = builder.CreateString(attrs["container"])
+ if "shared_name" in attrs:
+ attrs["shared_name"] = builder.CreateString(attrs["shared_name"])
+ attrs["fused_activation_function"] = op.activation.op_type if op.activation is not None else None
builtin_opt_offset, custom_opt_offset = opt_serializer.serialize(builder, attrs)
+ # report any missing attributes that could not be written during serialize().
+ # operators that have been created internally (i.e. not created as part of reading an input network) may not
+ # have the write error attribute
+ attribute_write_error = attrs.get("attribute_write_error", [])
+ if len(attribute_write_error) != 0:
+ print(
+ f"Warning: Could not write the following attributes to {optype_to_builtintype(op.type)}"
+ f" '{op.name}' {opt_serializer.name} field: {', '.join(attribute_write_error)}"
+ )
+
mutating_variable_inputs_offset = self.write_byte_vector([])
Operator.OperatorStart(builder)
Operator.OperatorAddOpcodeIndex(builder, op_idx)