diff options
Diffstat (limited to 'ethosu/vela')
-rw-r--r-- | ethosu/vela/supported_operators.py | 20 | ||||
-rw-r--r-- | ethosu/vela/tflite_mapping.py | 11 | ||||
-rw-r--r-- | ethosu/vela/vela.py | 78 |
3 files changed, 101 insertions, 8 deletions
diff --git a/ethosu/vela/supported_operators.py b/ethosu/vela/supported_operators.py index 46f7a5d3..ccf61042 100644 --- a/ethosu/vela/supported_operators.py +++ b/ethosu/vela/supported_operators.py @@ -25,6 +25,7 @@ from .numeric_util import is_integer from .operation import get_slice_offsets from .operation import Op from .tensor import check_quantized_tens_scaling_equal +from .tflite_mapping import BUILTIN_OPERATOR_UNKNOWN from .tflite_mapping import optype_to_builtintype @@ -37,6 +38,15 @@ def docstring_format_args(args): return docstring +def _optype_formatter(op_list): + # Convert internal op types to external names + output = map(optype_to_builtintype, op_list) + # Remove UNKNOWNs + output = (x for x in output if x is not BUILTIN_OPERATOR_UNKNOWN) + # Order alphabetically + return sorted(output) + + class SupportedOperators: # Categorised lists of supported operators npu_pre_ops = set((Op.SplitSliceRead,)) @@ -99,6 +109,10 @@ class SupportedOperators: filter_range = (1, 8) filter_height_range = (1, 256) filter_product_range = (1, 256 * 256) + # Ordered, external names of op types for the constraint reasons + docstring_shapeless_input_ops = _optype_formatter(shapeless_input_ops) + docstring_supported_int32_tensor_ops = _optype_formatter(supported_int32_tensor_ops) + docstring_supported_fused_activations = _optype_formatter(supported_fused_activations) def __init__(self): # Setup the generic constraints. 
Note: the order matters @@ -279,7 +293,7 @@ class SupportedOperators: return valid, f"Output Tensor '{ofm.name}' is scalar" @classmethod - @docstring_format_args([shapeless_input_ops]) + @docstring_format_args([docstring_shapeless_input_ops]) def constraint_tens_input_scalar(cls, op): "Scalar Input tensors are only valid for op type: {}" valid = True @@ -320,7 +334,7 @@ class SupportedOperators: return valid, ", ".join(extra) @classmethod - @docstring_format_args([supported_int32_tensor_ops]) + @docstring_format_args([docstring_supported_int32_tensor_ops]) def constraint_tens_int32_ops(cls, op): "Tensors which are int32 are only valid when op type is: {}" valid = True @@ -377,7 +391,7 @@ class SupportedOperators: return valid, ", ".join(extra) @classmethod - @docstring_format_args([supported_fused_activations]) + @docstring_format_args([docstring_supported_fused_activations]) def constraint_faf(cls, op): "The fused activation function (if present) must be one of type: {}" if op.activation is None: diff --git a/ethosu/vela/tflite_mapping.py b/ethosu/vela/tflite_mapping.py index 44ecedcc..ea9e8a36 100644 --- a/ethosu/vela/tflite_mapping.py +++ b/ethosu/vela/tflite_mapping.py @@ -691,10 +691,15 @@ builtin_operator_inv_map = {v[0]: (k, v[1]) for k, v in builtin_operator_map.ite builtin_operator_inv_map[Op.CustomNpuOp] = (BuiltinOperator.CUSTOM, CustomOptionsSerializer()) +BUILTIN_OPERATOR_UNKNOWN = "UNKNOWN" + + +def builtin_type_name(builtin): + return next(k for k, v in vars(BuiltinOperator).items() if v == builtin) + def optype_to_builtintype(op_type): if op_type in builtin_operator_inv_map: - builtin_type = builtin_operator_inv_map[op_type][0] - return next(k for k, v in vars(BuiltinOperator).items() if v == builtin_type) + return builtin_type_name(builtin_operator_inv_map[op_type][0]) else: - return "UNKNOWN" + return BUILTIN_OPERATOR_UNKNOWN diff --git a/ethosu/vela/vela.py b/ethosu/vela/vela.py index 5df20d22..5df21f56 100644 --- a/ethosu/vela/vela.py +++ 
b/ethosu/vela/vela.py @@ -36,8 +36,11 @@ from .errors import InputFileError from .nn_graph import PassPlacement from .nn_graph import TensorAllocator from .scheduler import ParetoMetric +from .supported_operators import SupportedOperators from .tensor import MemArea from .tensor import Tensor +from .tflite_mapping import builtin_operator_map +from .tflite_mapping import builtin_type_name def process(input_name, enable_debug_db, arch, model_reader_options, compiler_options, scheduler_options): @@ -119,17 +122,83 @@ def print_subgraph_io_summary(nng): print(" Maximum Subgraph Size = {0} KiB".format(max_sg_size)) +def generate_supported_ops(): + lines = [ + "# Supported Ops", + "", + "This file was automatically generated by Vela using the `--supported-ops-report` parameter. ", + f"Vela version: `{__version__}`", + "", + "This file complies with [**CommonMark.**](https://commonmark.org)", + "", + "## Summary Table", + "", + "The table below contains TFLite operators that can be placed on the Ethos-U NPU. ", + "If the constraints are not met, then that operator will be scheduled on the CPU instead. ", + "Any other TFLite operator not listed will be left untouched and scheduled on the CPU. 
", + "Please check the supported operator list for your chosen runtime for further information.", + "", + "| Operator | Constraints |", + "| - | - |", + ] + supported = SupportedOperators() + op_constraint_links = [] + op_list = sorted(((op, builtin_type_name(op)) for op in builtin_operator_map), key=lambda x: x[1]) + for op, name in op_list: + internal_op = builtin_operator_map[op][0] + if internal_op in SupportedOperators.supported_operators: + links = "[Generic](#generic-constraints)" + if internal_op in supported.specific_constraints: + links += f", [Specific](#{name.lower()}-constraints)" + op_constraint_links.append((internal_op, name)) + lines.append(f"| {name} | {links} |") + lines += [ + "", + "## Generic Constraints", + "", + "This is a list of constraints that all NPU operators must satisfy in order to be scheduled on the NPU.", + "", + ] + for constraint in supported.generic_constraints: + # Markdown needs two spaces at the end of a line to render it as a separate line + reason = constraint.__doc__.replace("\n", " \n") + lines.append(f"- {reason}") + for op, name in op_constraint_links: + lines += [ + "", + f"## {name} Constraints", + "", + f"This is a list of constraints that the {name} operator must satisfy in order to be scheduled on the NPU.", + "", + ] + for constraint in supported.specific_constraints[op]: + # Markdown needs two spaces at the end of a line to render it as a separate line + reason = constraint.__doc__.replace("\n", " \n") + lines.append(f"- {reason}") + + # Note. 
this will generate the file in the CWD + filepath = os.path.join(os.getcwd(), "SUPPORTED_OPS.md") + with open(filepath, "wt") as md: + md.writelines(line + "\n" for line in lines) + print(f"Report file: {filepath}") + + def main(args=None): if args is None: args = sys.argv[1:] parser = argparse.ArgumentParser(prog="vela", description="Neural network model compiler for Ethos-U55") + parser.add_argument("--version", action="version", version=__version__) + parser.add_argument( + "--supported-ops-report", + action="store_true", + help="Generate the SUPPORTED_OPS.md file in the current working directory and exit.", + ) parser.add_argument( - "network", metavar="NETWORK", type=str, default=None, nargs=None, help="Filename of network to process" + "network", metavar="NETWORK", type=str, default=None, nargs="?", help="Filename of network to process" ) - parser.add_argument("--version", action="version", version=__version__) parser.add_argument( "--output-dir", type=str, default="output", help="Output directory to write files to (default: %(default)s)" ) @@ -279,6 +348,11 @@ def main(args=None): config = configparser.ConfigParser() config.read_file(f) + # Generate the supported ops report and exit + if args.supported_ops_report: + generate_supported_ops() + return 0 + if args.network is None: parser.error("the following argument is required: NETWORK") |