aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMichael McGeagh <michael.mcgeagh@arm.com>2020-11-10 12:38:25 +0000
committerMichael McGeagh <michael.mcgeagh@arm.com>2020-11-17 14:40:26 +0000
commit837dc1bc42323fa723b72fe51919bc2f013e5a26 (patch)
treee38f2e5ff31a13404c252e62dd13ef877a7e6a49
parent69b3176127ff8522903e087d56e2d2f4ec557d62 (diff)
downloadethos-u-vela-837dc1bc42323fa723b72fe51919bc2f013e5a26.tar.gz
MLBEDSW-3403 Generate supported op report
A new CLI option has been added that allows the generation of a report containing a summary table of all TFLite ops that can be placed on the NPU, and what the constraints are for that operator to be successfully scheduled on the NPU. This option will generate a new file, SUPPORTED_OPS.md containing this information, in the current working directory. Signed-off-by: Michael McGeagh <michael.mcgeagh@arm.com> Change-Id: I6a7e2a49f251b76b2ea1168fff78e00da1910b25
-rw-r--r--OPTIONS.md15
-rw-r--r--ethosu/vela/supported_operators.py20
-rw-r--r--ethosu/vela/tflite_mapping.py11
-rw-r--r--ethosu/vela/vela.py78
4 files changed, 115 insertions, 9 deletions
diff --git a/OPTIONS.md b/OPTIONS.md
index 9aaf67b..7d12351 100644
--- a/OPTIONS.md
+++ b/OPTIONS.md
@@ -38,6 +38,19 @@ required Network argument.
vela --version
```
+### Supported Operator Report
+
+Generates the SUPPORTED_OPS.md file in the current working directory and exits.
+Contains a summary table of all TFLite operators that can be placed on the NPU,
+and what the constraints are for that operator to be scheduled on the NPU.
+If the constraints are not met, then that operator will be scheduled on the CPU instead.
+**Type: N/A**
+**Default: N/A**
+
+```bash
+vela --supported-ops-report
+```
+
### Output Directory
Specifies the output directory of the optimised network model as well as the
@@ -159,8 +172,8 @@ Limit the block config search space. This will result in faster compilation
times but may impact the performance of the output network. Use 0 for unlimited
search.
**Type: Integer**
-**Choices: >= 0**
**Default: 16**
+**Choices: >= 0**
```bash
vela network.tflite --block-config-limit 0
diff --git a/ethosu/vela/supported_operators.py b/ethosu/vela/supported_operators.py
index 46f7a5d..ccf6104 100644
--- a/ethosu/vela/supported_operators.py
+++ b/ethosu/vela/supported_operators.py
@@ -25,6 +25,7 @@ from .numeric_util import is_integer
from .operation import get_slice_offsets
from .operation import Op
from .tensor import check_quantized_tens_scaling_equal
+from .tflite_mapping import BUILTIN_OPERATOR_UNKNOWN
from .tflite_mapping import optype_to_builtintype
@@ -37,6 +38,15 @@ def docstring_format_args(args):
return docstring
+def _optype_formatter(op_list):
+ # Convert internal op types to external names
+ output = map(optype_to_builtintype, op_list)
+ # Remove UNKNOWNs
+ output = (x for x in output if x is not BUILTIN_OPERATOR_UNKNOWN)
+ # Order alphabetically
+ return sorted(output)
+
+
class SupportedOperators:
# Categorised lists of supported operators
npu_pre_ops = set((Op.SplitSliceRead,))
@@ -99,6 +109,10 @@ class SupportedOperators:
filter_range = (1, 8)
filter_height_range = (1, 256)
filter_product_range = (1, 256 * 256)
+ # Ordered, external names of op types for the constraint reasons
+ docstring_shapeless_input_ops = _optype_formatter(shapeless_input_ops)
+ docstring_supported_int32_tensor_ops = _optype_formatter(supported_int32_tensor_ops)
+ docstring_supported_fused_activations = _optype_formatter(supported_fused_activations)
def __init__(self):
# Setup the generic constraints. Note: the order matters
@@ -279,7 +293,7 @@ class SupportedOperators:
return valid, f"Output Tensor '{ofm.name}' is scalar"
@classmethod
- @docstring_format_args([shapeless_input_ops])
+ @docstring_format_args([docstring_shapeless_input_ops])
def constraint_tens_input_scalar(cls, op):
"Scalar Input tensors are only valid for op type: {}"
valid = True
@@ -320,7 +334,7 @@ class SupportedOperators:
return valid, ", ".join(extra)
@classmethod
- @docstring_format_args([supported_int32_tensor_ops])
+ @docstring_format_args([docstring_supported_int32_tensor_ops])
def constraint_tens_int32_ops(cls, op):
"Tensors which are int32 are only valid when op type is: {}"
valid = True
@@ -377,7 +391,7 @@ class SupportedOperators:
return valid, ", ".join(extra)
@classmethod
- @docstring_format_args([supported_fused_activations])
+ @docstring_format_args([docstring_supported_fused_activations])
def constraint_faf(cls, op):
"The fused activation function (if present) must be one of type: {}"
if op.activation is None:
diff --git a/ethosu/vela/tflite_mapping.py b/ethosu/vela/tflite_mapping.py
index 44ecedc..ea9e8a3 100644
--- a/ethosu/vela/tflite_mapping.py
+++ b/ethosu/vela/tflite_mapping.py
@@ -691,10 +691,15 @@ builtin_operator_inv_map = {v[0]: (k, v[1]) for k, v in builtin_operator_map.ite
builtin_operator_inv_map[Op.CustomNpuOp] = (BuiltinOperator.CUSTOM, CustomOptionsSerializer())
+BUILTIN_OPERATOR_UNKNOWN = "UNKNOWN"
+
+
+def builtin_type_name(builtin):
+ return next(k for k, v in vars(BuiltinOperator).items() if v == builtin)
+
def optype_to_builtintype(op_type):
if op_type in builtin_operator_inv_map:
- builtin_type = builtin_operator_inv_map[op_type][0]
- return next(k for k, v in vars(BuiltinOperator).items() if v == builtin_type)
+ return builtin_type_name(builtin_operator_inv_map[op_type][0])
else:
- return "UNKNOWN"
+ return BUILTIN_OPERATOR_UNKNOWN
diff --git a/ethosu/vela/vela.py b/ethosu/vela/vela.py
index 5df20d2..5df21f5 100644
--- a/ethosu/vela/vela.py
+++ b/ethosu/vela/vela.py
@@ -36,8 +36,11 @@ from .errors import InputFileError
from .nn_graph import PassPlacement
from .nn_graph import TensorAllocator
from .scheduler import ParetoMetric
+from .supported_operators import SupportedOperators
from .tensor import MemArea
from .tensor import Tensor
+from .tflite_mapping import builtin_operator_map
+from .tflite_mapping import builtin_type_name
def process(input_name, enable_debug_db, arch, model_reader_options, compiler_options, scheduler_options):
@@ -119,17 +122,83 @@ def print_subgraph_io_summary(nng):
print(" Maximum Subgraph Size = {0} KiB".format(max_sg_size))
+def generate_supported_ops():
+ lines = [
+ "# Supported Ops",
+ "",
+ "This file was automatically generated by Vela using the `--supported-ops-report` parameter. ",
+ f"Vela version: `{__version__}`",
+ "",
+ "This file complies with [**CommonMark.**](https://commonmark.org)",
+ "",
+ "## Summary Table",
+ "",
+ "The table below contains TFLite operators that can be placed on the Ethos-U NPU. ",
+ "If the constraints are not met, then that operator will be scheduled on the CPU instead. ",
+ "For any other TFLite operator not listed, will be left untouched and scheduled on the CPU. ",
+ "Please check the supported operator list for your chosen runtime for further information.",
+ "",
+ "| Operator | Constraints |",
+ "| - | - |",
+ ]
+ supported = SupportedOperators()
+ op_constraint_links = []
+ op_list = sorted(((op, builtin_type_name(op)) for op in builtin_operator_map), key=lambda x: x[1])
+ for op, name in op_list:
+ internal_op = builtin_operator_map[op][0]
+ if internal_op in SupportedOperators.supported_operators:
+ links = "[Generic](#generic-constraints)"
+ if internal_op in supported.specific_constraints:
+ links += f", [Specific](#{name.lower()}-constraints)"
+ op_constraint_links.append((internal_op, name))
+ lines.append(f"| {name} | {links} |")
+ lines += [
+ "",
+ "## Generic Constraints",
+ "",
+ "This is a list of constraints that all NPU operators must satisfy in order to be scheduled on the NPU.",
+ "",
+ ]
+ for constraint in supported.generic_constraints:
+ # Markdown needs two spaces at the end of a line to render it as a separate line
+ reason = constraint.__doc__.replace("\n", " \n")
+ lines.append(f"- {reason}")
+ for op, name in op_constraint_links:
+ lines += [
+ "",
+ f"## {name} Constraints",
+ "",
+ f"This is a list of constraints that the {name} operator must satisfy in order to be scheduled on the NPU.",
+ "",
+ ]
+ for constraint in supported.specific_constraints[op]:
+ # Markdown needs two spaces at the end of a line to render it as a separate line
+ reason = constraint.__doc__.replace("\n", " \n")
+ lines.append(f"- {reason}")
+
+ # Note. this will generate the file in the CWD
+ filepath = os.path.join(os.getcwd(), "SUPPORTED_OPS.md")
+ with open(filepath, "wt") as md:
+ md.writelines(line + "\n" for line in lines)
+ print(f"Report file: {filepath}")
+
+
def main(args=None):
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(prog="vela", description="Neural network model compiler for Ethos-U55")
+ parser.add_argument("--version", action="version", version=__version__)
+ parser.add_argument(
+ "--supported-ops-report",
+ action="store_true",
+ help="Generate the SUPPORTED_OPS.md file in the current working directory and exits.",
+ )
parser.add_argument(
- "network", metavar="NETWORK", type=str, default=None, nargs=None, help="Filename of network to process"
+ "network", metavar="NETWORK", type=str, default=None, nargs="?", help="Filename of network to process"
)
- parser.add_argument("--version", action="version", version=__version__)
parser.add_argument(
"--output-dir", type=str, default="output", help="Output directory to write files to (default: %(default)s)"
)
@@ -279,6 +348,11 @@ def main(args=None):
config = configparser.ConfigParser()
config.read_file(f)
+ # Generate the supported ops report and exit
+ if args.supported_ops_report:
+ generate_supported_ops()
+ return 0
+
if args.network is None:
parser.error("the following argument is required: NETWORK")