author    Michael McGeagh <michael.mcgeagh@arm.com>  2020-11-25 12:36:23 +0000
committer Michael McGeagh <michael.mcgeagh@arm.com>  2020-11-25 12:36:23 +0000
commit    34d29174c28c724a45602c5ebe8fdab4f86edde7 (patch)
tree      f532b1c859123ac289abddeab3767d84461a8ab8
parent    54a6111883f7bb4245770909c4c13ee7c92f41cc (diff)
download  ethos-u-vela-34d29174c28c724a45602c5ebe8fdab4f86edde7.tar.gz
vela: Improve printing of sets
When printing a set in the docstrings for the SUPPORTED_OPS.md file, the
order is random. Reuse the existing sorted string repr for the operator
list and apply it to the other printed sets (data types).

Signed-off-by: Michael McGeagh <michael.mcgeagh@arm.com>
Change-Id: I2ac12ea91c2637219e5c24f9a863aa0fc2086e77
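The underlying issue is simple to reproduce: Python sets have no guaranteed iteration order, so interpolating one directly into a docstring yields text that can change between runs. A minimal sketch of the before/after behaviour (the set contents here are illustrative strings standing in for Vela's DataType values):

    dtypes = {"int8", "int32", "int16", "uint8"}

    # Before: the set's repr uses an arbitrary, hash-dependent order
    print("Tensors must be of type: {}".format(dtypes))
    # e.g. Tensors must be of type: {'int32', 'uint8', 'int8', 'int16'}

    # After: stringify, sort, then join for a deterministic listing
    print("Tensors must be of type: {}".format(", ".join(sorted(map(str, dtypes)))))
    # Tensors must be of type: int16, int32, int8, uint8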
-rw-r--r--  SUPPORTED_OPS.md                      10
-rw-r--r--  ethosu/vela/supported_operators.py    25
2 files changed, 17 insertions(+), 18 deletions(-)
diff --git a/SUPPORTED_OPS.md b/SUPPORTED_OPS.md
index fdda157..bc1f0aa 100644
--- a/SUPPORTED_OPS.md
+++ b/SUPPORTED_OPS.md
@@ -55,7 +55,7 @@ This is a list of constraints that all NPU operators must satisfy in order to be
- Output tensors cannot be scalar
- Scalar Input tensors are only valid for op type: ADD, MAXIMUM, MINIMUM, MUL, SPLIT, SPLIT_V, SUB
- Input(s) and Output tensors must not be greater than 4D
-- Tensors must be of type: {int8, int32, int16, uint8}
+- Tensors must be of type: int16, int32, int8, uint8
- Tensors which are int32 are only valid when op type is: ADD, MUL, SUB
- Tensor dimensions must be in the range [1, 65535]
- Input(s), Output and Weight tensors must have quantization parameters
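The re-ordering above falls straight out of the sorted join: the type names are compared lexicographically as strings, which is why int16 now precedes int32 and int8 precedes uint8 (assuming Vela's DataType values stringify to the names shown):

    >>> ", ".join(sorted(map(str, {"int8", "int32", "int16", "uint8"})))
    'int16, int32, int8, uint8'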
@@ -117,7 +117,7 @@ This is a list of constraints that the CONV_2D operator must satisfy in order to
- Weight tensor must be 8-bit
- Weight tensor must be constant
- The sum of the weights cannot exceed 8323072
-- Optional Bias tensor must be of type: {int32, int64}
+- Optional Bias tensor must be of type: int32, int64
- Optional Bias tensor values must fit within 40-bits
- IFM Tensor batch size must be 1
@@ -134,7 +134,7 @@ This is a list of constraints that the DEPTHWISE_CONV_2D operator must satisfy i
- Weight tensor must be 8-bit
- Weight tensor must be constant
- The sum of the weights cannot exceed 8323072
-- Optional Bias tensor must be of type: {int32, int64}
+- Optional Bias tensor must be of type: int32, int64
- Optional Bias tensor values must fit within 40-bits
- IFM Tensor batch size must be 1
- For depth multipliers > 1, IFM channels must be 1 and OFM channels must be equal to the depth multiplier
@@ -145,7 +145,7 @@ This is a list of constraints that the FULLY_CONNECTED operator must satisfy in
- Weight tensor must be 8-bit
- Weight tensor must be constant
-- Optional Bias tensor must be of type: {int32, int64}
+- Optional Bias tensor must be of type: int32, int64
- Optional Bias tensor values must fit within 40-bits
## LEAKY_RELU Constraints
@@ -277,7 +277,7 @@ This is a list of constraints that the TRANSPOSE_CONV operator must satisfy in o
- Weight tensor must be 8-bit
- Weight tensor must be constant
- The sum of the weights cannot exceed 8323072
-- Optional Bias tensor must be of type: {int32, int64}
+- Optional Bias tensor must be of type: int32, int64
- Optional Bias tensor values must fit within 40-bits
- IFM Tensor batch size must be 1
- Stride values for both width and height must be 2
diff --git a/ethosu/vela/supported_operators.py b/ethosu/vela/supported_operators.py
index 6bbb04b..deae75a 100644
--- a/ethosu/vela/supported_operators.py
+++ b/ethosu/vela/supported_operators.py
@@ -38,13 +38,17 @@ def docstring_format_args(args):
     return docstring


+def _list_formatter(arg):
+    # Order and join into a string representation
+    return ", ".join(sorted(map(str, arg)))
+
+
 def _optype_formatter(op_list):
     # Convert internal op types to external names
     output = map(optype_to_builtintype, op_list)
     # Remove UNKNOWNs
     output = (x for x in output if x is not BUILTIN_OPERATOR_UNKNOWN)
-    # Order alphabetically and join into a string representation
-    return ", ".join(str(op) for op in sorted(output))
+    return _list_formatter(output)


 class SupportedOperators:
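With this hunk both formatters share one code path: _optype_formatter now handles only the op-type-specific mapping and UNKNOWN filtering, then defers ordering and joining to _list_formatter. A rough usage sketch (the input is an illustrative stand-in for Vela's DataType values):

    # _list_formatter accepts any iterable, including the generator
    # expression that _optype_formatter passes it
    print(_list_formatter({"int8", "int32", "int16", "uint8"}))
    # int16, int32, int8, uint8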
@@ -110,11 +114,6 @@ class SupportedOperators:
     filter_range = (1, 8)
     filter_height_range = (1, 256)
     filter_product_range = (1, 256 * 256)
-    # Ordered, external names of op types for the constraint reasons
-    docstring_shapeless_input_ops = _optype_formatter(shapeless_input_ops)
-    docstring_supported_int32_tensor_ops = _optype_formatter(supported_int32_tensor_ops)
-    docstring_supported_fused_activations = _optype_formatter(supported_fused_activations)
-    docstring_per_axis_quant_ops = _optype_formatter(per_axis_quant_ops)

     def __init__(self):
         # Setup the generic constraints. Note: the order matters
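The five docstring_* class attributes existed only to pre-render strings for the decorators below; with the formatter calls moved inline into each @docstring_format_args(...) argument, they have no remaining users. Only the def line and return of docstring_format_args appear as context in the first hunk, but a decorator of that shape plausibly interpolates its arguments into the constraint's docstring; a hedged sketch, not the actual Vela implementation:

    def docstring_format_args(args):
        # Fill the {} placeholders in the constraint's docstring with
        # the pre-rendered argument strings (sketch only)
        def docstring(func):
            func.__doc__ = func.__doc__.format(*args)
            return func
        return docstring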
@@ -299,7 +298,7 @@ class SupportedOperators:
         return valid, f"Output Tensor '{ofm.name}' is scalar"

     @classmethod
-    @docstring_format_args([docstring_shapeless_input_ops])
+    @docstring_format_args([_optype_formatter(shapeless_input_ops)])
     def constraint_tens_input_scalar(cls, op):
         "Scalar Input tensors are only valid for op type: {}"
         valid = True
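After decoration, the {} placeholder is filled with the deterministic operator list, and that docstring is what ends up in SUPPORTED_OPS.md. Assuming the decorator behaves as sketched above, the result matches the generated entry shown earlier:

    >>> SupportedOperators.constraint_tens_input_scalar.__doc__
    'Scalar Input tensors are only valid for op type: ADD, MAXIMUM, MINIMUM, MUL, SPLIT, SPLIT_V, SUB'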
@@ -325,7 +324,7 @@ class SupportedOperators:
         return valid, ", ".join(extra)

     @classmethod
-    @docstring_format_args([supported_op_dtypes])
+    @docstring_format_args([_list_formatter(supported_op_dtypes)])
     def constraint_tens_dtype(cls, op):
         "Tensors must be of type: {}"
         valid = True
@@ -340,7 +339,7 @@ class SupportedOperators:
         return valid, ", ".join(extra)

     @classmethod
-    @docstring_format_args([docstring_supported_int32_tensor_ops])
+    @docstring_format_args([_optype_formatter(supported_int32_tensor_ops)])
     def constraint_tens_int32_ops(cls, op):
         "Tensors which are int32 are only valid when op type is: {}"
         valid = True
@@ -397,7 +396,7 @@ class SupportedOperators:
         return valid, ", ".join(extra)

     @classmethod
-    @docstring_format_args([docstring_per_axis_quant_ops])
+    @docstring_format_args([_optype_formatter(per_axis_quant_ops)])
     def constraint_tens_quant_per_axis(cls, op):
         "Per-axis quantization is only supported for the following op types: {}"
         valid = True
@@ -411,7 +410,7 @@ class SupportedOperators:
         return valid, "The following tensor(s) have per-axis quantization parameters: " + ", ".join(extra)

     @classmethod
-    @docstring_format_args([docstring_supported_fused_activations])
+    @docstring_format_args([_optype_formatter(supported_fused_activations)])
     def constraint_faf(cls, op):
         "The fused activation function (if present) must be one of type: {}"
         if op.activation is None:
@@ -497,7 +496,7 @@ class SupportedOperators:
         return valid, f"Tensor '{weights.name}' has the sum of weights: {limit}"

     @classmethod
-    @docstring_format_args([supported_bias_dtypes])
+    @docstring_format_args([_list_formatter(supported_bias_dtypes)])
     def constraint_bias_type(cls, op):
         "Optional Bias tensor must be of type: {}"
         bias = op.bias
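The same mechanism produces the bias-type line in SUPPORTED_OPS.md: assuming supported_bias_dtypes stringifies to the names in the doc diff above, the placeholder resolves to the sorted pair:

    >>> ", ".join(sorted(map(str, {"int64", "int32"})))  # what _list_formatter does
    'int32, int64'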