author     Louis Verhaard <louis.verhaard@arm.com>  2021-02-03 08:43:54 +0100
committer  Louis Verhaard <louis.verhaard@arm.com>  2021-02-04 11:41:09 +0100
commit     3d22f3c4cdff3ed1e392048265f84d07c01080d5 (patch)
tree       b2bd550e062b002f0ce0cfc34cc3e17bbaf32fc4
parent     fcb1a00cfd4216782f4fc4429ce66c592a0b8030 (diff)
MLBEDSW-3932: Remove Squeeze from supported operators
- Squeeze is no longer listed as a supported operator
- Added missing doc-string for a Pad constraint

Change-Id: Ifd5e493acb0eb28bc4f104df74b3491589db8c29
Signed-off-by: Louis Verhaard <louis.verhaard@arm.com>
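For context, a minimal sketch of what dropping Squeeze means in practice. The set definitions are copied from the supported_operators.py hunk below; the before/after names and the asserts are illustrative only and not part of Vela. Once Op.Squeeze leaves memory_only_ops, Squeeze no longer passes the NPU supported-operator check and is left to run on the CPU.

```python
# Illustrative only (not part of this patch): the effect of removing
# Op.Squeeze from memory_only_ops, using the sets from the hunk below.
from ethosu.vela.operation import Op

split_ops = {Op.Split, Op.SplitV, Op.StridedSlice, Op.Slice, Op.UnpackReshaped, Op.Unpack}
concat_ops = {Op.Concat, Op.ConcatTFLite, Op.PackReshaped, Op.Pack}

# Before this patch, Squeeze was treated as a memory-only (no-compute) op.
memory_only_ops_before = {Op.Squeeze, Op.Reshape, Op.QuantizedReshape} | concat_ops | split_ops
# After this patch, it is not, so it falls out of the NPU-supported set.
memory_only_ops_after = {Op.Reshape, Op.QuantizedReshape} | concat_ops | split_ops

assert Op.Squeeze in memory_only_ops_before
assert Op.Squeeze not in memory_only_ops_after
```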
-rw-r--r--  SUPPORTED_OPS.md                    | 45
-rw-r--r--  ethosu/vela/supported_operators.py  |  3
2 files changed, 27 insertions, 21 deletions
diff --git a/SUPPORTED_OPS.md b/SUPPORTED_OPS.md
index e1de48c..dfa24d0 100644
--- a/SUPPORTED_OPS.md
+++ b/SUPPORTED_OPS.md
@@ -1,7 +1,7 @@
# Supported Ops
This file was automatically generated by Vela using the `--supported-ops-report` parameter.
-Vela version: `2.0.0`
+Vela version: `2.0.2.dev49+gda756aa`
This file complies with
[**Gitiles Markdown syntax**](https://github.com/google/gitiles/blob/master/Documentation/markdown.md)
@@ -22,6 +22,7 @@ Please check the supported operator list for your chosen runtime for further inf
| CONV_2D | [Generic](#generic-constraints), [Specific](#conv_2d-constraints) |
| DEPTHWISE_CONV_2D | [Generic](#generic-constraints), [Specific](#depthwise_conv_2d-constraints) |
| FULLY_CONNECTED | [Generic](#generic-constraints), [Specific](#fully_connected-constraints) |
+| HARD_SWISH | [Generic](#generic-constraints), [Specific](#hard_swish-constraints) |
| LEAKY_RELU | [Generic](#generic-constraints), [Specific](#leaky_relu-constraints) |
| LOGISTIC | [Generic](#generic-constraints) |
| MAXIMUM | [Generic](#generic-constraints), [Specific](#maximum-constraints) |
@@ -29,17 +30,17 @@ Please check the supported operator list for your chosen runtime for further inf
| MINIMUM | [Generic](#generic-constraints), [Specific](#minimum-constraints) |
| MUL | [Generic](#generic-constraints), [Specific](#mul-constraints) |
| PACK | [Generic](#generic-constraints) |
+| PAD | [Generic](#generic-constraints), [Specific](#pad-constraints) |
| QUANTIZE | [Generic](#generic-constraints) |
-| RELU | [Generic](#generic-constraints), [Specific](#relu-constraints) |
-| RELU6 | [Generic](#generic-constraints), [Specific](#relu6-constraints) |
-| RELU_N1_TO_1 | [Generic](#generic-constraints), [Specific](#relu_n1_to_1-constraints) |
+| RELU | [Generic](#generic-constraints) |
+| RELU6 | [Generic](#generic-constraints) |
+| RELU_N1_TO_1 | [Generic](#generic-constraints) |
| RESHAPE | [Generic](#generic-constraints) |
| RESIZE_BILINEAR | [Generic](#generic-constraints), [Specific](#resize_bilinear-constraints) |
| SLICE | [Generic](#generic-constraints) |
| SOFTMAX | [Generic](#generic-constraints), [Specific](#softmax-constraints) |
| SPLIT | [Generic](#generic-constraints) |
| SPLIT_V | [Generic](#generic-constraints), [Specific](#split_v-constraints) |
-| SQUEEZE | [Generic](#generic-constraints) |
| STRIDED_SLICE | [Generic](#generic-constraints), [Specific](#strided_slice-constraints) |
| SUB | [Generic](#generic-constraints), [Specific](#sub-constraints) |
| TANH | [Generic](#generic-constraints) |
@@ -62,6 +63,7 @@ This is a list of constraints that all NPU operators must satisfy in order to be
- Input(s), Output and Weight tensors with quantization scales must be finite
- Per-axis quantization is only supported for the following op types: CONV_2D, DEPTHWISE_CONV_2D, TRANSPOSE_CONV
- The fused activation function (if present) must be one of type: LOGISTIC, RELU, RELU6, RELU_N1_TO_1, TANH
+- Input and Output tensors must have quantization scales that fit within float32 precision
## ABS Constraints
@@ -149,6 +151,13 @@ This is a list of constraints that the FULLY_CONNECTED operator must satisfy in
- Optional Bias tensor values must fit within 40-bits
- The output tensor(s) must have 2D shape
+## HARD_SWISH Constraints
+
+This is a list of constraints that the HARD_SWISH operator must satisfy in order to be scheduled on the NPU.
+
+- IFM must be int8 or uint8
+- IFM and OFM data types must match
+
## LEAKY_RELU Constraints
This is a list of constraints that the LEAKY_RELU operator must satisfy in order to be scheduled on the NPU.
@@ -201,23 +210,19 @@ This is a list of constraints that the MUL operator must satisfy in order to be
- For IFM that are unsigned, OFM must either be the same type or int32
- Broadcasting is only allowed for rank indices with dimension 1, from either IFM1 or IFM2
-## RELU Constraints
-
-This is a list of constraints that the RELU operator must satisfy in order to be scheduled on the NPU.
-
-- The IFM quantization scale divided by the OFM quantization scale must not be infinite
+## PAD Constraints
-## RELU6 Constraints
+This is a list of constraints that the PAD operator must satisfy in order to be scheduled on the NPU.
-This is a list of constraints that the RELU6 operator must satisfy in order to be scheduled on the NPU.
-
-- The IFM quantization scale divided by the OFM quantization scale must not be infinite
-
-## RELU_N1_TO_1 Constraints
-
-This is a list of constraints that the RELU_N1_TO_1 operator must satisfy in order to be scheduled on the NPU.
-
-- The IFM quantization scale divided by the OFM quantization scale must not be infinite
+- IFM and OFM data types must match
+- Both Input quantization parameters must match OFM quantization parameters
+- Number of input tensors must be exactly 2
+- The padding tensor must have the shape [4,2]
+- The pad tensor can only pad width and height
+- Pad tensor must be of type: int32, int64
+- The padding tensor must be constant
+- Must be followed by one of the following operator types: CONV_2D, DEPTHWISE_CONV_2D
+- Padding must be at most kernel size divided by 2
## RESIZE_BILINEAR Constraints
diff --git a/ethosu/vela/supported_operators.py b/ethosu/vela/supported_operators.py
index 8bb9c58..8446ec2 100644
--- a/ethosu/vela/supported_operators.py
+++ b/ethosu/vela/supported_operators.py
@@ -98,7 +98,7 @@ class SupportedOperators:
    )
    split_ops = set((Op.Split, Op.SplitV, Op.StridedSlice, Op.Slice, Op.UnpackReshaped, Op.Unpack,))
    concat_ops = set((Op.Concat, Op.ConcatTFLite, Op.PackReshaped, Op.Pack,))
-   memory_only_ops = set((Op.Squeeze, Op.Reshape, Op.QuantizedReshape,)) | concat_ops | split_ops
+   memory_only_ops = set((Op.Reshape, Op.QuantizedReshape,)) | concat_ops | split_ops
    shapeless_input_ops = binary_elem_wise_main_ops | set((Op.Split, Op.SplitV,))
    per_axis_quant_ops = convolution_like_ops  # per-axis/channel quantization only currently supported for conv ops
    supported_fused_activations = relu_ops | set((Op.Tanh, Op.Sigmoid, Op.LUT,))
@@ -839,6 +839,7 @@ class SupportedOperators:
    @staticmethod
    def constraint_pad_constant(op):
+       "The padding tensor must be constant"
        pad_tensor = op.inputs[1].values
        valid = pad_tensor is not None
        return valid, f"Op has non-constant padding tensor: {op.inputs[1].values}"
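The added doc-string is not just cosmetic: Vela generates SUPPORTED_OPS.md from the doc-strings of these constraint functions, so a constraint without one produces an empty bullet in the report. A simplified sketch of that mechanism follows; list_constraints is illustrative here, not Vela's actual report generator (which sits behind the `--supported-ops-report` option).

```python
# Simplified sketch (assumed mechanism): each constraint's doc-string becomes
# one bullet under the operator's "Constraints" heading in SUPPORTED_OPS.md,
# which is why the missing doc-string above had to be added.
def constraint_pad_constant(op):
    "The padding tensor must be constant"
    pad_tensor = op.inputs[1].values
    valid = pad_tensor is not None
    return valid, f"Op has non-constant padding tensor: {op.inputs[1].values}"

def list_constraints(constraints):
    # The doc-string of each constraint is the human-readable rule text.
    return [f"- {c.__doc__}" for c in constraints]

print("\n".join(list_constraints([constraint_pad_constant])))
# Output:
# - The padding tensor must be constant
```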