path: root/ethosu/vela/supported_operators.py
author    Dwight Lidman <dwight.lidman@arm.com>  2020-05-29 09:37:03 +0200
committer Tim Hall <tim.hall@arm.com>            2020-06-18 17:53:52 +0100
commit    42fed9484c9aa2a43bdd1b07f9b66bdecabd821d (patch)
tree      b843ef49059d810ac2daa06d3b731e43a8856472 /ethosu/vela/supported_operators.py
parent    a9390f7fbd35dca75e80710835f67bb1d75d7c93 (diff)
download  ethos-u-vela-42fed9484c9aa2a43bdd1b07f9b66bdecabd821d.tar.gz
MLBEDSW-2372: Failing assert for ResizeBilinear with upscale != 2x
This commit fixes the failing assert by removing it and instead placing
unsupported ResizeBilinear operators on the CPU. It introduces a new graph
optimisation function which adds the necessary attributes as well as new
operator restrictions for ResizeBilinear.

Signed-off-by: Dwight Lidman <dwight.lidman@arm.com>
Change-Id: I2feffd0b5a2169ebffbe4f165e450b3f2d140380
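As the new restriction below encodes, the NPU path only accepts a ResizeBilinear that performs an exact 2x upscale of the IFM height/width, minus one pixel per axis when align_corners is set; anything else now falls back to the CPU instead of tripping the assert. A minimal sketch of the shape rule in plain Python (illustrative names, not vela's API):

    def supported_resize_output_shape(ifm_shape, align_corners):
        # Expected OFM height/width for the only upscale factor the NPU accepts (2x).
        h, w = ifm_shape[1] * 2, ifm_shape[2] * 2
        if align_corners:
            # align_corners trims one pixel per axis: 10x10 -> 19x19
            return [h - 1, w - 1]
        # plain 2x upscale: 10x10 -> 20x20
        return [h, w]

    # Worked example with a 1x10x10x8 NHWC IFM:
    assert supported_resize_output_shape([1, 10, 10, 8], False) == [20, 20]
    assert supported_resize_output_shape([1, 10, 10, 8], True) == [19, 19]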
Diffstat (limited to 'ethosu/vela/supported_operators.py')
-rw-r--r--  ethosu/vela/supported_operators.py | 17 +++++++++++++++--
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/ethosu/vela/supported_operators.py b/ethosu/vela/supported_operators.py
index ce3fa609..729d435a 100644
--- a/ethosu/vela/supported_operators.py
+++ b/ethosu/vela/supported_operators.py
@@ -29,6 +29,7 @@ class SupportedOperators:
         self.max_pooling_ops = set(("QuantizedMaxPool", "MaxPool", "MaxPoolAct"))
         self.avg_pooling_ops = set(("QuantizedAvgPool", "AvgPool", "AvgPoolAct"))
         self.pooling_ops = self.max_pooling_ops | self.avg_pooling_ops
+        self.resizing_ops = set(("ResizeBilinear",))
         self.fc_vector_products = set(("QuantizedMatMul", "MatMul", "FullyConnectedAct"))
         self.mac_main_ops = (
             # convolutions
@@ -37,12 +38,12 @@ class SupportedOperators:
             | self.depthwise_convolution_ops
             # pooling
             | self.pooling_ops
+            # resizing/upscaling
+            | self.resizing_ops
             # FC layers
             | self.fc_vector_products
             # RNN/LSTM/GRU
             | set(("BlockLSTM"))
-            # deconvolution
-            | set(("ResizeBilinear",))
         )
         self.unary_elem_wise_main_ops = set(("LeakyRelu", "Abs"))
         self.binary_elem_wise_min_max_ops = set(("Minimum", "Maximum"))
@@ -90,6 +91,7 @@ class SupportedOperators:
             {op: self.check_depthwise_convolution_restrictions for op in self.depthwise_convolution_ops}
         )
         self.supported_operator_restrictions.update({op: self.check_pooling_restrictions for op in self.pooling_ops})
+        self.supported_operator_restrictions.update({op: self.check_resize_restrictions for op in self.resizing_ops})
         self.supported_operator_restrictions.update(
             {op: self.check_vector_product_restrictions for op in self.fc_vector_products}
         )
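The hunk above slots the new check into vela's existing dispatch table: supported_operator_restrictions maps each op type to a checker method, so registering a new op family is a single update() call. A rough sketch of the pattern under assumed, simplified names (not the real class):

    class RestrictionChecker:
        def __init__(self):
            self.resizing_ops = set(("ResizeBilinear",))
            self.supported_operator_restrictions = {}
            self.supported_operator_restrictions.update(
                {op: self.check_resize_restrictions for op in self.resizing_ops}
            )

        def is_operator_supported(self, op):
            # ops without a registered checker are treated as unrestricted here
            check = self.supported_operator_restrictions.get(op.type, lambda op: True)
            return check(op)

        def check_resize_restrictions(self, op):
            return True  # stand-in for the real shape checks below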
@@ -206,6 +208,17 @@ class SupportedOperators:
                 return False
         return True
 
+    def check_resize_restrictions(self, op):
+        # check unsupported upscaling factor
+        if op.type == "ResizeBilinear":
+            upscaled_shape = [op.inputs[0].shape[1] * 2, op.inputs[0].shape[2] * 2]
+            out_shape = op.outputs[0].shape[1:3]
+            if not op.attrs["align_corners"] and out_shape != upscaled_shape:
+                return False
+            elif op.attrs["align_corners"] and out_shape != [upscaled_shape[0] - 1, upscaled_shape[1] - 1]:
+                return False
+        return True
+
     def check_vector_product_restrictions(self, op):
         # check data type
         ifm_tensor, _, weight_tensor, _ = op.get_ifm_ifm2_weights_ofm()
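To exercise the new check end to end, here is a hedged, self-contained harness: the Tensor and Op stubs are hypothetical stand-ins for vela's classes, and only the body of the check mirrors the diff above.

    from collections import namedtuple

    Tensor = namedtuple("Tensor", ["shape"])

    class Op:
        def __init__(self, ifm_shape, ofm_shape, align_corners):
            self.type = "ResizeBilinear"
            self.inputs = [Tensor(ifm_shape)]
            self.outputs = [Tensor(ofm_shape)]
            self.attrs = {"align_corners": align_corners}

    def check_resize_restrictions(op):
        # same logic as the method added above, as a free function
        if op.type == "ResizeBilinear":
            upscaled_shape = [op.inputs[0].shape[1] * 2, op.inputs[0].shape[2] * 2]
            out_shape = op.outputs[0].shape[1:3]
            if not op.attrs["align_corners"] and out_shape != upscaled_shape:
                return False
            if op.attrs["align_corners"] and out_shape != [upscaled_shape[0] - 1, upscaled_shape[1] - 1]:
                return False
        return True

    assert check_resize_restrictions(Op([1, 10, 10, 8], [1, 20, 20, 8], False))      # exact 2x: stays on NPU
    assert check_resize_restrictions(Op([1, 10, 10, 8], [1, 19, 19, 8], True))       # 2x with align_corners
    assert not check_resize_restrictions(Op([1, 10, 10, 8], [1, 40, 40, 8], False))  # 4x: goes to CPU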