From 42fed9484c9aa2a43bdd1b07f9b66bdecabd821d Mon Sep 17 00:00:00 2001
From: Dwight Lidman
Date: Fri, 29 May 2020 09:37:03 +0200
Subject: MLBEDSW-2372: Failing assert for ResizeBilinear with upscale != 2x

This commit fixes the failing assert by removing it and instead
placing unsupported ResizeBilinear operators on the CPU.
It introduces a new graph optimisation function which adds the
necessary attributes as well as new operator restrictions for
ResizeBilinear.

Signed-off-by: Dwight Lidman
Change-Id: I2feffd0b5a2169ebffbe4f165e450b3f2d140380
---
 ethosu/vela/supported_operators.py | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)

(limited to 'ethosu/vela/supported_operators.py')

diff --git a/ethosu/vela/supported_operators.py b/ethosu/vela/supported_operators.py
index ce3fa609..729d435a 100644
--- a/ethosu/vela/supported_operators.py
+++ b/ethosu/vela/supported_operators.py
@@ -29,6 +29,7 @@ class SupportedOperators:
         self.max_pooling_ops = set(("QuantizedMaxPool", "MaxPool", "MaxPoolAct"))
         self.avg_pooling_ops = set(("QuantizedAvgPool", "AvgPool", "AvgPoolAct"))
         self.pooling_ops = self.max_pooling_ops | self.avg_pooling_ops
+        self.resizing_ops = set(("ResizeBilinear",))
         self.fc_vector_products = set(("QuantizedMatMul", "MatMul", "FullyConnectedAct"))
         self.mac_main_ops = (
             # convolutions
@@ -37,12 +38,12 @@ class SupportedOperators:
             | self.depthwise_convolution_ops
             # pooling
             | self.pooling_ops
+            # resizing/upscaling
+            | self.resizing_ops
             # FC layers
             | self.fc_vector_products
             # RNN/LSTM/GRU
             | set(("BlockLSTM"))
-            # deconvolution
-            | set(("ResizeBilinear",))
         )
         self.unary_elem_wise_main_ops = set(("LeakyRelu", "Abs"))
         self.binary_elem_wise_min_max_ops = set(("Minimum", "Maximum"))
@@ -90,6 +91,7 @@ class SupportedOperators:
             {op: self.check_depthwise_convolution_restrictions for op in self.depthwise_convolution_ops}
         )
         self.supported_operator_restrictions.update({op: self.check_pooling_restrictions for op in self.pooling_ops})
+        self.supported_operator_restrictions.update({op: self.check_resize_restrictions for op in self.resizing_ops})
         self.supported_operator_restrictions.update(
             {op: self.check_vector_product_restrictions for op in self.fc_vector_products}
         )
@@ -206,6 +208,17 @@ class SupportedOperators:
             return False
         return True
 
+    def check_resize_restrictions(self, op):
+        # check unsupported upscaling factor
+        if op.type == "ResizeBilinear":
+            upscaled_shape = [op.inputs[0].shape[1] * 2, op.inputs[0].shape[2] * 2]
+            out_shape = op.outputs[0].shape[1:3]
+            if not op.attrs["align_corners"] and out_shape != upscaled_shape:
+                return False
+            elif op.attrs["align_corners"] and out_shape != [upscaled_shape[0] - 1, upscaled_shape[1] - 1]:
+                return False
+        return True
+
     def check_vector_product_restrictions(self, op):
         # check data type
         ifm_tensor, _, weight_tensor, _ = op.get_ifm_ifm2_weights_ofm()
-- 
cgit v1.2.1