From 7972ee80215e1fd40f0cea39c688680be945d302 Mon Sep 17 00:00:00 2001
From: Johan Alfven
Date: Tue, 3 Oct 2023 14:46:22 +0200
Subject: MLBEDSW-8102: Fix regression on Argmax int64

- Fixed a regression where the DepthwiseConv used in argmax int64
  had the wrong weight shape.
- The error was introduced when support for a new operator changed
  the weight shape used by the cast utility function. That change
  only worked because reorder_depthwise_weights was called later.
  Since argmax is converted after reorder_depthwise_weights, the
  cast operator in argmax got the wrong shape.
- The fix is to set the correct weight shape in the cast operator
  and then mark that the weights have already been transposed
  correctly.

Change-Id: I61f5694f078cfcaf0d46d43faead6eb7e0a23ade
Signed-off-by: Johan Alfven
---
 ethosu/vela/operation_util.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/ethosu/vela/operation_util.py b/ethosu/vela/operation_util.py
index 44a80b2..e2cdc20 100644
--- a/ethosu/vela/operation_util.py
+++ b/ethosu/vela/operation_util.py
@@ -98,8 +98,8 @@ def create_cast_op(
 
     c = ifm.shape[-1]
 
-    # Weigth shape is in format [h, w, c, b]
-    shape = [1, 1, c, 1]
+    # Weight shape is in format [h, w, b, c] for DepthwiseConv2D
+    shape = [1, 1, 1, c]
     kernel = np.dstack([1] * c)
     identity_quant = QuantizationParameters(scale_f32=1.0, zero_point=0)
     op.add_input_tensor(
@@ -111,6 +111,9 @@
             quantization=identity_quant,
         ),
     )
+    # Set flag to indicate that the weights are already in the correct order
+    # and prevent them from being transposed in reorder_depthwise_weights
+    op.inputs[1].weight_transpose_depthwise = True
     bias_values = [0] * c
     dtype = DataType.int64 if op.ifm.dtype == DataType.int16 else DataType.int32
     op.add_input_tensor(create_const_tensor(op.name + "_bias", [c], dtype, bias_values))
-- 
cgit v1.2.1
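
For illustration, below is a minimal, self-contained sketch of the weight layout this fix relies on, using plain numpy. The helper name make_identity_depthwise_weights is hypothetical; in Vela the tensor is actually built via create_const_tensor inside create_cast_op, as the diff above shows.

import numpy as np

def make_identity_depthwise_weights(c):
    # DepthwiseConv2D weights are laid out [h, w, b, c], so an identity
    # kernel over c channels has shape [1, 1, 1, c]. The regression used
    # [1, 1, c, 1] (the [h, w, c, b] layout), which only worked while
    # reorder_depthwise_weights still ran afterwards and fixed it up.
    shape = [1, 1, 1, c]
    kernel = np.dstack([1] * c)  # numpy array of shape (1, 1, c)
    return np.array(kernel).reshape(shape)

weights = make_identity_depthwise_weights(4)
assert weights.shape == (1, 1, 1, 4)
# In Vela the corresponding weight tensor is then flagged so that
# reorder_depthwise_weights leaves it alone:
#     op.inputs[1].weight_transpose_depthwise = True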