From cce872bc3de3ed5f9bf1aa1a8cf9ce41cf2b2520 Mon Sep 17 00:00:00 2001
From: Fredrik Svedberg
Date: Thu, 2 Sep 2021 15:20:52 +0200
Subject: MLBEDSW-5056 Output diff wav2letter (int16)

Fixed output diff for wav2letter int16 by correcting the scaling
used for LeakyRelu.

Signed-off-by: Fredrik Svedberg
Change-Id: I8be1e14c25d223dc6e42c4ec498ff4d3d9de65d7
---
 ethosu/vela/tflite_graph_optimiser.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/ethosu/vela/tflite_graph_optimiser.py b/ethosu/vela/tflite_graph_optimiser.py
index 3f743e43..255a1f5e 100644
--- a/ethosu/vela/tflite_graph_optimiser.py
+++ b/ethosu/vela/tflite_graph_optimiser.py
@@ -881,14 +881,14 @@ def convert_lrelu_to_mul_max(op, arch):
     mul_alpha = Operation(Op.Mul, op.name + "_mul_alpha")
     mul_alpha.add_input_tensor(ifm)
     # Create const tensor containing alpha as scalar
-    alpha = op.attrs["alpha"]
+    alpha = np.float32(op.attrs["alpha"])
     quantization = ifm.quantization.clone()
     quantization.min = 0
     quantization.max = alpha * (quantization.quant_max - quantization.quant_min)
     quantization.zero_point = 0
-    if np.isinf(1 / np.float32(alpha)):
+    if np.isinf(1 / alpha):
         # Handling of alpha near zero
-        quantization.scale_f32 = 1
+        quantization.scale_f32 = np.float32(1)
         scalar = 0
     else:
         quantization.scale_f32 = alpha
@@ -914,7 +914,7 @@ def convert_lrelu_to_mul_max(op, arch):
     quantization = ifm.quantization.clone()
     quantization.min = 0
     quantization.max = quantization.quant_max - quantization.quant_min
-    quantization.scale_f32 = 1
+    quantization.scale_f32 = np.float32(1)
     quantization.zero_point = 0
     identity_tens = create_const_tensor(
         op.name + "_id_scalar", [], ifm.dtype, [1], np.uint8, quantization=quantization
--
cgit v1.2.1
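
Context for the change, as a minimal standalone Python sketch (the tiny
alpha value, the bare variable names, and the `scalar = 1` in the else
branch are illustrative assumptions, not taken from the Vela code base;
only the branching pattern mirrors the hunk above). Before the patch,
alpha stayed a Python float and `scale_f32` could be an int, while the
near-zero guard divided in float32; the fix converts alpha to np.float32
once, so the overflow check and the quantisation scale use one precision.

    import numpy as np

    # An alpha this small has a finite reciprocal in float64, but the
    # reciprocal overflows float32 (max ~3.4e38). The np.isinf(1 / alpha)
    # guard only fires if the division itself runs at float32 precision.
    alpha64 = 1e-40                  # Python float (float64): 1 / alpha64 = 1e40, finite
    alpha32 = np.float32(alpha64)    # subnormal float32, still non-zero

    print(np.isinf(1 / alpha64))     # False: float64 headroom hides the problem
    print(np.isinf(1 / alpha32))     # True: 1e40 overflows to inf in float32
                                     # (NumPy also emits an overflow RuntimeWarning)

    # Same pattern as the patched hunk: convert once, then branch on the
    # float32 reciprocal so the resulting scale is always a float32 value.
    alpha = np.float32(1e-40)
    if np.isinf(1 / alpha):
        scale_f32 = np.float32(1)    # alpha effectively zero: unit scale, zero scalar
        scalar = 0
    else:
        scale_f32 = alpha            # normal case: the scale carries alpha
        scalar = 1                   # assumed value; not visible in the hunk

Presumably the wav2letter int16 output diff traced back to this mixed
precision: with `scale_f32` sometimes an int or float64 alpha, the Mul
branch of the LeakyRelu decomposition (max(x, alpha * x)) was rescaled
slightly differently than the reference, and pinning everything to
np.float32 removes that discrepancy.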