diff options
author | erik.andersson@arm.com <erik.andersson@arm.com> | 2021-03-10 08:39:23 +0100 |
---|---|---|
committer | erik.andersson@arm.com <erik.andersson@arm.com> | 2021-03-12 13:41:16 +0100 |
commit | 8ba0792731d47de64a59d93359340f3c88fc4a62 (patch) | |
tree | ba7ff1ae0401627f3870cd1d1ff2ae451179a1f8 /ethosu | |
parent | 807278a7d3cc305e232c05b7c098a13485f70203 (diff) | |
download | ethos-u-vela-8ba0792731d47de64a59d93359340f3c88fc4a62.tar.gz |
MLBEDSW-4070: Addresses errors with the LeakyReLU operator.
LeakyReLU IFMs will now have unique addresses
and the alpha tensor will have correct scaling.
Signed-off-by: erik.andersson@arm.com <erik.andersson@arm.com>
Change-Id: If94fa91a0b61175309ac450bf6b38a63362780ab
Diffstat (limited to 'ethosu')
-rw-r--r-- | ethosu/vela/graph_optimiser.py | 7 |
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/ethosu/vela/graph_optimiser.py b/ethosu/vela/graph_optimiser.py
index 4e7c0fd3..1e890bb8 100644
--- a/ethosu/vela/graph_optimiser.py
+++ b/ethosu/vela/graph_optimiser.py
@@ -937,12 +937,13 @@ def convert_lrelu_to_mul_max(op, arch):
         scalar = 0
     else:
         quantization.scale_f32 = alpha
-        scalar = 1
+        scalar = alpha
     alpha_tens = create_const_tensor(
-        op.name + "_alpha_scalar", [], ifm.dtype, [scalar], np.int8, quantization=quantization
+        op.name + "_alpha_scalar", [], ifm.dtype, [scalar], np.float32, quantization=quantization
     )
+    alpha_tens.quant_values = np.array([1])
     mul_alpha.add_input_tensor(alpha_tens)
-    fm_alpha = ofm.clone(op.name + "_alpha")
+    fm_alpha = ofm.clone(op.name + "_alpha", set_unique=True)
     mul_alpha.set_output_tensor(fm_alpha)
     mul_alpha.set_ifm_ofm_shapes()
     DebugDatabase.add_optimised(op, mul_alpha)