From 37ce38c208601c6a7901d2dc266ed7db6842405b Mon Sep 17 00:00:00 2001
From: Jacob Bohlin
Date: Mon, 2 Nov 2020 14:56:29 +0100
Subject: MLBEDSW-3275: Added infinity check for Relu scaling values

Added a supported_operators check for Relu activation functions. If the
scaling value overflows to infinity, the operator is placed on the CPU.

Signed-off-by: Jacob Bohlin
Change-Id: I66b7bec062599609aadcbb7531caebbc45a7451f
Signed-off-by: Jacob Bohlin
---
 ethosu/vela/supported_operators.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/ethosu/vela/supported_operators.py b/ethosu/vela/supported_operators.py
index dfb7bc7d..24c72913 100644
--- a/ethosu/vela/supported_operators.py
+++ b/ethosu/vela/supported_operators.py
@@ -774,6 +774,12 @@ class SupportedOperators:
             print("Warning:", op.type, "input shape differs from output shape, placing on CPU")
             return False
 
+        elif op.type.is_relu_op():
+            ifm_tensor, ofm_tensor = op.get_ifm_ofm()
+            if np.isinf(ifm_tensor.quantization.scale_f32 / ofm_tensor.quantization.scale_f32):
+                print("Warning:", op.type, "has an infinite scale value, placing on CPU")
+                return False
+
         return True
 
     @classmethod
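
A minimal sketch of the failure mode the check guards against, assuming float32
quantization scales as suggested by the scale_f32 field in the patch (the
scale values below are hypothetical, chosen only to force the overflow):
dividing a large input scale by a very small output scale overflows float32 to
infinity, which is exactly what the added np.isinf check detects.

    import numpy as np

    # Hypothetical quantization scales picked to overflow float32.
    ifm_scale = np.float32(3.4e38)   # close to the float32 maximum
    ofm_scale = np.float32(1e-10)    # very small output scale

    rescale = ifm_scale / ofm_scale  # overflows to inf in float32
    print(np.isinf(rescale))         # True -> the Relu would be placed on the CPU

Since such an infinite rescale value cannot be represented in the NPU's
fixed-point scaling, falling back to the CPU is the safe choice.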