author    Jacob Bohlin <jacob.bohlin@arm.com>    2020-11-02 14:56:29 +0100
committer patrik.gustavsson <patrik.gustavsson@arm.com>    2020-11-04 08:44:38 +0000
commit    37ce38c208601c6a7901d2dc266ed7db6842405b (patch)
tree      8779443b39edf6c8dd6c0f40c13f6f46ffa389b7
parent    268394d797db60d07eeace05a2c57e927da0ea15 (diff)
MLBEDSW-3275: Added infinity check for Relu scaling values
Added a supported_operators check for Relu activation functions. If the
scaling value overflows to infinity, the operator is placed on the CPU.

Signed-off-by: Jacob Bohlin <jacob.bohlin@arm.com>
Change-Id: I66b7bec062599609aadcbb7531caebbc45a7451f
 ethosu/vela/supported_operators.py | 6 ++++++
 1 file changed, 6 insertions(+), 0 deletions(-)
diff --git a/ethosu/vela/supported_operators.py b/ethosu/vela/supported_operators.py
index dfb7bc7..24c7291 100644
--- a/ethosu/vela/supported_operators.py
+++ b/ethosu/vela/supported_operators.py
@@ -774,6 +774,12 @@ class SupportedOperators:
print("Warning:", op.type, "input shape differs from output shape, placing on CPU")
return False
+ elif op.type.is_relu_op():
+ ifm_tensor, ofm_tensor = op.get_ifm_ofm()
+ if np.isinf(ifm_tensor.quantization.scale_f32 / ofm_tensor.quantization.scale_f32):
+ print("Warning:", op.type, "has an infinite scale value, placing on CPU")
+ return False
+
return True
@classmethod
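
For context, a minimal standalone sketch (not part of the patch) of the failure
mode this check guards against: the quantization scales are float32, so dividing
the input scale by a sufficiently small output scale overflows to infinity, which
np.isinf then detects. The scale values below are hypothetical, chosen only to
trigger the overflow.

import numpy as np

# Hypothetical scales: a near-maximal float32 input scale divided by a
# tiny output scale exceeds the float32 range (~3.4e38) and becomes inf.
ifm_scale = np.float32(3.4e38)
ofm_scale = np.float32(1e-10)

rescale = ifm_scale / ofm_scale  # overflows; numpy emits a RuntimeWarning
print(rescale)                   # inf
print(np.isinf(rescale))         # True -> the operator would be placed on the CPU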