about summary refs log tree commit diff
path: root/ethosu/vela/supported_operators.py
diff options
context:
space:
mode:
author    Andreas Nevalainen <andreas.nevalainen@arm.com> 2020-09-11 10:25:09 +0200
committer Andreas Nevalainen <andreas.nevalainen@arm.com> 2020-09-22 14:02:26 +0200
commit d8c032d4be2a641946507b63023456312e333cb8 (patch)
tree   4f55312012f3cdaf536364601f3fb7f1b2511846 /ethosu/vela/supported_operators.py
parent d9e38fe2bc0458fdca83dd4932abee6554fe2eb2 (diff)
download ethos-u-vela-d8c032d4be2a641946507b63023456312e333cb8.tar.gz
MLBEDSW-2813: Handle non-const weights and check shapes
- Added check for non-constant weights in supported operators
- Added check for ifm & ifm2 shapes
- Handle None tensors for CPU operators
- Handle missing attributes for Cast operator

Signed-off-by: Andreas Nevalainen <andreas.nevalainen@arm.com>
Change-Id: I2f16d3d44d0c6da5237550b39273cdb9cc3c7607
Diffstat (limited to 'ethosu/vela/supported_operators.py')
-rw-r--r-- ethosu/vela/supported_operators.py | 15
1 file changed, 15 insertions(+), 0 deletions(-)
diff --git a/ethosu/vela/supported_operators.py b/ethosu/vela/supported_operators.py
index b6551cf9..b0afa2c9 100644
--- a/ethosu/vela/supported_operators.py
+++ b/ethosu/vela/supported_operators.py
@@ -227,6 +227,12 @@ class SupportedOperators:
# check batch size
if ifm_tensor.shape[0] != 1:
return False
+
+ # check non const weights
+ if weight_tensor.values is None:
+ print("Warning:", op.type, "has non-const weights, placing on CPU")
+ return False
+
return True
def check_depthwise_convolution_restrictions(self, op):
@@ -317,6 +323,11 @@ class SupportedOperators:
if not self.check_bias_restrictions(bias_tensor):
return False
+ # check non const weights
+ if weight_tensor.values is None:
+ print("Warning:", op.type, "has non-const weights, placing on CPU")
+ return False
+
return True
def check_element_wise_restrictions(self, op):
@@ -362,6 +373,10 @@ class SupportedOperators:
if op.type == "LeakyRelu" and op.attrs["alpha"] < 0:
return False
+ # check if ifm or ifm2 has ofm shape
+ if ifm_tensor.shape != ofm_tensor.shape and ifm2_tensor.shape != ofm_tensor.shape:
+ return False
+
return True
def check_memory_only_restrictions(self, op):