author     Andreas Nevalainen <andreas.nevalainen@arm.com>    2020-08-26 10:56:23 +0200
committer  patrik.gustavsson <patrik.gustavsson@arm.com>      2020-08-27 13:01:31 +0000
commit     f0c59bf945d7746961fa05186d1353ed91f587bc (patch)
tree       34cc51d5bd84d99d09009164fc1101b4a597f370
parent     ba69518cef84a495c104e51d100875cdca717a22 (diff)
download   ethos-u-vela-f0c59bf945d7746961fa05186d1353ed91f587bc.tar.gz
MLBEDSW-2719: Update kernel size check
Updated the kernel size check: width and height were swapped. Also added a check on the sum of absolute weights.

Signed-off-by: Andreas Nevalainen <andreas.nevalainen@arm.com>
Change-Id: Idb18cf258ac19b3a0d71134dab5a117bcd778b59
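The two restrictions added by this commit can be illustrated with a small standalone NumPy sketch. This is only an illustration, not Vela's SupportedOperators API: the function name and its arguments are hypothetical, and the weight layout is assumed to be HWIO (height, width, input channels, output channels), as stated in the diff comment.

import numpy as np

def kernel_restrictions_ok(quant_weights, zero_point, dilation_w=1, dilation_h=1):
    """Hypothetical helper mirroring the checks added in this commit."""
    kernel_h, kernel_w = quant_weights.shape[0], quant_weights.shape[1]

    # Effective (dilated) kernel extent: k + (k - 1) * (dilation - 1)
    dilated_w = kernel_w + (kernel_w - 1) * (dilation_w - 1)
    dilated_h = kernel_h + (kernel_h - 1) * (dilation_h - 1)
    if dilated_w > 64 or dilated_h > 64:
        return False

    # Sum of absolute, zero-point-adjusted weights over the H, W and
    # input-channel axes; the largest per-output-channel sum must not
    # exceed 127 * 65536.
    weights = quant_weights.astype(np.int64) - zero_point
    totals = np.sum(np.absolute(weights), axis=(0, 1, 2))
    return np.amax(totals) <= 127 * 65536

# Example: a 3x3 kernel with dilation 2, 8 input and 16 output channels passes.
rng = np.random.default_rng(0)
w = rng.integers(0, 256, size=(3, 3, 8, 16), dtype=np.int64)
print(kernel_restrictions_ok(w, zero_point=128, dilation_w=2, dilation_h=2))

Note that, as in the diff, the 64-pixel limit applies to the dilated kernel extent, while the sum limit applies to the quantised weights after zero-point adjustment.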
-rw-r--r--  ethosu/vela/supported_operators.py  22
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/ethosu/vela/supported_operators.py b/ethosu/vela/supported_operators.py
index ec81e76b..567c05ca 100644
--- a/ethosu/vela/supported_operators.py
+++ b/ethosu/vela/supported_operators.py
@@ -206,14 +206,20 @@ class SupportedOperators:
         if weight_tensor.element_size() > 1:
             return False

-        # check kernel size
-        dilated_weight_w = weight_tensor.shape[0] + (weight_tensor.shape[0] - 1) * (dilation_w_factor - 1)
-        dilated_weight_h = weight_tensor.shape[1] + (weight_tensor.shape[1] - 1) * (dilation_h_factor - 1)
-        if (
-            dilated_weight_w > 64
-            or dilated_weight_h > 64
-            or dilated_weight_w * dilated_weight_h * weight_tensor.shape[2] > 127 * 65536
-        ):
+        # check kernel size [HWIO]
+        dilated_weight_w = weight_tensor.shape[1] + (weight_tensor.shape[1] - 1) * (dilation_w_factor - 1)
+        dilated_weight_h = weight_tensor.shape[0] + (weight_tensor.shape[0] - 1) * (dilation_h_factor - 1)
+
+        if dilated_weight_w > 64 or dilated_weight_h > 64:
+            return False
+
+        # check weight sums over [HWI]
+        zero_point = weight_tensor.quantization.zero_point
+        quant_weights = weight_tensor.quant_values.astype(np.int64)
+        weights = quant_weights - zero_point
+        totals = np.sum(np.absolute(weights), axis=(0, 1, 2))
+
+        if np.amax(totals) > 127 * 65536:
             return False

         # check batch size