path: root/ethosu/vela/shared_buffer_allocation.py
author     Fredrik Svedberg <fredrik.svedberg@arm.com>    2020-09-29 10:00:39 +0200
committer  patrik.gustavsson <patrik.gustavsson@arm.com>  2020-09-30 07:52:39 +0000
commit     0f98b361288c71fca327969346db32de098c797b (patch)
tree       8b2905a6e763832a0029179d655c481b14e0a8a1 /ethosu/vela/shared_buffer_allocation.py
parent     0265f402c7ae1e875470298b4130fcc2f7ab4e23 (diff)
[MLBEDSW-2802] Fix 5D tensor crash
Fixed crash in networks with 5D tensors.
Fixed crash for (int32) tensors without quantization.
Added validity checks for concatenation.
Moved unfusing of activation function from tflite_reader to graph_optimiser.

Signed-off-by: Fredrik Svedberg <fredrik.svedberg@arm.com>
Change-Id: Ib9ba8891dc95ef5491e15d0feedef44331a26393
Diffstat (limited to 'ethosu/vela/shared_buffer_allocation.py')
-rw-r--r--  ethosu/vela/shared_buffer_allocation.py  5
1 file changed, 3 insertions, 2 deletions
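
The first hunk below addresses the "(int32) tensors without quantization" crash: the old check dereferenced t.quantization unconditionally, so a tensor whose quantization was None raised an AttributeError. The following is a minimal sketch of the failure mode and the patched logic, using hypothetical FakeQuantization/FakeTensor stand-ins rather than Vela's real tensor classes:

# Minimal sketch, not Vela code: FakeQuantization/FakeTensor are hypothetical stand-ins.
from typing import Optional

class FakeQuantization:
    def __init__(self, scale_f32: Optional[float]):
        self.scale_f32 = scale_f32

class FakeTensor:
    def __init__(self, quantization: Optional[FakeQuantization]):
        self.quantization = quantization

# One quantized tensor and one tensor without quantization (e.g. an int32 tensor).
tensors = [FakeTensor(FakeQuantization(0.5)), FakeTensor(None)]

# Old check: evaluating the generator dereferences t.quantization and crashes.
try:
    has_scale = None not in (t.quantization.scale_f32 for t in tensors)
except AttributeError as exc:
    print("old check crashes:", exc)

# New check: skip tensors without quantization, then require that every tensor
# contributed a non-None scale before declaring has_scale True.
scales = [t.quantization.scale_f32 for t in tensors if t.quantization is not None]
has_scale = len(tensors) == len(scales) and None not in scales
print("new check:", has_scale)  # False: one tensor carries no quantization scale
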
diff --git a/ethosu/vela/shared_buffer_allocation.py b/ethosu/vela/shared_buffer_allocation.py
index 63e2268d..7657dffa 100644
--- a/ethosu/vela/shared_buffer_allocation.py
+++ b/ethosu/vela/shared_buffer_allocation.py
@@ -38,7 +38,8 @@ class SharedBufferAllocation:
         ifm_tensor, ifm2_tensor, weight_tensor, ofm_tensor = ps.get_primary_op_ifm_ifm2_weights_ofm()
         tensors = [t for t in (ifm_tensor, ifm2_tensor, ofm_tensor) if t is not None]
-        has_scale = None not in (t.quantization.scale_f32 for t in tensors)
+        scales = [t.quantization.scale_f32 for t in tensors if t.quantization is not None]
+        has_scale = len(tensors) == len(scales) and not None in scales
         strides = (1, 1, 1, 1)
         dilation = (1, 1, 1, 1)
@@ -192,7 +193,7 @@ def find_block_configs_suitable_for_pass_and_shared_buffer(arch, ps):
     # Constrain the search space if the OFM is smaller than the max block size
     # - Add other block search constraints here if required
-    if len(alloc.ofm_tensor.shape) == 2:
+    if len(alloc.ofm_tensor.shape) <= 2:
         max_block_height = max_block_width = alloc.ofm_tensor.shape[0]
     else:
         max_block_width = alloc.ofm_tensor.shape[-2]
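
The second hunk relaxes the block-search constraint from rank-exactly-2 to rank-at-most-2 OFM shapes. With "== 2", a rank-1 OFM shape fell through to the else branch and indexed shape[-2], which is out of range for a one-element shape. Below is a minimal sketch of that behaviour; constrain_block is a hypothetical helper, not a Vela function, and the height assignment in its else branch is an assumed continuation of the hunk, which is truncated above:

# Minimal sketch, not Vela code: constrain_block is a hypothetical helper, and the
# else-branch height assignment is an assumption (that line is not shown in the hunk).
def constrain_block(ofm_shape):
    if len(ofm_shape) <= 2:  # previously "== 2", so rank-1 shapes hit the else branch
        max_block_height = max_block_width = ofm_shape[0]
    else:
        max_block_width = ofm_shape[-2]
        max_block_height = ofm_shape[-3]
    return max_block_height, max_block_width

print(constrain_block([1, 8, 8, 16]))  # rank-4 OFM -> (8, 8)
print(constrain_block([32, 16]))       # rank-2 OFM -> (32, 32)
print(constrain_block([64]))           # rank-1 OFM -> (64, 64); with "== 2" this raised IndexError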