path: root/ethosu/vela/tensor_allocation.py
author    Louis Verhaard <louis.verhaard@arm.com>  2020-05-07 08:12:58 +0200
committer Tim Hall <tim.hall@arm.com>  2020-06-18 17:53:52 +0100
commit    3c07c97e0202c1cf01eba06c24b37a8f15ff7a7c (patch)
tree      5856b7727a99b3c0baa00f5486f0c3b53e8e38e6 /ethosu/vela/tensor_allocation.py
parent    86d49935c3736c7aaa419abda07fa20c37c991a8 (diff)
download  ethos-u-vela-3c07c97e0202c1cf01eba06c24b37a8f15ff7a7c.tar.gz
MLBEDSW-1941: Bug fix shared weights
If the same weight tensor was used with different block configs, errors would occur. Fixed by always cloning weight tensors, using a global weight compression cache, and modifying the linear allocator to detect multiple usages of the same weight compression.

Change-Id: I91ca59176e1c59c66e0ac7a4227f2b5f0b47053f
Signed-off-by: Louis Verhaard <louis.verhaard@arm.com>
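The global weight compression cache mentioned in the message is not part of this file's diff. As a rough illustration of the idea only, the sketch below keys compressed weights on the compression configuration so that cloned weight tensors compressed with the same block config reuse one result; the names (WeightCompressionConfig, get_or_compress) are hypothetical, not vela's API.

# Hypothetical sketch of a global weight compression cache keyed by config.
# WeightCompressionConfig and get_or_compress are illustrative names, not vela's API.
from dataclasses import dataclass

@dataclass(frozen=True)  # frozen -> hashable, so the config can be used as a dict key
class WeightCompressionConfig:
    tensor_name: str      # identifies the underlying weight values
    block_depth: int      # block config that influences the encoding

_compression_cache = {}

def get_or_compress(config, compress_fn):
    # Compress only on the first request for a given config; later requests
    # (e.g. from cloned weight tensors) reuse the cached result.
    if config not in _compression_cache:
        _compression_cache[config] = compress_fn()
    return _compression_cache[config]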
Diffstat (limited to 'ethosu/vela/tensor_allocation.py')
-rw-r--r--  ethosu/vela/tensor_allocation.py | 16
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/ethosu/vela/tensor_allocation.py b/ethosu/vela/tensor_allocation.py
index cd2b570f..e3952df3 100644
--- a/ethosu/vela/tensor_allocation.py
+++ b/ethosu/vela/tensor_allocation.py
@@ -27,18 +27,26 @@ from .nn_graph import TensorAllocator
 from .tensor import MemArea
-def linear_allocate_live_ranges(live_ranges, alloc_granularity=256):
+def linear_allocate_live_ranges(live_ranges, alloc_granularity=16):
+    # Allocates using increasing addresses. Duplicate constant tensors will be allocated to the same address
     total_sz = 0
     allocated_tensors = []
-    # just assign increasing addresses
+    # just assign increasing addresses, except for duplicates
     for tens, lr in live_ranges.ranges.items():
         if tens in allocated_tensors:
             continue
-        lr.set_address(total_sz)
+        address = total_sz
+        if tens.weight_compression_config is not None:
+            for allocated_tens in allocated_tensors:
+                if allocated_tens.weight_compression_config == tens.weight_compression_config:
+                    address = allocated_tens.address
+                    break
+        lr.set_address(address)
         allocated_tensors += lr.tensors
-        total_sz += numeric_util.round_up(int(math.ceil(lr.size)), alloc_granularity)
+        if address == total_sz:
+            total_sz += numeric_util.round_up(int(math.ceil(lr.size)), alloc_granularity)
     return total_sz
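To see the new allocation rule end to end, here is a minimal, self-contained sketch; the FakeTensor and FakeLiveRange classes and the local round_up helper are stand-ins for vela's own types, not its real API. It shows the behaviour the patch introduces: live ranges whose tensors share a weight_compression_config are placed at the address of the first such tensor, and only the first copy advances total_sz.

import math

ALLOC_GRANULARITY = 16  # matches the new default in this commit

def round_up(value, granularity):
    # Stand-in for numeric_util.round_up
    return ((value + granularity - 1) // granularity) * granularity

class FakeTensor:
    # Stand-in for vela's Tensor: only the fields the allocator touches.
    def __init__(self, name, weight_compression_config=None):
        self.name = name
        self.weight_compression_config = weight_compression_config
        self.address = None

class FakeLiveRange:
    # Stand-in for vela's LiveRange: one tensor, a size, and set_address().
    def __init__(self, tensor, size):
        self.tensors = [tensor]
        self.size = size

    def set_address(self, address):
        for tens in self.tensors:
            tens.address = address

def linear_allocate(live_ranges, alloc_granularity=ALLOC_GRANULARITY):
    # Same shape as the patched allocator: increasing addresses, except that a
    # tensor whose weight compression was already allocated reuses that address
    # and does not grow total_sz.
    total_sz = 0
    allocated_tensors = []
    for lr in live_ranges:
        tens = lr.tensors[0]
        if tens in allocated_tensors:
            continue
        address = total_sz
        if tens.weight_compression_config is not None:
            for allocated_tens in allocated_tensors:
                if allocated_tens.weight_compression_config == tens.weight_compression_config:
                    address = allocated_tens.address
                    break
        lr.set_address(address)
        allocated_tensors += lr.tensors
        if address == total_sz:
            total_sz += round_up(int(math.ceil(lr.size)), alloc_granularity)
    return total_sz

# Two weight tensors sharing a compression config land at the same address.
shared_cfg = ("block_cfg_a",)  # any equal-comparable stand-in for the config
w1 = FakeTensor("w1", shared_cfg)
w2 = FakeTensor("w2", shared_cfg)
act = FakeTensor("act")
ranges = [FakeLiveRange(w1, 100), FakeLiveRange(w2, 100), FakeLiveRange(act, 50)]
total = linear_allocate(ranges)
assert w1.address == w2.address == 0
assert act.address == 112   # only one copy of the shared weights contributes to total_sz
print(total)                # 176 = round_up(100, 16) + round_up(50, 16)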