aboutsummaryrefslogtreecommitdiff
path: root/ethosu/vela/tensor_allocation.py
diff options
context:
space:
mode:
authorLouis Verhaard <louis.verhaard@arm.com>2020-08-05 16:11:29 +0200
committerLouis Verhaard <louis.verhaard@arm.com>2020-08-17 15:10:21 +0200
commit0b8268a0dac80aa22133ca83ed6912d3b565439a (patch)
tree159fe485c156d6a3f3a1a65ab1b1a24ff68f2849 /ethosu/vela/tensor_allocation.py
parent458a208c44f70a9848f1e8e2e91f28ce3641c48f (diff)
downloadethos-u-vela-0b8268a0dac80aa22133ca83ed6912d3b565439a.tar.gz
MLBEDSW-2688: Improved LUT support
- Support for more than one 256-byte LUT in SHRAM
- No DMA is performed for a LUT that is already located in SHRAM
- Added MemArea.Shram, used for LUT, to avoid false address collision asserts during SRAM tensor allocation
- Added read access to LUT in memory access calculation

Change-Id: If4d1eded5ed029d253f4f5efb2d80495fc3eac99
Signed-off-by: Louis Verhaard <louis.verhaard@arm.com>
Diffstat (limited to 'ethosu/vela/tensor_allocation.py')
-rw-r--r--ethosu/vela/tensor_allocation.py6
1 file changed, 6 insertions(+), 0 deletions(-)
diff --git a/ethosu/vela/tensor_allocation.py b/ethosu/vela/tensor_allocation.py
index f29296d1..bb91145e 100644
--- a/ethosu/vela/tensor_allocation.py
+++ b/ethosu/vela/tensor_allocation.py
@@ -26,6 +26,7 @@ from .greedy_allocation import allocate_live_ranges as greedy_allocate_live_rang
from .nn_graph import TensorAllocator
from .tensor import MemArea
from .tensor import MemType
+from .tensor import TensorPurpose
def linear_allocate_live_ranges(live_ranges, alloc_granularity=16):
@@ -44,6 +45,11 @@ def linear_allocate_live_ranges(live_ranges, alloc_granularity=16):
if allocated_tens.weight_compression_config == tens.weight_compression_config:
address = allocated_tens.address
break
+ if tens.purpose == TensorPurpose.LUT:
+ for allocated_tens in allocated_tensors:
+ if allocated_tens.equivalent(tens):
+ address = allocated_tens.address
+ break
lr.set_address(address)
allocated_tensors += lr.tensors
if address == total_sz: