From 0b8268a0dac80aa22133ca83ed6912d3b565439a Mon Sep 17 00:00:00 2001
From: Louis Verhaard
Date: Wed, 5 Aug 2020 16:11:29 +0200
Subject: MLBEDSW-2688: Improved LUT support

- Support for more than one 256-byte LUT in SHRAM
- No DMA is performed for a LUT that is already located in SHRAM
- Added MemArea.Shram, used for LUT, to avoid false address collision
  asserts during SRAM tensor allocation
- Added read access to LUT in memory access calculation

Change-Id: If4d1eded5ed029d253f4f5efb2d80495fc3eac99
Signed-off-by: Louis Verhaard
---
 ethosu/vela/tensor_allocation.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/ethosu/vela/tensor_allocation.py b/ethosu/vela/tensor_allocation.py
index f29296d1..bb91145e 100644
--- a/ethosu/vela/tensor_allocation.py
+++ b/ethosu/vela/tensor_allocation.py
@@ -26,6 +26,7 @@ from .greedy_allocation import allocate_live_ranges as greedy_allocate_live_rang
 from .nn_graph import TensorAllocator
 from .tensor import MemArea
 from .tensor import MemType
+from .tensor import TensorPurpose


 def linear_allocate_live_ranges(live_ranges, alloc_granularity=16):
@@ -44,6 +45,11 @@ def linear_allocate_live_ranges(live_ranges, alloc_granularity=16):
             if allocated_tens.weight_compression_config == tens.weight_compression_config:
                 address = allocated_tens.address
                 break
+        if tens.purpose == TensorPurpose.LUT:
+            for allocated_tens in allocated_tensors:
+                if allocated_tens.equivalent(tens):
+                    address = allocated_tens.address
+                    break
         lr.set_address(address)
         allocated_tensors += lr.tensors
         if address == total_sz:
-- 
cgit v1.2.1
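
The sketch below isolates the address-reuse idea added in the second hunk. It is a minimal, hypothetical reconstruction: SimpleTensor, its equivalent() method, and linear_allocate() are illustrative stand-ins, not Vela's actual Tensor, LiveRange, or allocator APIs. Only the control flow mirrors the patch: two equivalent LUT tensors receive the same address, so the same 256-byte LUT is never placed twice.

    # Minimal, standalone sketch of the LUT address-reuse logic (assumed
    # simplifications throughout; not Vela's real classes or signatures).
    import math


    class SimpleTensor:
        def __init__(self, name, purpose, values, size):
            self.name = name
            self.purpose = purpose  # e.g. "LUT"
            self.values = values    # LUT contents, used for the equivalence test
            self.size = size        # storage size in bytes
            self.address = None

        def equivalent(self, other):
            # Hypothetical equivalence test: same purpose and identical contents.
            return self.purpose == other.purpose and self.values == other.values


    def linear_allocate(tensors, alloc_granularity=16):
        # Assign increasing addresses, except that an equivalent, already
        # allocated LUT is reused instead of being given fresh space.
        total_sz = 0
        allocated = []
        for tens in tensors:
            address = total_sz
            if tens.purpose == "LUT":
                for prev in allocated:
                    if prev.equivalent(tens):
                        address = prev.address
                        break
            tens.address = address
            allocated.append(tens)
            if address == total_sz:  # only grow when a fresh slot was used
                total_sz += alloc_granularity * math.ceil(tens.size / alloc_granularity)
        return total_sz


    if __name__ == "__main__":
        sigmoid_lut = list(range(256))
        tensors = [
            SimpleTensor("lut_a", "LUT", sigmoid_lut, 256),
            SimpleTensor("lut_b", "LUT", sigmoid_lut, 256),  # duplicate contents
        ]
        print(linear_allocate(tensors))                # 256: duplicate reused lut_a's slot
        print(tensors[0].address, tensors[1].address)  # 0 0

Running the sketch prints a total size of 256 bytes and the same address (0) for both LUTs, matching the intent of the patch: a LUT already present at an address is reused rather than re-allocated (and, in the full change, not re-DMA'd into SHRAM).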