From 0b8268a0dac80aa22133ca83ed6912d3b565439a Mon Sep 17 00:00:00 2001
From: Louis Verhaard
Date: Wed, 5 Aug 2020 16:11:29 +0200
Subject: MLBEDSW-2688: Improved LUT support

- Support for more than one 256-byte LUT in SHRAM
- No DMA is performed for a LUT that is already located in SHRAM
- Added MemArea.Shram, used for LUT, to avoid false address collision
  asserts during SRAM tensor allocation
- Added read access to LUT in memory access calculation

Change-Id: If4d1eded5ed029d253f4f5efb2d80495fc3eac99
Signed-off-by: Louis Verhaard
---
 ethosu/vela/tensor.py | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

(limited to 'ethosu/vela/tensor.py')

diff --git a/ethosu/vela/tensor.py b/ethosu/vela/tensor.py
index ecca0e0e..312e8f35 100644
--- a/ethosu/vela/tensor.py
+++ b/ethosu/vela/tensor.py
@@ -54,16 +54,17 @@ class MemArea(enum.IntFlag):
     Dram = 2
     OnChipFlash = 3
     OffChipFlash = 4
-    Size = OffChipFlash + 1
+    Shram = 5  # for LUT
+    Size = Shram + 1
 
     def display_name(self):
-        return ("Unknown", "SRAM", "DRAM", "On-chip Flash", "Off-chip Flash", "Size")[self.value]
+        return ("Unknown", "SRAM", "DRAM", "On-chip Flash", "Off-chip Flash", "SHRAM", "Size")[self.value]
 
     def identifier_name(self):
-        return ("unknown", "sram", "dram", "on_chip_flash", "off_chip_flash", "size")[self.value]
+        return ("unknown", "sram", "dram", "on_chip_flash", "off_chip_flash", "shram", "size")[self.value]
 
     def all():
-        return (MemArea.Sram, MemArea.Dram, MemArea.OnChipFlash, MemArea.OffChipFlash)
+        return (MemArea.Sram, MemArea.Dram, MemArea.OnChipFlash, MemArea.OffChipFlash, MemArea.Shram)
 
     def __str__(self):
         return self.name
@@ -728,6 +729,9 @@ class Tensor:
             return True
         return False
 
+    def equivalent(self, tens):
+        return self.equivalence_id == tens.equivalence_id
+
     def set_all_shapes(self, shape):
         self.shape = shape
         self.storage_shape = shape
--
cgit v1.2.1
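
Note (not part of the patch): below is a minimal standalone sketch of the updated MemArea enum, illustrating why both name tuples must gain a "SHRAM" entry once Shram is inserted before Size. The Unknown and Sram members are assumed from the existing name tuples; they are not visible in the hunk above.

    import enum

    class MemArea(enum.IntFlag):
        Unknown = 0   # assumed from the name tuples, not shown in the hunk
        Sram = 1      # assumed from the name tuples, not shown in the hunk
        Dram = 2
        OnChipFlash = 3
        OffChipFlash = 4
        Shram = 5  # for LUT
        Size = Shram + 1

        def display_name(self):
            # The tuple is indexed by enum value, so it needs one entry per
            # member, in declaration order.
            return ("Unknown", "SRAM", "DRAM", "On-chip Flash", "Off-chip Flash", "SHRAM", "Size")[self.value]

    # Without the extra "SHRAM" entry, Shram would display as "Size" and
    # Size (value 6) would raise an IndexError.
    assert MemArea.Shram.display_name() == "SHRAM"
    assert MemArea.Size.value == 6

The same indexing argument applies to identifier_name(), and Size is kept as the last member so per-area arrays can still be dimensioned with MemArea.Size. The new Tensor.equivalent() helper simply wraps the existing equivalence_id comparison shown in the second hunk.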