diff options
author | Louis Verhaard <louis.verhaard@arm.com> | 2020-08-05 16:11:29 +0200 |
---|---|---|
committer | Louis Verhaard <louis.verhaard@arm.com> | 2020-08-17 15:10:21 +0200 |
commit | 0b8268a0dac80aa22133ca83ed6912d3b565439a (patch) | |
tree | 159fe485c156d6a3f3a1a65ab1b1a24ff68f2849 /ethosu/vela/tensor.py | |
parent | 458a208c44f70a9848f1e8e2e91f28ce3641c48f (diff) | |
download | ethos-u-vela-0b8268a0dac80aa22133ca83ed6912d3b565439a.tar.gz |
MLBEDSW-2688: Improved LUT support
- Support for more than one 256-byte LUT in SHRAM
- No DMA is performed for a LUT that is already located in SHRAM
- Added MemArea.Shram, used for LUT, to avoid false address collision
asserts during SRAM tensor allocation
- Added read access to LUT in memory access calculation
Change-Id: If4d1eded5ed029d253f4f5efb2d80495fc3eac99
Signed-off-by: Louis Verhaard <louis.verhaard@arm.com>
Diffstat (limited to 'ethosu/vela/tensor.py')
-rw-r--r-- | ethosu/vela/tensor.py | 12 |
1 file changed, 8 insertions, 4 deletions
diff --git a/ethosu/vela/tensor.py b/ethosu/vela/tensor.py
index ecca0e0e..312e8f35 100644
--- a/ethosu/vela/tensor.py
+++ b/ethosu/vela/tensor.py
@@ -54,16 +54,17 @@ class MemArea(enum.IntFlag):
     Dram = 2
     OnChipFlash = 3
     OffChipFlash = 4
-    Size = OffChipFlash + 1
+    Shram = 5  # for LUT
+    Size = Shram + 1

     def display_name(self):
-        return ("Unknown", "SRAM", "DRAM", "On-chip Flash", "Off-chip Flash", "Size")[self.value]
+        return ("Unknown", "SRAM", "DRAM", "On-chip Flash", "Off-chip Flash", "SHRAM", "Size")[self.value]

     def identifier_name(self):
-        return ("unknown", "sram", "dram", "on_chip_flash", "off_chip_flash", "size")[self.value]
+        return ("unknown", "sram", "dram", "on_chip_flash", "off_chip_flash", "shram", "size")[self.value]

     def all():
-        return (MemArea.Sram, MemArea.Dram, MemArea.OnChipFlash, MemArea.OffChipFlash)
+        return (MemArea.Sram, MemArea.Dram, MemArea.OnChipFlash, MemArea.OffChipFlash, MemArea.Shram)

     def __str__(self):
         return self.name
@@ -728,6 +729,9 @@ class Tensor:
             return True
         return False

+    def equivalent(self, tens):
+        return self.equivalence_id == tens.equivalence_id
+
     def set_all_shapes(self, shape):
         self.shape = shape
         self.storage_shape = shape