-rw-r--r--  OPTIONS.md                           11
-rw-r--r--  ethosu/vela/compiler_driver.py        5
-rw-r--r--  ethosu/vela/errors.py                 7
-rw-r--r--  ethosu/vela/greedy_allocation.py     43
-rw-r--r--  ethosu/vela/live_range.py            29
-rw-r--r--  ethosu/vela/tensor_allocation.py     20
-rw-r--r--  ethosu/vela/test/test_live_range.py   7
-rw-r--r--  ethosu/vela/vela.py                  15
8 files changed, 99 insertions, 38 deletions
diff --git a/OPTIONS.md b/OPTIONS.md
index fa5f4136..acda037a 100644
--- a/OPTIONS.md
+++ b/OPTIONS.md
@@ -256,6 +256,17 @@ in optimisations that use less SRAM, albeit at the cost of performance (inferenc
vela network.tflite --weight-estimation-scaling=1.2
```
+### Allocation alignment
+
+Controls the allocation byte alignment. This only affects CPU tensors; NPU tensors always remain 16-byte
+aligned, independent of this option. The alignment must be a power of two and greater than or equal to 16.
+**Type: Integer**
+**Default: 16**
+
+```bash
+vela network.tflite --allocation-alignment 128
+```
+
## Verbose Print Options
All of the options below are disabled by default and enabling them will add
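
The constraint documented above (a power of two, at least 16 bytes) is the same one the CLI validation later in this patch enforces. A minimal standalone sketch of that check; the helper name `is_valid_allocation_alignment` is illustrative and not part of Vela:

```python
def is_valid_allocation_alignment(alignment):
    # Must be a power of two and at least 16 bytes (the NPU allocation quantum)
    return alignment >= 16 and alignment & (alignment - 1) == 0

assert is_valid_allocation_alignment(128)       # accepted
assert not is_valid_allocation_alignment(24)    # rejected: not a power of two
assert not is_valid_allocation_alignment(8)     # rejected: below the 16-byte minimum
```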
diff --git a/ethosu/vela/compiler_driver.py b/ethosu/vela/compiler_driver.py
index 94900ad5..92fe5840 100644
--- a/ethosu/vela/compiler_driver.py
+++ b/ethosu/vela/compiler_driver.py
@@ -36,6 +36,7 @@ from .nn_graph import PassPlacement
from .nn_graph import TensorAllocator
from .rewrite_graph import verify_graph_health
from .tensor import MemType
+from .tensor import Tensor
class CompilerOptions:
@@ -62,6 +63,7 @@ Note the difference between ArchitectureFeatures and CompilerOptions
tensor_allocator=TensorAllocator.Greedy,
timing=False,
output_dir="outputs",
+ allocation_alignment=Tensor.AllocationQuantum,
):
self.verbose_graph = verbose_graph
@@ -78,6 +80,7 @@ Note the difference between ArchitectureFeatures and CompilerOptions
self.tensor_allocator = tensor_allocator
self.timing = timing
self.output_dir = output_dir
+ self.allocation_alignment = allocation_alignment
def __str__(self):
return type(self).__name__ + ": " + str(self.__dict__)
@@ -192,6 +195,7 @@ def compiler_driver(nng, arch, options, scheduler_options):
options.tensor_allocator,
options.verbose_allocation,
options.show_minimum_possible_allocation,
+ allocation_alignment=options.allocation_alignment,
)
# Generate command streams and serialise Npu-ops into tensors
@@ -231,6 +235,7 @@ def compiler_driver(nng, arch, options, scheduler_options):
TensorAllocator.LinearAlloc,
options.verbose_allocation,
options.show_minimum_possible_allocation,
+ allocation_alignment=options.allocation_alignment,
)
npu_performance.calc_performance_for_network(nng, arch)
diff --git a/ethosu/vela/errors.py b/ethosu/vela/errors.py
index 2c93fbc6..1a30d546 100644
--- a/ethosu/vela/errors.py
+++ b/ethosu/vela/errors.py
@@ -52,6 +52,13 @@ class OptionError(VelaError):
self.data = "Incorrect argument to CLI option: {} {}: {}".format(option, option_value, msg)
+class AllocationError(VelaError):
+ """Raised when allocation fails"""
+
+ def __init__(self, msg):
+ self.data = msg
+
+
def OperatorError(op, msg):
"""Called when parsing an operator results in errors"""
diff --git a/ethosu/vela/greedy_allocation.py b/ethosu/vela/greedy_allocation.py
index 1cbfce3f..661644a9 100644
--- a/ethosu/vela/greedy_allocation.py
+++ b/ethosu/vela/greedy_allocation.py
@@ -16,6 +16,7 @@
# Description:
# Allocate tensor addresses using a greedy algorithm.
from . import numeric_util
+from .errors import AllocationError
class GreedyAllocator:
@@ -37,24 +38,25 @@ class GreedyAllocator:
best_offset = numeric_util.round_up(current_top, new_lr.get_alignment())
best_offset_fit = (1 << 64) - 1
+ aligned_size = numeric_util.round_up(size, new_lr.get_alignment())
current_offset = 0
for start_addr, lr in self.current_allocs:
aligned_current_offset = numeric_util.round_up(current_offset, new_lr.get_alignment())
- if aligned_current_offset + size <= start_addr and start_addr - current_offset < best_offset_fit:
+ if aligned_current_offset + aligned_size <= start_addr and start_addr - current_offset < best_offset_fit:
best_offset = current_offset
best_offset_fit = start_addr - current_offset
current_offset = start_addr + lr.size
best_offset = new_lr.set_address(best_offset)
- self.memory_required = max(self.memory_required, best_offset + size)
+ self.memory_required = max(self.memory_required, best_offset + aligned_size)
self.current_allocs.append((best_offset, new_lr))
self.current_allocs = list(sorted(self.current_allocs))
def dealloc(self, lr_to_dealloc):
self.current_allocs = [(start_addr, lr) for start_addr, lr in self.current_allocs if lr != lr_to_dealloc]
- def allocate_live_ranges(self, verbose_allocation):
+ def allocate_live_ranges(self, verbose_allocation, alignment):
lrs = set()
for lr in self.live_ranges.ranges.values():
lrs.add((lr.start_time, lr.end_time, lr))
@@ -68,25 +70,34 @@ class GreedyAllocator:
self.alloc(new_lr)
- assert self.verify_allocation()
+ self.verify_allocation(alignment)
return self.memory_required
- def verify_allocation(self):
+ def verify_allocation(self, alignment):
lrs = list(self.live_ranges.ranges.values())
for n in lrs:
+ for tens in n.tensors:
+ if not all(op and op.run_on_npu for op in tens.ops + tens.consumer_list):
+ # This is a CPU tensor, verify alignment
+ if tens.address % alignment != 0:
+ raise AllocationError("Tensor {} not aligned to {} bytes".format(tens.name, alignment))
+
for m in lrs:
if n != m and n.overlaps_ranges(m):
overlap, tens_n, tens_m = n.overlaps_address(m)
if overlap and not (tens_n.equivalent(tens_m) and tens_n.address == tens_m.address):
- print("Solution failed, overlapping buffer!")
- print(tens_n.address, tens_n.address + n.size, n.name)
- print(tens_m.address, tens_m.address + m.size, m.name)
- print()
- return False
-
- return True
-
-
-def allocate_live_ranges(nng, arch, live_ranges, mem_area, verbose_allocation=False):
+ raise AllocationError(
+ "Overlapping buffers: {}: {} -> {} and {}: {} -> {}".format(
+ n.name,
+ tens_n.address,
+ tens_n.address + n.size,
+ m.name,
+ tens_m.address,
+ tens_m.address + m.size,
+ )
+ )
+
+
+def allocate_live_ranges(nng, arch, live_ranges, mem_area, alignment, verbose_allocation=False):
g = GreedyAllocator(nng, arch, live_ranges, mem_area)
- return g.allocate_live_ranges(verbose_allocation)
+ return g.allocate_live_ranges(verbose_allocation, alignment)
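
The key change in `alloc()` is that the tensor's *size*, not just its offset, is rounded up to the live range's alignment, so the next tensor packed directly on top also starts aligned. A simplified, self-contained sketch of the patched first-fit search (it folds the address assignment into the search and tracks plain `(start, size)` pairs, so names and details differ from the real `GreedyAllocator`):

```python
def round_up(value, alignment):
    # Round value up to the next multiple of a power-of-two alignment
    return (value + alignment - 1) & ~(alignment - 1)

def first_fit(current_allocs, size, alignment):
    # current_allocs: sorted list of (start_addr, size) pairs.
    # Both the candidate offset and the tensor size are rounded up,
    # so whatever is packed directly above starts aligned too.
    aligned_size = round_up(size, alignment)
    best_offset, best_fit = None, (1 << 64) - 1
    current_offset = 0
    for start_addr, alloc_size in current_allocs:
        aligned_offset = round_up(current_offset, alignment)
        gap = start_addr - current_offset
        if aligned_offset + aligned_size <= start_addr and gap < best_fit:
            best_offset, best_fit = aligned_offset, gap
        current_offset = start_addr + alloc_size
    if best_offset is None:  # no gap fits; allocate at the (aligned) top
        best_offset = round_up(current_offset, alignment)
    return best_offset

allocs = [(0, 100), (256, 64)]    # two live allocations
print(first_fit(allocs, 40, 16))  # -> 112: aligned start of the 100..256 gap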
diff --git a/ethosu/vela/live_range.py b/ethosu/vela/live_range.py
index fe00b622..156090f7 100644
--- a/ethosu/vela/live_range.py
+++ b/ethosu/vela/live_range.py
@@ -23,12 +23,13 @@ from .tensor import Tensor
class LiveRange:
- def __init__(self, tens):
+ def __init__(self, tens, alignment):
self.tensors = [] # Tensors that are assigned to the same LiveRange will be allocated to the same address
self.start_time = 99999999999
self.end_time = -1
self.size = 0
self.name = ""
+ self.alignment = alignment
if tens:
self.add_tensor(tens)
@@ -100,15 +101,10 @@ class LiveRange:
return addr
def get_alignment(self):
- # Get max alignment of LiveRange's tensors
- if self.tensors:
- alignment = 0
- for tens in self.tensors:
- alignment = max(alignment, tens.alignment)
+ return self.alignment
- return alignment
-
- return Tensor.AllocationQuantum
+ def set_alignment(self, alignment):
+ self.alignment = max(self.alignment, alignment)
def merge_memory_op_ranges(sg, lr_graph, tensor_should_be_ignored, target_mem_area):
@@ -135,14 +131,15 @@ class LiveRangeGraph:
self.processed_subgraphs = set()
self.current_time = 0
- def get_or_create_range(self, tens):
+ def get_or_create_range(self, tens, alignment=Tensor.AllocationQuantum):
for rng in self.ranges.values():
# Return the live range of the tensor (or its cpu/npu clone)
if any(tensor in rng.tensors for tensor in [tens, tens.npu_tensor, tens.cpu_tensor]):
+ rng.set_alignment(alignment)
return rng
# No live range found for the tensor, create a new one
- rng = LiveRange(tens)
+ rng = LiveRange(tens, alignment)
self.ranges[tens] = rng
return rng
@@ -225,6 +222,7 @@ def extract_live_ranges_from_cascaded_passes(
use_ifm_ofm_overlap=True,
ignore_subgraph_input_output_tensors=False,
lr_graph=None,
+ allocation_alignment=Tensor.AllocationQuantum,
):
if lr_graph is None:
lr_graph = LiveRangeGraph()
@@ -277,7 +275,7 @@ def extract_live_ranges_from_cascaded_passes(
for tens in cps.inputs:
if tensor_should_be_ignored(tens, target_mem_area, target_mem_type_set):
continue
- rng = lr_graph.get_or_create_range(tens)
+ rng = lr_graph.get_or_create_range(tens, allocation_alignment)
rng.mark_usage(time_for_pass)
cps_primary_op = cps.passes[0].primary_op
@@ -285,6 +283,7 @@ def extract_live_ranges_from_cascaded_passes(
if cps_primary_op and cps_primary_op.type == "NpuOp" and MemType.Permanent_CPU not in target_mem_type_set:
# If the primary-op is an NpuOp that means this is where an Npu subgraph
# is called. Go into said subgraph and extract live ranges before continuing.
+ # Use default allocation alignment of 16 for Npu tensors
npu_sg = cps_primary_op.attrs["subgraph"]
lr_graph = extract_live_ranges_from_cascaded_passes(
npu_sg,
@@ -302,13 +301,13 @@ def extract_live_ranges_from_cascaded_passes(
for tens in cps.intermediates:
if tensor_should_be_ignored(tens, target_mem_area, target_mem_type_set):
continue
- rng = lr_graph.get_or_create_range(tens)
+ rng = lr_graph.get_or_create_range(tens, allocation_alignment)
rng.mark_usage(time_for_pass)
for tens in cps.outputs:
if tensor_should_be_ignored(tens, target_mem_area, target_mem_type_set):
continue
- rng = lr_graph.get_or_create_range(tens)
+ rng = lr_graph.get_or_create_range(tens, allocation_alignment)
output_time = time_for_pass
if not mark_output_tensors_overlapping_with_input_tensors and is_element_wise:
output_time += 1
@@ -338,7 +337,7 @@ def extract_live_ranges_from_cascaded_passes(
for tens in sg.output_tensors:
if tensor_should_be_ignored(tens, target_mem_area, target_mem_type_set):
continue
- rng = lr_graph.get_or_create_range(tens)
+ rng = lr_graph.get_or_create_range(tens, allocation_alignment)
rng.mark_usage(end_time)
# Add subgraph to set of processed subgraphs
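
The patch moves alignment from a per-tensor maximum computed on demand to a property fixed when the live range is created or looked up, with `set_alignment` only ever raising it. A toy illustration, assuming the default quantum of 16:

```python
class LiveRangeSketch:
    # Toy model of the new alignment handling on LiveRange
    def __init__(self, alignment=16):  # 16 == Tensor.AllocationQuantum
        self.alignment = alignment

    def set_alignment(self, alignment):
        # Alignments only ever tighten: a range shared between a CPU
        # tensor and its NPU clone keeps the largest requested value
        self.alignment = max(self.alignment, alignment)

lr = LiveRangeSketch()
lr.set_alignment(128)  # CPU tensor created with --allocation-alignment 128
lr.set_alignment(16)   # NPU clone requests the default; no downgrade
assert lr.alignment == 128
```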
diff --git a/ethosu/vela/tensor_allocation.py b/ethosu/vela/tensor_allocation.py
index bb91145e..2d464eec 100644
--- a/ethosu/vela/tensor_allocation.py
+++ b/ethosu/vela/tensor_allocation.py
@@ -22,14 +22,16 @@ import numpy as np
from . import live_range
from . import numeric_util
+from .errors import AllocationError
from .greedy_allocation import allocate_live_ranges as greedy_allocate_live_ranges
from .nn_graph import TensorAllocator
from .tensor import MemArea
from .tensor import MemType
+from .tensor import Tensor
from .tensor import TensorPurpose
-def linear_allocate_live_ranges(live_ranges, alloc_granularity=16):
+def linear_allocate_live_ranges(live_ranges, alloc_granularity=Tensor.AllocationQuantum):
# Allocates using increasing addresses. Duplicate constant tensors will be allocated to the same address
total_sz = 0
allocated_tensors = []
@@ -55,9 +57,19 @@ def linear_allocate_live_ranges(live_ranges, alloc_granularity=16):
if address == total_sz:
total_sz += numeric_util.round_up(int(math.ceil(lr.size)), alloc_granularity)
+ verify_alignment(live_ranges, alloc_granularity)
return total_sz
+def verify_alignment(live_ranges, alignment):
+ for lr in live_ranges.ranges.values():
+ for tens in lr.tensors:
+ if not all(op and op.run_on_npu for op in tens.ops + tens.consumer_list):
+ # This is a CPU tensor, verify alignment
+ if tens.address % alignment != 0:
+ raise AllocationError("Tensor {} not aligned to {} bytes".format(tens.name, alignment))
+
+
def mark_sram_used_for_cascaded_passes(sg, lrs):
end_pos = max(ps.time for ps in sg.cascaded_passes) + 2
mem_usage = np.zeros(end_pos, dtype=np.int64)
@@ -113,6 +125,7 @@ def allocate_tensors(
verbose_allocation=False,
show_minimum_possible_allocation=False,
lr_graph=None,
+ allocation_alignment=Tensor.AllocationQuantum,
):
ignore_subgraph_input_output_tensors = False
lrs = live_range.extract_live_ranges_from_cascaded_passes(
@@ -123,14 +136,15 @@ def allocate_tensors(
use_ifm_ofm_overlap=use_ifm_ofm_overlap,
ignore_subgraph_input_output_tensors=ignore_subgraph_input_output_tensors,
lr_graph=lr_graph,
+ allocation_alignment=allocation_alignment,
)
if lrs.ranges:
tens_alloc = tensor_allocator
if tens_alloc == TensorAllocator.Greedy:
- total_sz = greedy_allocate_live_ranges(sg, arch, lrs, mem_area, verbose_allocation)
+ total_sz = greedy_allocate_live_ranges(sg, arch, lrs, mem_area, allocation_alignment, verbose_allocation)
elif tens_alloc == TensorAllocator.LinearAlloc:
- total_sz = linear_allocate_live_ranges(lrs, 16)
+ total_sz = linear_allocate_live_ranges(lrs, allocation_alignment)
else:
assert 0
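
The new `verify_alignment` only checks tensors that have at least one producer or consumer not running on the NPU. A sketch of that predicate with mock objects (all names here are invented):

```python
class Op:
    def __init__(self, run_on_npu):
        self.run_on_npu = run_on_npu

class Tens:
    def __init__(self, name, address, ops, consumers):
        self.name, self.address = name, address
        self.ops, self.consumer_list = ops, consumers

def check_alignment(tens, alignment):
    # Mirrors verify_alignment(): only tensors touched by a CPU op are checked
    if not all(op and op.run_on_npu for op in tens.ops + tens.consumer_list):
        if tens.address % alignment != 0:
            raise ValueError("Tensor {} not aligned to {} bytes".format(tens.name, alignment))

check_alignment(Tens("cpu_out", 256, [Op(False)], []), 128)            # 256 % 128 == 0: passes
check_alignment(Tens("npu_only", 48, [Op(True)], [Op(True)]), 128)     # skipped: all ops on NPU
```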
diff --git a/ethosu/vela/test/test_live_range.py b/ethosu/vela/test/test_live_range.py
index d087dd99..2a99da54 100644
--- a/ethosu/vela/test/test_live_range.py
+++ b/ethosu/vela/test/test_live_range.py
@@ -20,6 +20,7 @@ from unittest.mock import MagicMock
import pytest
from ethosu.vela.live_range import LiveRange
+from ethosu.vela.tensor import Tensor
class TestLiveRange:
@@ -28,7 +29,7 @@ class TestLiveRange:
tens.storage_size.return_value = 4
tens.name = "test"
- live_range = LiveRange(tens=tens)
+ live_range = LiveRange(tens, Tensor.AllocationQuantum)
assert live_range.size == 4
assert live_range.name == "test"
assert live_range.tensors == [tens]
@@ -39,7 +40,7 @@ class TestLiveRange:
tens.storage_size.side_effect = [4, 3]
tens.name = "test"
- live_range = LiveRange(tens=tens)
+ live_range = LiveRange(tens, Tensor.AllocationQuantum)
live_range.add_tensor(tens)
assert live_range.size == 4
@@ -52,7 +53,7 @@ class TestLiveRange:
tens.storage_size.side_effect = [4, 5]
tens.name = "test"
- live_range = LiveRange(tens=tens)
+ live_range = LiveRange(tens, Tensor.AllocationQuantum)
# Expect an AssertionError with a message
with pytest.raises(AssertionError, match=r".* to the same LiveRange .*"):
live_range.add_tensor(tens)
diff --git a/ethosu/vela/vela.py b/ethosu/vela/vela.py
index 91899c28..923d8ec8 100644
--- a/ethosu/vela/vela.py
+++ b/ethosu/vela/vela.py
@@ -36,6 +36,7 @@ from .nn_graph import PassPlacement
from .nn_graph import TensorAllocator
from .scheduler import ParetoMetric
from .tensor import MemArea
+from .tensor import Tensor
def process(fname, arch, model_reader_options, compiler_options, scheduler_options):
@@ -259,7 +260,12 @@ def main(args=None):
default=1.0,
help=("Performs an additional scaling of weight compression scale estimate (default: %(default)s)"),
)
-
+ parser.add_argument(
+ "--allocation-alignment",
+ type=int,
+ default=Tensor.AllocationQuantum,
+ help=("Controls the allocation byte alignment of cpu tensors (default: %(default)s)"),
+ )
args = parser.parse_args(args=args)
# Read configuration file
@@ -280,6 +286,12 @@ def main(args=None):
else:
force_block_config = None
+ alignment = args.allocation_alignment
+ if alignment < 16:
+ parser.error("the following argument needs to be greater or equal to 16: ALLOCATION_ALIGNMENT")
+ if alignment & (alignment - 1) != 0:
+ parser.error("the following argument needs to be a power of 2: ALLOCATION_ALIGNMENT")
+
arch = architecture_features.ArchitectureFeatures(
vela_config=config,
system_config=args.system_config,
@@ -307,6 +319,7 @@ def main(args=None):
tensor_allocator=args.tensor_allocator,
timing=args.timing,
output_dir=args.output_dir,
+ allocation_alignment=alignment,
)
scheduler_options = scheduler.SchedulerOptions(
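
Taken together, the flag flows from argparse through `CompilerOptions` into both allocators. A minimal standalone sketch of the same parse-then-validate pattern (this is not Vela's actual `main()`):

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--allocation-alignment", type=int, default=16)
args = parser.parse_args(["--allocation-alignment", "128"])

alignment = args.allocation_alignment
if alignment < 16:
    parser.error("the following argument needs to be greater than or equal to 16: ALLOCATION_ALIGNMENT")
if alignment & (alignment - 1) != 0:
    parser.error("the following argument needs to be a power of 2: ALLOCATION_ALIGNMENT")
print(alignment)  # -> 128
```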