diff options
author | Johan Alfven <johan.alfven@arm.com> | 2023-03-16 11:04:31 +0100 |
---|---|---|
committer | Johan Alfven <johan.alfven@arm.com> | 2023-03-21 11:07:21 +0100 |
commit | 3340a88d51cdce4d00ea82a2211d9e7fd40cf5c5 (patch) | |
tree | 55fea2d4ab65bb9692b906bb5b53a989cbe8bee2 /ethosu/vela/scheduler.py | |
parent | 126558e26df26830c2d331ec0041dc9a4f1a0d38 (diff) | |
download | ethos-u-vela-3340a88d51cdce4d00ea82a2211d9e7fd40cf5c5.tar.gz |
MLBEDSW-7430: Remove non local mem usage from cascade info
- There is a latent bug when calculating the mem usage parallel to the
sub schedule. The error is in the calculation done when optimizing the
sub schedules: there, the cascade size is subtracted from the snapshot
usage to determine the non-local memory usage. The problem is that the
cascade mem usage actually also includes non-local memory, so the end
result will be zero. This is normally not a problem, but it becomes one
when starting to optimize sub schedules while optimizing for Size.
- The solution is to not include the non-local usage in the cascade
info; the scheduler already has this information.
- Corrected usage of persistent initial IFM. This size should not be
included for Dedicated SRAM since only intermediate buffers are in SRAM.
- Added some comments to clarify the code in the cascade builder.
Change-Id: I473b36e0d69550ab6565f4ef028195636b362997
Signed-off-by: Johan Alfven <johan.alfven@arm.com>
Diffstat (limited to 'ethosu/vela/scheduler.py')
-rw-r--r-- | ethosu/vela/scheduler.py | 11 |
1 files changed, 6 insertions, 5 deletions
diff --git a/ethosu/vela/scheduler.py b/ethosu/vela/scheduler.py index 16531c2c..83e19bc6 100644 --- a/ethosu/vela/scheduler.py +++ b/ethosu/vela/scheduler.py @@ -952,8 +952,7 @@ class Scheduler: if cost[sched_op].cascade: # This Op is part of a cascade - use the cascade's memory usage cascade_info = cascades[cost[sched_op].cascade] - # Non-local memory usage is already included in the cascade_info - peak_mem_usage = max(cascade_info.mem_usage, peak_mem_usage) + op_mem_usage = cascade_info.mem_usage + non_local_mem_usage.get(sched_op, 0) else: # This Op is not part of a cascade - calculate the memory usage op_weight_buffer = sum(tens.storage_size() for tens in cost[sched_op].buffered_weight_tensors) @@ -964,7 +963,7 @@ class Scheduler: + op_weight_buffer + non_local_mem_usage.get(sched_op, 0) ) - peak_mem_usage = max(op_mem_usage, peak_mem_usage) + peak_mem_usage = max(op_mem_usage, peak_mem_usage) return peak_mem_usage @@ -1021,9 +1020,11 @@ class Scheduler: time_for_cascade = ref_cost[sub_schedule_ops[0]].time_index mem_usage_parallel_to_sub_schedule = ref_schedule.memory_snapshot[time_for_cascade] - cascade_info.mem_usage # If the first Op's IFM has other consumers it has to live throughout the whole sub-schedule whether it's - # included in a cascade or not + # included in a cascade or not. Not valid in Dedicated SRAM mode (spilling enabled). persistent_initial_ifm = ( - sub_schedule_ops[0].ifm_size_in_bytes() if len(sub_schedule_ops[0].ifm.connection.consumers) > 1 else 0 + sub_schedule_ops[0].ifm_size_in_bytes() + if not self.arch.is_spilling_enabled() and len(sub_schedule_ops[0].ifm.connection.consumers) > 1 + else 0 ) # Calculate non-local-mem-usage per Operator non_local_mem_usage = {} |