path: root/ethosu/vela/scheduler.py
author    Patrik Gustavsson <patrik.gustavsson@arm.com>  2020-08-13 13:41:05 +0200
committer Fredrik Knutsson <fredrik.knutsson.hunnebo@gmail.com>  2020-08-14 10:49:15 +0000
commit    458a208c44f70a9848f1e8e2e91f28ce3641c48f (patch)
tree      37f23561f75d61746383dafc987b411646baaed8 /ethosu/vela/scheduler.py
parent    be733cf04bb262d4eee791d76f01cecd64ff9255 (diff)
MLBEDSW-2570 Avoid usage of NHCWB16 for some cases
Avoid usage of NHCWB16 when Stack/Pack/Concat is performed in axis 3, and the "concat start" of each slice to be combined is not a multiple of 16.

Signed-off-by: Patrik Gustavsson <patrik.gustavsson@arm.com>
Change-Id: If3f7b4a3424be3c86fc2dc48e8649ce4c4f49485
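As a reading aid, the rule above reduces to a divisibility check on the channel offset at which each slice lands in the concatenated output. A minimal sketch with hypothetical names, not part of this patch; only the 16-channel brick size of NHCWB16 is taken from the commit message:

    # Hypothetical sketch: NHCWB16 stores axis 3 (channels) in bricks of 16,
    # so a concat along that axis only fits the format when every slice
    # starts on a 16-channel boundary.
    def concat_slices_allow_nhcwb16(slice_channel_offsets):
        return all(offset % 16 == 0 for offset in slice_channel_offsets)

    # Example: slices starting at channels [0, 32, 80] are fine; a slice
    # starting at channel 24 forces the tensors away from NHCWB16.
    assert concat_slices_allow_nhcwb16([0, 32, 80])
    assert not concat_slices_allow_nhcwb16([0, 24])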
Diffstat (limited to 'ethosu/vela/scheduler.py')
-rw-r--r--  ethosu/vela/scheduler.py  33
1 file changed, 19 insertions, 14 deletions
diff --git a/ethosu/vela/scheduler.py b/ethosu/vela/scheduler.py
index cc9278fd..f3b3a79c 100644
--- a/ethosu/vela/scheduler.py
+++ b/ethosu/vela/scheduler.py
@@ -670,14 +670,16 @@ class DynamicProgrammingScheduler:
for pred_candidate in ps.dag_predecessors:
if len(pred_candidate.outputs) == 1 and pred_candidate.outputs[0] == ifm_tensor:
# we found a predecessor that produces this IFM tensor
- if len(pred_candidate.successors) == 1 and pred_candidate.successors[0] == ps:
- # and it only has one successor, namely us
- if pred_candidate.placement == PassPlacement.Npu:
- if pred_candidate.npu_block_type in self.ifm_stream_npu_blocks:
- # and it is on the Npu
- if not self.avoid_for_spilling(pred_candidate):
- # and fusable - it's a candidate
- pred_pass_list.append(pred_candidate)
+ if not ifm_tensor.avoid_NHCWB16:
+ # and NHCWB16 format is not to be avoided
+ if len(pred_candidate.successors) == 1 and pred_candidate.successors[0] == ps:
+ # and it only has one successor, namely us
+ if pred_candidate.placement == PassPlacement.Npu:
+ if pred_candidate.npu_block_type in self.ifm_stream_npu_blocks:
+ # and it is on the Npu
+ if not self.avoid_for_spilling(pred_candidate):
+ # and fusable - it's a candidate
+ pred_pass_list.append(pred_candidate)
if not pred_pass_list:
return ABORT_SEARCH
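Restated outside the diff, the nested conditions in the hunk above form a single predicate. The following flattening is illustrative only and is not code from the patch; the names are copied from the diff:

    # Illustrative only: the ifm-streaming candidate test from the hunk
    # above, flattened into one boolean expression.
    def is_ifm_stream_candidate(self, pred_candidate, ps, ifm_tensor):
        return (
            not ifm_tensor.avoid_NHCWB16                       # NHCWB16 is not to be avoided
            and len(pred_candidate.successors) == 1
            and pred_candidate.successors[0] == ps             # we are its only successor
            and pred_candidate.placement == PassPlacement.Npu  # it runs on the NPU
            and pred_candidate.npu_block_type in self.ifm_stream_npu_blocks
            and not self.avoid_for_spilling(pred_candidate)    # and it is fusable
        )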
@@ -953,12 +955,15 @@ class DynamicProgrammingScheduler:
if output.purpose != TensorPurpose.FeatureMap:
continue
- use_NHCWB16 = True
- for op in output.consumer_list:
- if op is None or op.type == "Reshape":
- use_NHCWB16 = False
- else:
- use_NHCWB16 &= op.run_on_npu
+ use_NHCWB16 = not output.avoid_NHCWB16
+
+ if use_NHCWB16:
+ # Check consumers, to see if NHCWB16 can be used in the output
+ for op in output.consumer_list:
+ if op is None or op.type == "Reshape":
+ use_NHCWB16 = False
+ else:
+ use_NHCWB16 &= op.run_on_npu
if use_NHCWB16:
output.set_format(TensorFormat.NHCWB16, arch)
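This file only consumes the avoid_NHCWB16 flag (in the IFM-streaming check and in the output-format choice above); the commit message implies the flag is set on the affected tensors outside scheduler.py. A hedged sketch of what that producer-side marking could look like; apart from avoid_NHCWB16 itself, every name and field below is an assumption:

    # Hypothetical, not from this patch: when tensors are joined along
    # axis 3, mark any input whose start offset in the concatenated output
    # is not a multiple of 16, so the scheduler keeps it out of NHCWB16.
    def mark_unaligned_concat_inputs(concat_op):
        offset = 0
        for ifm in concat_op.inputs:
            if offset % 16 != 0:
                ifm.avoid_NHCWB16 = True  # slice does not start on a 16-channel boundary
            offset += ifm.shape[-1]       # next slice begins where this one ends (axis 3)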