From d1836c7a3293a1da8e34662fa1b52d08bd48c578 Mon Sep 17 00:00:00 2001
From: Patrik Gustavsson
Date: Thu, 4 Feb 2021 08:22:18 +0100
Subject: MLBEDSW-3937 Fix moving FM to fast storage

Feature maps were never moved to fast storage when the tensor was set
not to use NHCWB16. This patch enables feature maps to be evaluated for
moving to fast storage also when the tensor uses NHWC.

Signed-off-by: Patrik Gustavsson
Change-Id: I6367c975e7af8739c774cb7c34b43fb9a6776c8c
---
 ethosu/vela/scheduler.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/ethosu/vela/scheduler.py b/ethosu/vela/scheduler.py
index 6cbff500..90b89421 100644
--- a/ethosu/vela/scheduler.py
+++ b/ethosu/vela/scheduler.py
@@ -964,10 +964,10 @@ class DynamicProgrammingScheduler:
             if ps.placement != PassPlacement.Npu:
                 continue
             for output in ps.outputs:
-                if output.purpose != TensorPurpose.FeatureMap or output.avoid_NHCWB16:
+                if output.purpose != TensorPurpose.FeatureMap:
                     continue
 
-                use_NHCWB16 = True
+                use_NHCWB16 = not output.avoid_NHCWB16
                 use_fast_storage = True
                 rewrites = []
                 for op in output.consumer_list:
@@ -1001,7 +1001,7 @@ class DynamicProgrammingScheduler:
                         # Detect no-op reshapes by comparing their full input and output tensor shapes.
                         inshape = op.ifm_shapes[0]
                         compatible_shape = [(inshape == oper.ofm_shapes[0]) for oper in get_rewrites(op)]
-                        use_NHCWB16 = compatible_shape and all(compatible_shape)
+                        use_NHCWB16 &= compatible_shape and all(compatible_shape)
                     else:
                         use_NHCWB16 = False
                         use_fast_storage = False
--
cgit v1.2.1
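
For illustration, a minimal Python sketch of the flag logic before and after
this patch. This is not Vela code: the function names and boolean inputs are
hypothetical stand-ins for the tensor's avoid_NHCWB16 attribute and the
per-consumer shape checks. It shows the two effects of the change: the old
code skipped a flagged tensor before the fast-storage evaluation ever ran,
and its plain '=' let the last reshape consumer overwrite the verdict of
earlier ones, whereas '&=' accumulates the constraint across all consumers.

# Minimal sketch, not Vela code; inputs are hypothetical stand-ins.

def choose_format_old(avoid_NHCWB16, consumer_shape_ok):
    # Old behaviour: a tensor flagged avoid_NHCWB16 was skipped before the
    # fast-storage evaluation, so it could never be moved to fast storage.
    if avoid_NHCWB16:
        return None  # never even considered
    use_NHCWB16 = True
    for ok in consumer_shape_ok:
        use_NHCWB16 = ok  # '=': the last consumer's verdict wins
    return use_NHCWB16

def choose_format_new(avoid_NHCWB16, consumer_shape_ok):
    # New behaviour: the tensor is still evaluated (so the fast-storage
    # decision still happens); only the NHCWB16 choice honours the avoid
    # flag, and '&=' accumulates the verdict over every consumer.
    use_NHCWB16 = not avoid_NHCWB16
    for ok in consumer_shape_ok:
        use_NHCWB16 &= ok
    return use_NHCWB16

# One incompatible consumer followed by a compatible one:
print(choose_format_old(False, [False, True]))  # True  (bug: overwritten)
print(choose_format_new(False, [False, True]))  # False (accumulated)

Note how the new version still reaches a decision for flagged tensors instead
of bailing out early; in the patch, use_fast_storage remains True in that
case, which is what allows such tensors to be placed in fast storage.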