diff options
author | Fredrik Svedberg <fredrik.svedberg@arm.com> | 2021-10-07 10:54:20 +0200 |
---|---|---|
committer | Fredrik Svedberg <fredrik.svedberg@arm.com> | 2021-10-07 11:00:51 +0200 |
commit | 6f87be40a97a46a97c52a81e6e46eda0bdb73f9e (patch) | |
tree | 5e7a407c96a015d6098c3ec68f44d6086915e897 /ethosu/vela/high_level_command_to_npu_op.py | |
parent | 838df0a1a61d5ff7b4d6db4629438338d3c6b354 (diff) | |
download | ethos-u-vela-6f87be40a97a46a97c52a81e6e46eda0bdb73f9e.tar.gz |
MLBEDSW-4081 Output diff for some combinations of avgpool + relu (update)
Fixed regression when the AveragePool has explicit rescaling.
Signed-off-by: Fredrik Svedberg <fredrik.svedberg@arm.com>
Change-Id: I121a0cbf9ab15c8862739266e088b5db7805446b
Diffstat (limited to 'ethosu/vela/high_level_command_to_npu_op.py')
-rw-r--r-- | ethosu/vela/high_level_command_to_npu_op.py | 8 |
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/ethosu/vela/high_level_command_to_npu_op.py b/ethosu/vela/high_level_command_to_npu_op.py
index f67114ff..318960ec 100644
--- a/ethosu/vela/high_level_command_to_npu_op.py
+++ b/ethosu/vela/high_level_command_to_npu_op.py
@@ -214,7 +214,11 @@ def use_zero_point_0(ps, tens: Tensor, is_ifm_tensor: bool) -> bool:
         (
             ps.primary_op.activation is None
             or forced_ofm_quantization is not None
-            or (ps.primary_op.type.is_avgpool_op() and ps.primary_op.activation.op_type.is_relu_op())
+            or (
+                ps.primary_op.type.is_avgpool_op()
+                and ps.primary_op.activation.op_type.is_relu_op()
+                and not ps.primary_op.rescale
+            )
         )
         and (ps.primary_op.memory_function != Op.ConcatSliceWrite)
         and not fused_quantize
@@ -347,7 +351,7 @@ def create_npu_activation(op: Operation) -> NpuActivation:
         act = NpuActivation(act_op)
         act.min = op.activation.min
         act.max = op.activation.max
-    if act_op is NpuActivationOp.NONE_OR_RELU and op.type.is_avgpool_op():
+    if act_op is NpuActivationOp.NONE_OR_RELU and op.type.is_avgpool_op() and not op.rescale:
         quant = op.ofm.quantization
         if quant and quant.zero_point:  # Zero point is not 0
             scale_f32 = 1 if quant.scale_f32 is None else quant.scale_f32