path: root/ethosu/vela/softmax.py
author     Tim Hall <tim.hall@arm.com>    2023-05-16 22:39:14 +0100
committer  tim.hall <tim.hall@arm.com>    2023-05-17 11:05:57 +0000
commit     5ff4cd12898f44228288a7969b52dff97be30cb2 (patch)
tree       1c8068c02254d5479706e41379bbd1f8c7b33205 /ethosu/vela/softmax.py
parent     0426fe9de82e0f6b339edbd2bec78a5d322fb05f (diff)
download   ethos-u-vela-5ff4cd12898f44228288a7969b52dff97be30cb2.tar.gz
MLBEDSW-7223: Fusing Pad and AvgPool causes diff
- Fixed an issue with the fusing of PAD and AVERAGE_POOL_2D whereby the
  rounding away from zero didn't work because it requires the zero point
  to be at zero, but the input padding required it to be set to the
  desired zero point. This affected both int8 and int16. The solution
  was to remove it by using the bias prior to the scaling
- Refactored the rounding away from zero mode

Change-Id: I8f2df69df06d2a9722315c346646e5a901cb2c3b
Signed-off-by: Tim Hall <tim.hall@arm.com>
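For context, a minimal sketch (helper names are invented, not part of the Vela codebase) of how the two integer rounding behaviours referred to above differ when applied to an arithmetic shift right; the HalfUp behaviour corresponds to the rounding_mode set in the diff below, while away-from-zero is the mode the commit message says was removed from the fused PAD/AVERAGE_POOL_2D path:

def shr_half_up(x: int, shift: int) -> int:
    # Round to nearest, ties towards +infinity: add half of the divisor,
    # then arithmetic shift right (floor division by 2**shift).
    return (x + (1 << (shift - 1))) >> shift

def shr_away_from_zero(x: int, shift: int) -> int:
    # Round to nearest, ties away from zero: symmetric about zero, so it
    # only matches HalfUp for non-negative inputs, i.e. when the zero
    # point sits at zero.
    bias = 1 << (shift - 1)
    return (x + bias) >> shift if x >= 0 else -((-x + bias) >> shift)

for v in (6, -6, 7, -7):
    print(v, shr_half_up(v, 2), shr_away_from_zero(v, 2))
# The two only diverge on negative ties, e.g. for -6 >> 2:
# HalfUp gives -1, away-from-zero gives -2.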
Diffstat (limited to 'ethosu/vela/softmax.py')
-rw-r--r--  ethosu/vela/softmax.py  6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/ethosu/vela/softmax.py b/ethosu/vela/softmax.py
index 5a06c1bd..8f30fa14 100644
--- a/ethosu/vela/softmax.py
+++ b/ethosu/vela/softmax.py
@@ -24,13 +24,13 @@ import numpy as np
from . import fp_math
from . import scaling
-from .api import NpuRoundingMode
from .data_type import DataType
from .debug_database import DebugDatabase
from .operation import ActivationFunction
from .operation import ExplicitScaling
from .operation import Op
from .operation import Operation
+from .operation import RoundingMode
from .operation_util import create_add
from .operation_util import create_clz
from .operation_util import create_depthwise_maxpool
@@ -281,7 +281,7 @@ class SoftMax:
name = f"{self.op.name}_shr{pass_number}"
shift = create_const_tensor(f"{name}_const", [1, 1, 1, 1], DataType.int32, [12], quantization=no_scale_quant)
shr_op = create_shr(name, ifm_exp, shift, no_scale_quant, activation)
- shr_op.rounding_mode = NpuRoundingMode.NATURAL
+ shr_op.rounding_mode = RoundingMode.HalfUp
rescaled_exp = add_op_get_ofm(shr_op)
# PASS 3 - Reduce sum
@@ -443,7 +443,7 @@ class SoftMax:
# PASS 30 - SHR
shr30_op = Operation(Op.SHR, f"{self.op.name}_shr{pass_number}")
- shr30_op.rounding_mode = NpuRoundingMode.NATURAL
+ shr30_op.rounding_mode = RoundingMode.HalfUp
shr30_op.add_input_tensor(scaled_exp)
shr30_op.add_input_tensor(right_shift)
shr30_op.set_output_tensor(ofm)
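As a worked illustration of the HalfUp mode set in the two hunks above (the input value is arbitrary, not taken from a real graph): with the 12-bit shift created in pass 2, a plain truncating shift maps 11000 to 11000 >> 12 = 2, whereas half-up rounding gives (11000 + 2048) >> 12 = 3, matching 11000 / 4096 ≈ 2.69 rounded to the nearest integer.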