aboutsummaryrefslogtreecommitdiff
path: root/ethosu/vela/operation.py
diff options
context:
space:
mode:
authorTim Hall <tim.hall@arm.com>2023-05-16 22:39:14 +0100
committertim.hall <tim.hall@arm.com>2023-05-17 11:05:57 +0000
commit5ff4cd12898f44228288a7969b52dff97be30cb2 (patch)
tree1c8068c02254d5479706e41379bbd1f8c7b33205 /ethosu/vela/operation.py
parent0426fe9de82e0f6b339edbd2bec78a5d322fb05f (diff)
downloadethos-u-vela-5ff4cd12898f44228288a7969b52dff97be30cb2.tar.gz
MLBEDSW-7223: Fusing Pad and AvgPool causes diff
- Fixed an issue with the fusing of PAD and AVERAGE_POOL_2D whereby the rounding away from zero didn't work because it requires the zero point to be at zero but the input padding required it to be set to the desired zero point. This affected both int8 and int16. The solution was to remove it by using the bias prior to the scaling - Refactored the rounding away from zero mode Change-Id: I8f2df69df06d2a9722315c346646e5a901cb2c3b Signed-off-by: Tim Hall <tim.hall@arm.com>
Diffstat (limited to 'ethosu/vela/operation.py')
-rw-r--r-- ethosu/vela/operation.py | 45
1 file changed, 42 insertions, 3 deletions
diff --git a/ethosu/vela/operation.py b/ethosu/vela/operation.py
index 161b17fd..52f06cf0 100644
--- a/ethosu/vela/operation.py
+++ b/ethosu/vela/operation.py
@@ -21,6 +21,7 @@ from __future__ import annotations
import copy
from collections import namedtuple
+from enum import auto
from enum import Enum
from typing import Any
from typing import Dict
@@ -29,7 +30,6 @@ from typing import Optional
from typing import Tuple
from typing import TYPE_CHECKING
-from .api import NpuRoundingMode
from .errors import VelaError
from .ethos_u55_regs.ethos_u55_regs import resampling_mode
from .numeric_util import full_shape
@@ -44,6 +44,13 @@ PointXY = namedtuple("PointXY", "x y")
PointXYZ = namedtuple("PointXYZ", "x y z")
+class RoundingMode(Enum):
+ TFLite = auto() # Round like TensorFlow Lite
+ ToZero = auto() # Round towards zero (truncate)
+ HalfUp = auto() # Round to nearest with x.5 rounded up towards positive infinity (natural)
+ AwayZero = auto() # Round away from zero (towards infinity)
+
+
class NpuBlockType(Enum):
Default = 0
ConvolutionMxN = 1
@@ -491,7 +498,7 @@ class Operation:
"rescale",
"read_offsets",
"read_shapes",
- "rounding_mode",
+ "_rounding_mode",
"explicit_scaling",
"write_offset",
"write_shape",
@@ -528,7 +535,7 @@ class Operation:
self.ofm_shapes: List[Shape4D] = []
self.read_offsets: List[Optional[Shape4D]] = [None, None] # offset for [ifm, ifm2]
self.read_shapes: List[Optional[Shape4D]] = [None, None] # read shape for [ifm, ifm2]
- self.rounding_mode: Optional[NpuRoundingMode] = None
+ self._rounding_mode: Optional[RoundingMode] = None
# Rescale op in TOSA supplies explicit multiplier and shift values
self.explicit_scaling: Optional[ExplicitScaling] = None
# Write offset, for operations that only produce a part of the OFM
@@ -587,6 +594,38 @@ class Operation:
return self._original_type
@property
+ def rounding_mode(self):
+ return self._rounding_mode
+
+ @rounding_mode.setter
+ def rounding_mode(self, mode: RoundingMode):
+ # All rounding modes are supported by all operators with the exception of rounding away from zero (see comment
+ # below)
+ is_supported = True
+ if mode == RoundingMode.AwayZero:
+ # Rounding away from zero does not have direct hardware support and so the compiler implements it indirectly
+ # in different ways. The exact process depends upon the operator type and not all operators are supported.
+ # Basically, rounding away from zero works by adjusting the accumulated value by a "small" amount before
+ # rounding up with the addition of a half (natural rounding). This "small" amount should be big enough to
+ # cause x.5 to be rounded correctly but small enough that smaller values are not incorrectly rounded. This
+ # is done by slightly adjusting the scale and shift on the ofm tensor using the scale and bias tensor,
+            # it has no effect on global scaling (i.e. the ofm_scale register). In addition, the zero points of the
+ # input and/or output tensors may also require forcing to zero but the exact behaviour also depends upon the
+ # corresponding optimisation performed in graph_optimisation.py where the rounding mode is set
+ is_supported = False
+ if self.original_type == Op.ResizeBilinear and self.type == Op.DepthwiseConv2DBias:
+ is_supported = True
+ if self.original_type == Op.AvgPool and self.type == Op.DepthwiseConv2DBias:
+ is_supported = True
+
+ if is_supported:
+ self._rounding_mode = mode
+ else:
+ assert (
+ False
+ ), f"Setting rounding mode = {mode} on {self.original_type} operator '{self.name}' is not supported."
+
+ @property
def type_changed(self):
return self.type != self.original_type