author     Louis Verhaard <louis.verhaard@arm.com>   2021-01-08 11:17:33 +0100
committer  Louis Verhaard <louis.verhaard@arm.com>   2021-01-19 10:00:52 +0100
commit     9a0cff1cd1334f4d3e7dfb542ad0be4f0e71a9de (patch)
tree       a855bfb699fabb2dda5c22b12673a06fed33edef
parent     224e99bd70a443e345d3ea454aedc51bf46cf261 (diff)
download   ethos-u-vela-9a0cff1cd1334f4d3e7dfb542ad0be4f0e71a9de.tar.gz
MLBEDSW-3418: More operator checks for infinity
- Added operator check that OFM scale > smallest float32 number
- Generalized the restriction that IFM/OFM scale must not be infinite

Change-Id: I918f5ea3d8fdec6e8f6bd6780ed13a19d1234ed6
Signed-off-by: Louis Verhaard <louis.verhaard@arm.com>
-rw-r--r--   ethosu/vela/supported_operators.py            | 28
-rw-r--r--   ethosu/vela/test/test_supported_operators.py  | 15
2 files changed, 31 insertions, 12 deletions
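
The two conditions described in the commit message can be reproduced standalone with NumPy. The sketch below is illustrative only; the helper name scales_fit_float32 and the sample values are not part of the patch, which implements the checks in SupportedOperators.constraint_quant_scale_inf:

import numpy as np

def scales_fit_float32(ifm_scale, ofm_scale):
    # OFM scale must be at least the smallest positive normal float32 (~1.1754944e-38)
    if ofm_scale < np.finfo(np.float32).tiny:
        return False
    # The IFM/OFM scale ratio must not overflow float32 to infinity
    if np.isinf(ifm_scale / ofm_scale):
        return False
    return True

print(scales_fit_float32(np.float32(1.0), np.float32(0.5)))    # True
print(scales_fit_float32(np.float32(1e9), np.float32(1e-35)))  # False: ratio overflows to inf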
diff --git a/ethosu/vela/supported_operators.py b/ethosu/vela/supported_operators.py
index 2e35d77..9d31518 100644
--- a/ethosu/vela/supported_operators.py
+++ b/ethosu/vela/supported_operators.py
@@ -135,6 +135,7 @@ class SupportedOperators:
self.generic_constraints.append(SupportedOperators.constraint_tens_quant_scale)
self.generic_constraints.append(SupportedOperators.constraint_tens_quant_per_axis)
self.generic_constraints.append(SupportedOperators.constraint_faf)
+ self.generic_constraints.append(SupportedOperators.constraint_quant_scale_inf)
# Setup specific constraints. Note: the order matters
self.specific_constraints = defaultdict(list)
@@ -180,11 +181,6 @@ class SupportedOperators:
self.specific_constraints[op_type].append(SupportedOperators.constraint_filter_type)
self.specific_constraints[op_type].append(SupportedOperators.constraint_filter_height_range)
self.specific_constraints[op_type].append(SupportedOperators.constraint_filter_product_range)
- # TODO: Check ReduceSum restrictions
-
- # Relu specific checks:
- for op_type in SupportedOperators.relu_ops:
- self.specific_constraints[op_type].append(SupportedOperators.constraint_quant_scale_inf)
# Resizing specific checks:
for op_type in SupportedOperators.resizing_ops:
@@ -552,11 +548,23 @@ class SupportedOperators:
@staticmethod
def constraint_quant_scale_inf(op):
- "The IFM quantization scale divided by the OFM quantization scale must not be infinite"
- ifm_scale = op.ifm.quantization.scale_f32
- ofm_scale = op.ofm.quantization.scale_f32
- valid = not np.isinf(ifm_scale / ofm_scale)
- return valid, f"Op has infinite quantization scale. ifm_scale={ifm_scale} ofm_scale={ofm_scale}"
+ "Input and Output tensors must have quantization scales that fit within float32 precision"
+ if op.ofm is not None and op.ofm.is_quantized():
+ ofm_scale = op.ofm.quantization.scale_f32
+ if ofm_scale < np.finfo(np.float32).tiny:
+ return (
+ False,
+ f"The quantization scale of the output tensor is {ofm_scale}, "
+ + f"minimum supported is: {np.finfo(np.float32).tiny}",
+ )
+ if op.ifm is not None and op.ifm.is_quantized():
+ ifm_scale = op.ifm.quantization.scale_f32
+ if np.isinf(ifm_scale / ofm_scale):
+ return (
+ False,
+ f"IFM scale divided by OFM scale is infinite, ifm_scale={ifm_scale} ofm_scale={ofm_scale}",
+ )
+ return True, "Op's quantization is ok"
@staticmethod
def constraint_depth_multiplier(op):
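
Moving constraint_quant_scale_inf from the Relu-specific list into generic_constraints means the check now runs for every operator type rather than only the Relu-family ops. A simplified sketch of how a two-level constraint registry of this kind is typically evaluated (class, attribute, and method names here are illustrative, not Vela's exact implementation):

from collections import defaultdict

class ConstraintChecker:
    def __init__(self):
        self.generic_constraints = []                  # applied to every operator
        self.specific_constraints = defaultdict(list)  # applied per operator type

    def is_operator_supported(self, op):
        # An operator is supported only if every generic and type-specific
        # constraint returns (True, reason).
        for constraint in self.generic_constraints + self.specific_constraints[op.type]:
            valid, reason = constraint(op)
            if not valid:
                return False
        return True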
diff --git a/ethosu/vela/test/test_supported_operators.py b/ethosu/vela/test/test_supported_operators.py
index f1e8f28..d8fbb98 100644
--- a/ethosu/vela/test/test_supported_operators.py
+++ b/ethosu/vela/test/test_supported_operators.py
@@ -93,7 +93,7 @@ def test_constraint_tens_quant_none_check():
def test_constraint_tens_quant_scale():
- # Quantization scale cannot be infinit
+ # Quantization scale cannot be infinite
qp = QuantizationParameters()
qp.zero_point = 0
qp.scale_f32 = np.inf
@@ -248,8 +248,19 @@ def test_constraint_batch_size():
def test_constraint_quant_scale_inf():
+ # Test handling IFM scale/OFM scale is infinite
op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
- op.ofm.quantization.scale_f32 = np.float32(1e-39)
+ op.ifm.quantization.scale_f32 = np.float32(1e9)
+ op.ofm.quantization.scale_f32 = np.float32(1e-35)
+ assert not support.is_operator_supported(op)
+
+
+def test_constraint_ofm_scale_too_small():
+ # Tests handling of OFM scale < 1e-38
+ shp = [1, 10, 20, 16]
+ op = testutil.create_elemwise_op(Op.Mul, "mul", shp, shp, shp, ofm_quant=testutil.default_quant_params(),)
+ assert support.is_operator_supported(op)
+ op.ofm.quantization.scale_f32 = 1e-43
assert not support.is_operator_supported(op)
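
Both new test cases rely on float32 limits: 1e-43 is below the smallest positive normal float32 (np.finfo(np.float32).tiny, about 1.1754944e-38), so the OFM-scale check fires, while np.float32(1e9) / np.float32(1e-35) is roughly 1e44, beyond the float32 maximum (~3.4e38), and therefore evaluates to infinity, so the ratio check fires. This can be verified independently of the test harness:

import numpy as np

print(np.finfo(np.float32).tiny)                      # ~1.1754944e-38
print(1e-43 < np.finfo(np.float32).tiny)              # True  -> OFM scale rejected
print(np.isinf(np.float32(1e9) / np.float32(1e-35)))  # True  -> IFM/OFM ratio rejected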