about summary refs log tree commit diff
path: root/ethosu/vela/weight_compressor.py
diff options
context:
space:
mode:
authorLouis Verhaard <louis.verhaard@arm.com>2020-05-25 15:05:26 +0200
committerTim Hall <tim.hall@arm.com>2020-06-18 17:53:52 +0100
commit7db78969dc8ead72f3ded81b6d2a6a7ed798ea62 (patch)
tree011bcf579cc8e0f007f9564a98cc5c05df34322b /ethosu/vela/weight_compressor.py
parent78792223369fa34dacd0e69e189af035283da2ae (diff)
downloadethos-u-vela-7db78969dc8ead72f3ded81b6d2a6a7ed798ea62.tar.gz
MLBEDSW-2067: added custom exceptions
Added custom exceptions to handle different types of input errors. Also performed minor formatting changes using flake8/black. Change-Id: Ie5b05361507d5e569aff045757aec0a4a755ae98 Signed-off-by: Louis Verhaard <louis.verhaard@arm.com>
Diffstat (limited to 'ethosu/vela/weight_compressor.py')
-rw-r--r-- ethosu/vela/weight_compressor.py | 9
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/ethosu/vela/weight_compressor.py b/ethosu/vela/weight_compressor.py
index 04d684e6..a81b1fb4 100644
--- a/ethosu/vela/weight_compressor.py
+++ b/ethosu/vela/weight_compressor.py
@@ -23,6 +23,7 @@ from ethosu import mlw_codec
from .architecture_features import Block
from .data_type import DataType
+from .errors import UnsupportedFeatureError
from .nn_graph import SchedulingStrategy
from .numeric_util import round_up
from .operation import NpuBlockType
@@ -292,14 +293,18 @@ def calc_scales_and_pack_biases(tens, arch, oc_quantum, rescale_for_faf=False):
for weight_scale in weight_scales
]
else:
- assert False, str(ifm_dtype) + " not implemented"
+ raise UnsupportedFeatureError(
+ "Compression of {} is not implemented; tensor: {}".format(ifm_dtype, tens.name)
+ )
else:
if ifm_dtype == DataType.uint8:
scales = [np.double(ifm_scale * weight_scale * 0x3000) for weight_scale in weight_scales]
elif ifm_dtype == DataType.int8 or ifm_dtype == DataType.int16:
scales = [(np.double(ifm_scale * 0x3000) * np.double(weight_scale)) for weight_scale in weight_scales]
else:
- assert False, str(ifm_dtype) + " not implemented"
+ raise UnsupportedFeatureError(
+ "Compression of {} is not implemented; tensor: {}".format(ifm_dtype, tens.name)
+ )
# quantise all of the weight scales into (scale_factor, shift)
if ifm_dtype == DataType.int16: