author | Dwight Lidman <dwight.lidman@arm.com> | 2020-11-16 17:40:46 +0100 |
---|---|---|
committer | patrik.gustavsson <patrik.gustavsson@arm.com> | 2020-11-20 09:51:15 +0000 |
commit | c7187434c11151a6a03f252c8718f3bf6445ef5b (patch) | |
tree | c17655e6a888f567aa5dacc38eff54b5a348c00b /ethosu/vela/test/testutil.py | |
parent | 8956761a84f413e6f4c9c7d6e4409b145f81c289 (diff) | |
download | ethos-u-vela-c7187434c11151a6a03f252c8718f3bf6445ef5b.tar.gz | |
MLBEDSW-3302: Reject per-channel scaling for unsupported ops
Vela only supports per-channel scaling for
convolution ops. This commit adds a check that
puts ops with per-channel scaling on the CPU.
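
The check itself lands in supported_operators.py, which is outside this diff (it is limited to testutil.py). As a rough, hypothetical sketch of what such a constraint could look like — the function name, the per-channel detection via the number of weight scales, and the set of convolution ops are assumptions for illustration, not code from the commit:

```python
from typing import Sequence, Tuple

# Hypothetical sketch of a per-channel scaling constraint; not the actual Vela code.
# The op names and the (valid, reason) return convention are illustrative assumptions.
PER_CHANNEL_SCALING_OPS = {"Conv2D", "DepthwiseConv2D", "TransposeConv2D"}


def constraint_per_channel_scaling(op_type: str, weight_scales: Sequence[float]) -> Tuple[bool, str]:
    """Ops with per-channel weight scaling must be convolutions; otherwise leave them on the CPU."""
    uses_per_channel = len(weight_scales) > 1  # more than one scale implies per-axis quantization
    if uses_per_channel and op_type not in PER_CHANNEL_SCALING_OPS:
        return False, f"{op_type} uses per-channel scaling, which is only supported for convolution ops"
    return True, ""


# Example: a fully connected op with three per-channel scales would be rejected (kept on the CPU)
valid, reason = constraint_per_channel_scaling("FullyConnected", [0.1, 0.2, 0.3])
assert not valid
```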
A caveat worth mentioning is that neither
TensorFlow Lite nor TensorFlow Lite Micro
supports per-channel scaling for the CPU-placed
op; however, the problem is moved out of Vela.
This commit also changes a small utility function
in supported_operators.py used for docstring
formatting.
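
That utility change is also outside this diff. For context, a minimal sketch of how a docstring-formatting helper of this kind might work — the names list_formatter and docstring_format_args and their behaviour are assumptions, not the function actually changed by this commit:

```python
# Hypothetical sketch of a docstring-formatting helper; names and behaviour are
# illustrative assumptions, not the utility changed in supported_operators.py.
def list_formatter(items):
    # Join a collection of op names or values into a stable, readable string
    return ", ".join(sorted(str(item) for item in items))


def docstring_format_args(args):
    # Decorator filling {} placeholders in a constraint docstring with formatted lists,
    # keeping the generated "supported operators" documentation in sync with the code
    def wrapper(func):
        func.__doc__ = func.__doc__.format(*[list_formatter(arg) for arg in args])
        return func

    return wrapper


@docstring_format_args([{"Conv2D", "DepthwiseConv2D"}])
def constraint_example(op):
    "Per-channel scaling is only supported for: {}"
    return True


print(constraint_example.__doc__)  # Per-channel scaling is only supported for: Conv2D, DepthwiseConv2D
```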
Signed-off-by: Dwight Lidman <dwight.lidman@arm.com>
Change-Id: I9ed090592f1d05dd4566d3e54dba1ef405299383
Diffstat (limited to 'ethosu/vela/test/testutil.py')
-rw-r--r-- | ethosu/vela/test/testutil.py | 10 |
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/ethosu/vela/test/testutil.py b/ethosu/vela/test/testutil.py
index b06008af..82588278 100644
--- a/ethosu/vela/test/testutil.py
+++ b/ethosu/vela/test/testutil.py
@@ -80,7 +80,9 @@ def create_elemwise_op(
     return op


-def create_op_with_quant_tensors(op_type, ifm_shape, ofm_shape, weights_shape=None, datatype=DataType.uint8):
+def create_op_with_quant_tensors(
+    op_type, ifm_shape, ofm_shape, weights_shape=None, bias_shape=None, datatype=DataType.uint8
+):
     ifm = Tensor(ifm_shape, datatype, "in")
     ifm.quantization = default_quant_params()
     ofm = Tensor(ofm_shape, datatype, "out")
@@ -102,6 +104,12 @@ def create_op_with_quant_tensors(op_type, ifm_shape, ofm_shape, weights_shape=No
             "weights", weights_shape, datatype, np.zeros(weights_shape), np_type, quantization=qp
         )
         op.add_input_tensor(weights)
+    # Optional bias tensor
+    if bias_shape is not None:
+        qp = default_quant_params()
+        qp.zero_point = np.zeros(bias_shape)
+        bias = create_const_tensor("bias", bias_shape, DataType.int32, np.zeros(bias_shape), np.int32, quantization=qp)
+        op.add_input_tensor(bias)
     return op
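
With the new bias_shape argument, a test can build an op that also carries a quantized int32 bias input. A usage sketch under assumptions — the Op enum value and the shapes are illustrative, not taken from this commit:

```python
# Illustrative test usage of the extended helper; the Op enum value and the
# shapes are assumptions, not taken from the commit itself.
from ethosu.vela.operation import Op
from ethosu.vela.test import testutil

op = testutil.create_op_with_quant_tensors(
    Op.Conv2DBias,          # assumed convolution-with-bias op type
    ifm_shape=[1, 8, 8, 8],
    ofm_shape=[1, 8, 8, 8],
    weights_shape=[1, 1, 8, 8],
    bias_shape=[1, 8],      # new optional argument; omit it for the old behaviour
)
assert len(op.inputs) == 3  # ifm, weights and the new int32 bias tensor
```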