diff options
author | Dwight Lidman <dwight.lidman@arm.com> | 2020-11-24 13:45:50 +0100 |
---|---|---|
committer | patrik.gustavsson <patrik.gustavsson@arm.com> | 2020-11-26 07:19:01 +0000 |
commit | 0dd21c79ac6ef588e23393064d25e402e16cc2dd (patch) | |
tree | 6933d6bd1df37485f7537deed4b19c2e0af805f3 /ethosu/vela/test/testutil.py | |
parent | 933f55ea6f686d0cf390f4767e87a391686c3df8 (diff) | |
download | ethos-u-vela-0dd21c79ac6ef588e23393064d25e402e16cc2dd.tar.gz |
MLBEDSW-3558: Put FC on CPU when OFM != 2D
This commit adds a constraint to FullyConnected
ops in supported_operators.py that puts any
such op on the CPU if tensor dimensions of the
output(s) are not 2D.
Signed-off-by: Dwight Lidman <dwight.lidman@arm.com>
Change-Id: I8c898a780b40fc4a1383c09213f0696ea6699b7d
Diffstat (limited to 'ethosu/vela/test/testutil.py')
-rw-r--r-- | ethosu/vela/test/testutil.py | 7 |
1 file changed, 5 insertions, 2 deletions
diff --git a/ethosu/vela/test/testutil.py b/ethosu/vela/test/testutil.py index ee407b6e..4b2938b9 100644 --- a/ethosu/vela/test/testutil.py +++ b/ethosu/vela/test/testutil.py @@ -20,6 +20,7 @@ import numpy as np from ethosu.vela import architecture_features from ethosu.vela.data_type import DataType from ethosu.vela.nn_graph import Subgraph +from ethosu.vela.operation import Op from ethosu.vela.operation import Operation from ethosu.vela.tensor import create_const_tensor from ethosu.vela.tensor import QuantizationParameters @@ -90,7 +91,8 @@ def create_op_with_quant_tensors( else: np_type = np.int32 qp = default_quant_params() - qp.zero_point = np.zeros(weights_shape) + if op.type is not Op.FullyConnected: + qp.zero_point = np.zeros(weights_shape) weights = create_const_tensor( "weights", weights_shape, datatype, np.zeros(weights_shape), np_type, quantization=qp ) @@ -98,7 +100,8 @@ def create_op_with_quant_tensors( # Optional bias tensor if bias_shape is not None: qp = default_quant_params() - qp.zero_point = np.zeros(bias_shape) + if op.type is not Op.FullyConnected: + qp.zero_point = np.zeros(bias_shape) bias = create_const_tensor("bias", bias_shape, DataType.int32, np.zeros(bias_shape), np.int32, quantization=qp) op.add_input_tensor(bias) return op |