diff options
Diffstat (limited to 'ethosu/vela/test')
-rw-r--r-- ethosu/vela/test/test_supported_operators.py | 16 ++++++++++++++++
-rw-r--r-- ethosu/vela/test/testutil.py                 |  7 +++++--
2 files changed, 21 insertions(+), 2 deletions(-)
diff --git a/ethosu/vela/test/test_supported_operators.py b/ethosu/vela/test/test_supported_operators.py
index 72ccad24..f132eef7 100644
--- a/ethosu/vela/test/test_supported_operators.py
+++ b/ethosu/vela/test/test_supported_operators.py
@@ -122,6 +122,22 @@ def test_constraint_tens_quant_per_axis_is_supp():
     assert support.is_operator_supported(op)
 
 
+def test_constraint_fc_output_2d_not_supp():
+    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [12, 1], [3, 2, 2, 1], weights_shape=[12, 1, 1, 1])
+    assert not support.is_operator_supported(op)
+    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [12, 1, 1, 1], [1, 3, 4], weights_shape=[12, 1, 1, 1])
+    assert not support.is_operator_supported(op)
+    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [1, 1, 1, 1], [1], weights_shape=[1, 1, 1, 1])
+    assert not support.is_operator_supported(op)
+
+
+def test_constraint_fc_output_2d_is_supp():
+    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [4, 8, 8, 4], [32, 32], weights_shape=[4, 8, 8, 4])
+    assert support.is_operator_supported(op)
+    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [1, 1024], [16, 64], weights_shape=[1, 1024])
+    assert support.is_operator_supported(op)
+
+
 def test_constraint_faf():
     # Fused activation functions, if set, must be a valid op type
     op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
diff --git a/ethosu/vela/test/testutil.py b/ethosu/vela/test/testutil.py
index ee407b6e..4b2938b9 100644
--- a/ethosu/vela/test/testutil.py
+++ b/ethosu/vela/test/testutil.py
@@ -20,6 +20,7 @@ import numpy as np
 from ethosu.vela import architecture_features
 from ethosu.vela.data_type import DataType
 from ethosu.vela.nn_graph import Subgraph
+from ethosu.vela.operation import Op
 from ethosu.vela.operation import Operation
 from ethosu.vela.tensor import create_const_tensor
 from ethosu.vela.tensor import QuantizationParameters
@@ -90,7 +91,8 @@ def create_op_with_quant_tensors(
     else:
         np_type = np.int32
     qp = default_quant_params()
-    qp.zero_point = np.zeros(weights_shape)
+    if op.type is not Op.FullyConnected:
+        qp.zero_point = np.zeros(weights_shape)
     weights = create_const_tensor(
         "weights", weights_shape, datatype, np.zeros(weights_shape), np_type, quantization=qp
     )
@@ -98,7 +100,8 @@ def create_op_with_quant_tensors(
     # Optional bias tensor
     if bias_shape is not None:
         qp = default_quant_params()
-        qp.zero_point = np.zeros(bias_shape)
+        if op.type is not Op.FullyConnected:
+            qp.zero_point = np.zeros(bias_shape)
         bias = create_const_tensor("bias", bias_shape, DataType.int32, np.zeros(bias_shape), np.int32, quantization=qp)
         op.add_input_tensor(bias)
     return op