author     Dwight Lidman <dwight.lidman@arm.com>    2020-11-24 13:45:50 +0100
committer  patrik.gustavsson <patrik.gustavsson@arm.com>    2020-11-26 07:19:01 +0000
commit     0dd21c79ac6ef588e23393064d25e402e16cc2dd (patch)
tree       6933d6bd1df37485f7537deed4b19c2e0af805f3
parent     933f55ea6f686d0cf390f4767e87a391686c3df8 (diff)
download   ethos-u-vela-0dd21c79ac6ef588e23393064d25e402e16cc2dd.tar.gz
MLBEDSW-3558: Put FC on CPU when OFM != 2D
This commit adds a constraint to FullyConnected ops in supported_operators.py
that puts any such op on the CPU if tensor dimensions of the output(s) are not 2D.

Signed-off-by: Dwight Lidman <dwight.lidman@arm.com>
Change-Id: I8c898a780b40fc4a1383c09213f0696ea6699b7d
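In short: if any output tensor of a FullyConnected op does not have exactly two
dimensions, the op is reported as unsupported and Vela leaves it to run on the CPU
instead of the Ethos-U NPU. A minimal standalone sketch of the shape rule (plain
Python, not the vela API):

def fc_ofm_is_2d(output_shapes):
    # True only when every output tensor shape has exactly two dimensions.
    return all(len(shape) == 2 for shape in output_shapes)

print(fc_ofm_is_2d([[32, 32]]))    # True  -> the op can stay on the NPU
print(fc_ofm_is_2d([[1, 3, 4]]))   # False -> the op falls back to the CPU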
-rw-r--r--  ethosu/vela/supported_operators.py            | 14
-rw-r--r--  ethosu/vela/test/test_supported_operators.py  | 16
-rw-r--r--  ethosu/vela/test/testutil.py                  |  7
3 files changed, 35 insertions(+), 2 deletions(-)
diff --git a/ethosu/vela/supported_operators.py b/ethosu/vela/supported_operators.py
index deae75a..f7dfec2 100644
--- a/ethosu/vela/supported_operators.py
+++ b/ethosu/vela/supported_operators.py
@@ -247,6 +247,9 @@ class SupportedOperators:
         # LeakyRelu specific checks:
         self.specific_constraints[Op.LeakyRelu].append(SupportedOperators.constraint_alpha_valid)
 
+        # FullyConnected specific checks:
+        self.specific_constraints[Op.FullyConnected].append(SupportedOperators.constraint_fc_output_2d)
+
     def is_operator_supported(self, op):
         ext_type = optype_to_builtintype(op.type)
         if op.type not in SupportedOperators.supported_operators:
@@ -409,6 +412,17 @@ class SupportedOperators:
                 extra.append(tens.name)
         return valid, "The following tensor(s) have per-axis quantization parameters: " + ", ".join(extra)
 
+    @staticmethod
+    def constraint_fc_output_2d(op):
+        "The output tensor(s) must have 2D shape"
+        valid = True
+        extra = []
+        for tens in op.outputs:
+            if len(tens.shape) != 2:
+                valid = False
+                extra.append(f"Tensor '{tens.name}' is {len(tens.shape)}D")
+        return valid, ", ".join(extra)
+
     @classmethod
     @docstring_format_args([_optype_formatter(supported_fused_activations)])
     def constraint_faf(cls, op):
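A quick sketch of how the new constraint behaves on its own. Like the other specific
constraints, it returns a (valid, extra) pair, and the detail string helps explain why
an op is left on the CPU. This assumes the ethos-u-vela package is importable; StubOp
and StubTensor are hypothetical stand-ins exposing only the attributes the check reads,
not vela's real classes:

from ethosu.vela.supported_operators import SupportedOperators


class StubTensor:
    def __init__(self, name, shape):
        self.name = name
        self.shape = shape


class StubOp:
    def __init__(self, outputs):
        self.outputs = outputs


# constraint_fc_output_2d is a staticmethod, so it can be called directly.
# A 3D OFM fails the check and the reason names the offending tensor.
valid, extra = SupportedOperators.constraint_fc_output_2d(StubOp([StubTensor("ofm", [1, 3, 4])]))
print(valid, extra)   # False Tensor 'ofm' is 3D

# A 2D OFM passes and the detail string is empty.
valid, extra = SupportedOperators.constraint_fc_output_2d(StubOp([StubTensor("ofm", [32, 32])]))
print(valid, extra)   # True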
diff --git a/ethosu/vela/test/test_supported_operators.py b/ethosu/vela/test/test_supported_operators.py
index 72ccad2..f132eef 100644
--- a/ethosu/vela/test/test_supported_operators.py
+++ b/ethosu/vela/test/test_supported_operators.py
@@ -122,6 +122,22 @@ def test_constraint_tens_quant_per_axis_is_supp():
     assert support.is_operator_supported(op)
 
 
+def test_constraint_fc_output_2d_not_supp():
+    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [12, 1], [3, 2, 2, 1], weights_shape=[12, 1, 1, 1])
+    assert not support.is_operator_supported(op)
+    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [12, 1, 1, 1], [1, 3, 4], weights_shape=[12, 1, 1, 1])
+    assert not support.is_operator_supported(op)
+    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [1, 1, 1, 1], [1], weights_shape=[1, 1, 1, 1])
+    assert not support.is_operator_supported(op)
+
+
+def test_constraint_fc_output_2d_is_supp():
+    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [4, 8, 8, 4], [32, 32], weights_shape=[4, 8, 8, 4])
+    assert support.is_operator_supported(op)
+    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [1, 1024], [16, 64], weights_shape=[1, 1024])
+    assert support.is_operator_supported(op)
+
+
 def test_constraint_faf():
     # Fused activation functions, if set, must be a valid op type
     op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
diff --git a/ethosu/vela/test/testutil.py b/ethosu/vela/test/testutil.py
index ee407b6..4b2938b 100644
--- a/ethosu/vela/test/testutil.py
+++ b/ethosu/vela/test/testutil.py
@@ -20,6 +20,7 @@ import numpy as np
 from ethosu.vela import architecture_features
 from ethosu.vela.data_type import DataType
 from ethosu.vela.nn_graph import Subgraph
+from ethosu.vela.operation import Op
 from ethosu.vela.operation import Operation
 from ethosu.vela.tensor import create_const_tensor
 from ethosu.vela.tensor import QuantizationParameters
@@ -90,7 +91,8 @@ def create_op_with_quant_tensors(
         else:
             np_type = np.int32
         qp = default_quant_params()
-        qp.zero_point = np.zeros(weights_shape)
+        if op.type is not Op.FullyConnected:
+            qp.zero_point = np.zeros(weights_shape)
         weights = create_const_tensor(
             "weights", weights_shape, datatype, np.zeros(weights_shape), np_type, quantization=qp
         )
@@ -98,7 +100,8 @@ def create_op_with_quant_tensors(
     # Optional bias tensor
     if bias_shape is not None:
         qp = default_quant_params()
-        qp.zero_point = np.zeros(bias_shape)
+        if op.type is not Op.FullyConnected:
+            qp.zero_point = np.zeros(bias_shape)
         bias = create_const_tensor("bias", bias_shape, DataType.int32, np.zeros(bias_shape), np.int32, quantization=qp)
         op.add_input_tensor(bias)
     return op
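The testutil.py change stops the helper from giving FullyConnected weight and bias
tensors a zero point array shaped like the tensor itself; for FullyConnected the
helper now keeps its default scalar zero point. Presumably this is so the new
FullyConnected test ops do not also look per-axis quantized (the per-axis constraint
is visible earlier in this diff) and only the 2D-output check is exercised. A tiny
illustration of the distinction, with hypothetical values rather than vela code:

import numpy as np

per_tensor_zero_point = 0                       # scalar: one zero point for the whole tensor
per_axis_zero_point = np.zeros([12, 1, 1, 1])   # array shaped like the weights

print(np.ndim(per_tensor_zero_point))   # 0 -> per-tensor quantization
print(np.ndim(per_axis_zero_point))     # 4 -> array-valued, reads as per-axis quantization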