aboutsummaryrefslogtreecommitdiff
path: root/ethosu
diff options
context:
space:
mode:
authorMichael McGeagh <michael.mcgeagh@arm.com>2020-09-08 11:07:35 +0100
committerMichael McGeagh <michael.mcgeagh@arm.com>2020-09-08 15:17:39 +0100
commit11b0bdbf029edc1e166f55a6da1bbaf40da068c4 (patch)
treeb9f74181ba26168ffdb2b210d70705c5bb7f0ad1 /ethosu
parent34ad19b6bf7152a2e15834d152fa7bfa3131039e (diff)
downloadethos-u-vela-11b0bdbf029edc1e166f55a6da1bbaf40da068c4.tar.gz
vela: Refactor op sets in graph_optimiser
We have a number of sets for grouping specific ops together but aren't used that much in code. This updates the file to better utilise these sets. Signed-off-by: Michael McGeagh <michael.mcgeagh@arm.com> Change-Id: I719212671f8bdebc32576278f703549f0937ff65
Diffstat (limited to 'ethosu')
-rw-r--r--ethosu/vela/graph_optimiser.py62
1 file changed, 31 insertions, 31 deletions
diff --git a/ethosu/vela/graph_optimiser.py b/ethosu/vela/graph_optimiser.py
index b9110b8b..bd30fd3d 100644
--- a/ethosu/vela/graph_optimiser.py
+++ b/ethosu/vela/graph_optimiser.py
@@ -38,6 +38,32 @@ from .tensor import Tensor
passthrough_nodes = set(("Identity",))
+conv_op = set(("Conv2D", "QuantizedConv2D", "Conv2DBackpropInputSwitchedBias", "Conv2DBiasAct"))
+fc_op = set(
+ (
+ "MatMul",
+ "QuantizedMatMul",
+ "BlockLSTM",
+ "RnnAct",
+ "UnidirectionalSequenceRnnAct",
+ "BidirectionalSequenceRnnAct",
+ "LstmAct",
+ "UnidirectionalSequenceLstmAct",
+ "BidirectionalSequenceLstmAct",
+ "FullyConnectedAct",
+ )
+)
+depthwise_op = set(("DepthwiseConv2dNative", "DepthwiseConv2dBiasAct",))
+pool_op = set(
+ ("AvgPool", "MaxPool", "QuantizedAvgPool", "QuantizedMaxPool", "AvgPoolAct", "MaxPoolAct", "ResizeBilinear")
+)
+reduce_sum_ops = set(("ReduceSum",))
+binary_elementwise_op = set(("AddAct", "MulAct", "SubAct", "Maximum", "Minimum"))
+elementwise_op = set(("LeakyRelu", "Abs", "CLZ", "SHL", "SHR")) | binary_elementwise_op
+relu_ops = set(("Relu", "Relu6", "ReluN1To1"))
+activation_ops = set(("Sigmoid", "Tanh")) | relu_ops
+memory_only_ops = set(("Reshape",))
+
def remove_passthrough_tensor(tens, arch):
if len(tens.ops) == 1 and tens.ops[0].type in passthrough_nodes:
@@ -394,10 +420,10 @@ def fixup_unpack_output(tens, arch):
def add_padding_fields(op, arch):
if op.run_on_npu:
if "padding" in op.attrs:
- if "Conv" in op.type:
+ if op.type in conv_op | depthwise_op:
kernel_size = op.inputs[1].shape[:2]
input_shape = op.inputs[0].shape
- elif "Pool" in op.type or op.type in ("ResizeBilinear", "ReduceSum"):
+ elif op.type in pool_op | reduce_sum_ops:
kernel_size = op.attrs["ksize"][1:3]
input_shape = op.inputs[0].shape
elif op.type == "ExtractImagePatches":
@@ -424,32 +450,6 @@ def add_padding_fields(op, arch):
return op
-conv_op = set(("Conv2D", "QuantizedConv2D", "Conv2DBackpropInputSwitchedBias", "Conv2DBiasAct"))
-fc_op = set(
- (
- "MatMul",
- "QuantizedMatMul",
- "BlockLSTM",
- "RnnAct",
- "UnidirectionalSequenceRnnAct",
- "BidirectionalSequenceRnnAct",
- "LstmAct",
- "UnidirectionalSequenceLstmAct",
- "BidirectionalSequenceLstmAct",
- "FullyConnectedAct",
- )
-)
-depthwise_op = set(("DepthwiseConv2dNative", "DepthwiseConv2dBiasAct",))
-pool_op = set(
- ("AvgPool", "MaxPool", "QuantizedAvgPool", "QuantizedMaxPool", "AvgPoolAct", "MaxPoolAct", "ResizeBilinear")
-)
-reduce_sum_ops = set(("ReduceSum",))
-elementwise_op = set(("AddAct", "MulAct", "SubAct", "Maximum", "Minimum", "LeakyRelu", "Abs", "CLZ", "SHL", "SHR"))
-binary_elementwise_op = set(("AddAct", "MulAct", "SubAct", "Maximum", "Minimum"))
-activation_ops = set(("Relu", "Relu6", "ReluN1To1", "Sigmoid", "Tanh"))
-memory_only_ops = set(("Reshape",))
-
-
# Check if the op can be reordered
def get_prepend_op(op):
inp = op.inputs[0]
@@ -491,7 +491,7 @@ def convert_depthwise_to_conv(op, arch):
# If those conditions are true, then we can perform a simple
# switch of the operator type (and weight order)
- if ("DepthwiseConv2d" in op.type) and (op.attrs["depth_multiplier"] != 1):
+ if (op.type in depthwise_op) and (op.attrs["depth_multiplier"] != 1):
ifm_tensor = op.inputs[0]
weight_tensor = op.inputs[1]
ofm_tensor = op.outputs[0]
@@ -513,7 +513,7 @@ def convert_depthwise_to_conv(op, arch):
def reorder_depthwise_weights(op, arch):
- if "DepthwiseConv2d" in op.type:
+ if op.type in depthwise_op:
weight_tensor = op.inputs[1]
weight_tensor.quant_values = np.transpose(weight_tensor.quant_values, (0, 1, 3, 2))
weight_tensor.set_all_shapes(list(weight_tensor.quant_values.shape))
@@ -603,7 +603,7 @@ def fixup_elementwise_with_scalars(op, arch):
# Set input/output tensor equivalence to the same id for memory operations
def set_tensor_equivalence(op, arch):
- if op.type == "Reshape":
+ if op.type in memory_only_ops:
eid = op.outputs[0].equivalence_id
for inp in op.inputs:
inp.equivalence_id = eid