From aee5d7537ff81ffda5ba222721b72f914ce50fb8 Mon Sep 17 00:00:00 2001
From: Louis Verhaard
Date: Wed, 30 Sep 2020 09:01:52 +0200
Subject: MLBEDSW-3148: Refactor Operation

- op.type is now an enum instead of a string
- Removed unused operator codes
- Refactored some attributes like npu_block_type, fused_activation_function
- Refactored operator index calculation
- Refactored a number of operator sets

Change-Id: I641f65ee375794b7aec42abc0664251ae37d78e8
Signed-off-by: Louis Verhaard
---
 ethosu/vela/test/test_lut.py                 | 33 ++++++++++++++--------------
 ethosu/vela/test/test_supported_operators.py | 28 ++++++++++++-----------
 ethosu/vela/test/test_tflite_reader.py       | 17 +++++++-------
 ethosu/vela/test/testutil.py                 |  2 --
 4 files changed, 41 insertions(+), 39 deletions(-)

(limited to 'ethosu/vela/test')

diff --git a/ethosu/vela/test/test_lut.py b/ethosu/vela/test/test_lut.py
index ee1a40fe..44ee0afb 100644
--- a/ethosu/vela/test/test_lut.py
+++ b/ethosu/vela/test/test_lut.py
@@ -26,6 +26,7 @@ from ethosu.vela import pass_packing
 from ethosu.vela.data_type import DataType
 from ethosu.vela.high_level_command_stream import DMA
 from ethosu.vela.nn_graph import Graph
+from ethosu.vela.operation import Op
 from ethosu.vela.rewrite_graph import verify_graph_health
 from ethosu.vela.tensor import create_const_tensor
 from ethosu.vela.tensor import TensorPurpose
@@ -94,28 +95,28 @@ def test_optimize_high_level_cmd_stream_2K():
     arch = testutil.create_arch()
     shape = [1, 1, 1, 1]
     # u8 LUT op, should lead to DMA
-    op0 = testutil.create_elemwise_op("AddAct", "op0", shape, shape, shape)
+    op0 = testutil.create_elemwise_op(Op.Add, "op0", shape, shape, shape)
     set_256_lut(op0, "lut0")
     # u8 LUT op, should lead to DMA
-    op1 = testutil.create_elemwise_op("AddAct", "op1", shape, shape, shape)
+    op1 = testutil.create_elemwise_op(Op.Add, "op1", shape, shape, shape)
     set_256_lut(op1, "lut1")
     # u8 LUT op with different LUT, should lead to DMA
-    op2 = testutil.create_elemwise_op("AddAct", "op2", shape, shape, shape)
+    op2 = testutil.create_elemwise_op(Op.Add, "op2", shape, shape, shape)
     set_256_lut(op2, "lut2")
     # u8 LUT op with same LUT as in op1, should not lead to DMA
-    op3 = testutil.create_elemwise_op("AddAct", "op3", shape, shape, shape)
+    op3 = testutil.create_elemwise_op(Op.Add, "op3", shape, shape, shape)
     set_256_lut(op3, "lut1")
     # u8 LUT op with same LUT as in op2, should not lead to DMA
-    op4 = testutil.create_elemwise_op("AddAct", "op4", shape, shape, shape)
+    op4 = testutil.create_elemwise_op(Op.Add, "op4", shape, shape, shape)
     set_256_lut(op4, "lut2")
     # 2K LUT op, should lead to DMA, and will overwrite all previous LUTs in SHRAM
-    op5_2K = testutil.create_elemwise_op("AddAct", "op5", shape, shape, shape)
+    op5_2K = testutil.create_elemwise_op(Op.Add, "op5", shape, shape, shape)
     set_2K_lut(op5_2K, "lut5")
     # Another 2K LUT op, should lead to DMA, and will overwrite the previous LUT in SHRAM
-    op6_2K = testutil.create_elemwise_op("AddAct", "op6", shape, shape, shape)
+    op6_2K = testutil.create_elemwise_op(Op.Add, "op6", shape, shape, shape)
     set_2K_lut(op6_2K, "lut6")
     # u8 LUT op with same LUT as in op1, should lead to DMA
-    op7 = testutil.create_elemwise_op("AddAct", "op7", shape, shape, shape)
+    op7 = testutil.create_elemwise_op(Op.Add, "op7", shape, shape, shape)
     set_256_lut(op7, "lut1")

     op_list = [op0, op1, op2, op3, op4, op5_2K, op6_2K, op7]
@@ -149,28 +150,28 @@ def test_optimize_high_level_cmd_stream_1K():
     arch = testutil.create_arch()
     shape = [1, 1, 1, 1]
     # u8 LUT op, should lead to DMA
-    op0 = testutil.create_elemwise_op("AddAct", "op0", shape, shape, shape)
+    op0 = testutil.create_elemwise_op(Op.Add, "op0", shape, shape, shape)
     set_256_lut(op0, "lut0")
     # u8 LUT op, should lead to DMA
-    op1 = testutil.create_elemwise_op("AddAct", "op1", shape, shape, shape)
+    op1 = testutil.create_elemwise_op(Op.Add, "op1", shape, shape, shape)
     set_256_lut(op1, "lut1")
     # 1K LUT op with different LUT, should lead to DMA
-    op2_1K = testutil.create_elemwise_op("AddAct", "op2", shape, shape, shape)
+    op2_1K = testutil.create_elemwise_op(Op.Add, "op2", shape, shape, shape)
     set_1K_lut(op2_1K, "lut2")
     # u8 LUT op with same LUT as in op1, should not lead to DMA
-    op3 = testutil.create_elemwise_op("AddAct", "op3", shape, shape, shape)
+    op3 = testutil.create_elemwise_op(Op.Add, "op3", shape, shape, shape)
     set_256_lut(op3, "lut1")
     # 1K LUT op with same LUT as in op2, should not lead to DMA
-    op4_1K = testutil.create_elemwise_op("AddAct", "op4", shape, shape, shape)
+    op4_1K = testutil.create_elemwise_op(Op.Add, "op4", shape, shape, shape)
     set_1K_lut(op4_1K, "lut2")
     # 1K LUT op, should lead to DMA, and will overwrite lut2
-    op5_2K = testutil.create_elemwise_op("AddAct", "op5", shape, shape, shape)
+    op5_2K = testutil.create_elemwise_op(Op.Add, "op5", shape, shape, shape)
     set_1K_lut(op5_2K, "lut5")
     # u8 LUT op, lut0 should still be present, should not lead to DMA
-    op6 = testutil.create_elemwise_op("AddAct", "op6", shape, shape, shape)
+    op6 = testutil.create_elemwise_op(Op.Add, "op6", shape, shape, shape)
     set_256_lut(op6, "lut0")
     # 1K LUT op with same LUT as in op2, should lead to DMA
-    op7 = testutil.create_elemwise_op("AddAct", "op7", shape, shape, shape)
+    op7 = testutil.create_elemwise_op(Op.Add, "op7", shape, shape, shape)
     set_1K_lut(op7, "lut2")

     op_list = [op0, op1, op2_1K, op3, op4_1K, op5_2K, op6, op7]
diff --git a/ethosu/vela/test/test_supported_operators.py b/ethosu/vela/test/test_supported_operators.py
index 53c20927..20d448d7 100644
--- a/ethosu/vela/test/test_supported_operators.py
+++ b/ethosu/vela/test/test_supported_operators.py
@@ -19,6 +19,7 @@
 import numpy as np

 from ethosu.vela.data_type import DataType
+from ethosu.vela.operation import Op
 from ethosu.vela.supported_operators import SupportedOperators
 from ethosu.vela.tensor import create_const_tensor
 from ethosu.vela.tensor import QuantizationParameters
@@ -35,7 +36,7 @@ def create_strided_slice_op(in_shape, out_shape, start_offsets, end_offsets):
     in3 = create_const_tensor("strides", [len(end_offsets)], DataType.uint8, len(end_offsets) * [1])
     out = Tensor(out_shape, DataType.uint8, "out")
     attrs = {"ellipsis_mask": 0, "new_axis_mask": 0, "shrink_axis_mask": 0, "begin_mask": 0, "end_mask": 0}
-    return testutil.create_op("StridedSlice", [in0, in1, in2, in3], out, attrs=attrs)
+    return testutil.create_op(Op.StridedSlice, [in0, in1, in2, in3], out, attrs=attrs)


 def create_strided_slice():
@@ -93,21 +94,21 @@ def test_constraint_tens_defined_shape():
     # Tensors cannot have None in them
     inp = Tensor([1, 8, None, 8], DataType.uint8, "in")
     out = Tensor([1, 8, 8, 8], DataType.uint8, "out")
-    op = testutil.create_op("Relu", [inp], out)
+    op = testutil.create_op(Op.Relu, [inp], out)
     assert not support.is_operator_supported(op)


 def test_constraint_tens_shapeless():
     # Shapeless input is allowed if its of a certain type:
-    op = testutil.create_elemwise_op("Mul", "scalar_mul", [1, 8, 8, 8], [], [1, 8, 8, 8])
+    op = testutil.create_elemwise_op(Op.Mul, "scalar_mul", [1, 8, 8, 8], [], [1, 8, 8, 8])
     assert support.is_operator_supported(op)
     # Shapeless output is not allowed at all:
-    op = testutil.create_elemwise_op("Mul", "scalar_mul", [1, 8, 8, 8], [1, 8, 8, 8], [])
+    op = testutil.create_elemwise_op(Op.Mul, "scalar_mul", [1, 8, 8, 8], [1, 8, 8, 8], [])
     assert not support.is_operator_supported(op)
     # Invalid shapeless input due to op type:
     inp = Tensor([], DataType.uint8, "in")
     out = Tensor([1, 8, 8, 8], DataType.uint8, "out")
-    op = testutil.create_op("Relu", [inp], out)
+    op = testutil.create_op(Op.Relu, [inp], out)
     assert not support.is_operator_supported(op)


@@ -115,7 +116,7 @@ def test_constraint_tens_shape_size():
     # Tensors cannot be > 4D
     inp = Tensor([1, 1, 8, 8, 8], DataType.uint8, "in")
     out = Tensor([1, 1, 8, 8, 8], DataType.uint8, "out")
-    op = testutil.create_op("Relu", [inp], out)
+    op = testutil.create_op(Op.Relu, [inp], out)
     assert not support.is_operator_supported(op)


@@ -123,14 +124,14 @@ def test_constraint_tens_dtype():
     # Tensors can only be of type uint8, int8, int16 (and int32)
     inp = Tensor([1, 8, 8, 8], DataType.float32, "in")
     out = Tensor([1, 8, 8, 8], DataType.float32, "out")
-    op = testutil.create_op("Relu", [inp], out)
+    op = testutil.create_op(Op.Relu, [inp], out)
     assert not support.is_operator_supported(op)
     # For int32, only select op types are allowed:
-    op = testutil.create_elemwise_op("Mul", "scalar_mul", [1, 8, 8, 8], [], [1, 8, 8, 8], DataType.int32)
+    op = testutil.create_elemwise_op(Op.Mul, "scalar_mul", [1, 8, 8, 8], [], [1, 8, 8, 8], DataType.int32)
     assert support.is_operator_supported(op)
     inp = Tensor([1, 8, 8, 8], DataType.int32, "in")
     out = Tensor([1, 8, 8, 8], DataType.int32, "out")
-    op = testutil.create_op("Relu", [inp], out)
+    op = testutil.create_op(Op.Relu, [inp], out)
     assert not support.is_operator_supported(op)


@@ -138,11 +139,11 @@ def test_constraint_tens_dimension():
     # Tensors can only have values in the inclusive range of 1-65535
     inp = Tensor([1, 8, 8, 0], DataType.uint8, "in")
     out = Tensor([1, 8, 8, 0], DataType.uint8, "out")
-    op = testutil.create_op("Relu", [inp], out)
+    op = testutil.create_op(Op.Relu, [inp], out)
     assert not support.is_operator_supported(op)
     inp = Tensor([1, 8, 8, 65536], DataType.uint8, "in")
     out = Tensor([1, 8, 8, 65536], DataType.uint8, "out")
-    op = testutil.create_op("Relu", [inp], out)
+    op = testutil.create_op(Op.Relu, [inp], out)
     assert not support.is_operator_supported(op)


@@ -150,13 +151,14 @@ def test_constraint_faf():
     # Fused activation functions, if set, must be a valid op type
     inp = Tensor([1, 8, 8, 8], DataType.uint8, "in")
     out = Tensor([1, 8, 8, 8], DataType.uint8, "out")
-    op = testutil.create_op("Relu", [inp], out, attrs={"fused_activation_function": "Conv2D"})
+    op = testutil.create_op(Op.Relu, [inp], out)
+    op.activation = Op.Conv2D
     assert not support.is_operator_supported(op)


 def test_constraint_tens_quant_scale():
     # Quantization scale cannot be infinit
-    op = testutil.create_elemwise_op("Mul", "scalar_mul", [1, 8, 8, 8], [], [1, 8, 8, 8])
+    op = testutil.create_elemwise_op(Op.Mul, "scalar_mul", [1, 8, 8, 8], [], [1, 8, 8, 8])
     op.inputs[0].quantization = QuantizationParameters()
     op.inputs[0].quantization.scale_f32 = np.inf
     assert not support.is_operator_supported(op)
diff --git a/ethosu/vela/test/test_tflite_reader.py b/ethosu/vela/test/test_tflite_reader.py
index d63c0007..23abb4a0 100644
--- a/ethosu/vela/test/test_tflite_reader.py
+++ b/ethosu/vela/test/test_tflite_reader.py
@@ -20,6 +20,7 @@ from unittest.mock import patch

 import pytest

+from ethosu.vela.operation import Op
 from ethosu.vela.tflite_reader import TFLiteSubgraph

@@ -41,13 +42,13 @@ class TestTFLiteSubgraph:

     parse_op_testdata = [
         # op_type, opt_serializer, inputs, output, expected
-        ("FullyConnected", None, [0, 1, 2], 3, 3),  # FC
-        ("FullyConnected", None, [0, 1, -1], 3, 3),  # FC disabled Bias
-        ("FullyConnected", None, [0, 1], 3, 3),  # FC no Bias
-        ("Conv2D", None, [2, 1, 3], 0, 3),  # Conv2D
-        ("Conv2DBackprop", None, [0, 1, 2, 3], 4, 4),  # TransposeConv
-        ("Conv2DBackprop", None, [0, 1, 2], 4, 4),  # TransposeConv no Bias
-        pytest.param("Conv2D", None, [0, -1, 1], 3, 3, marks=pytest.mark.xfail),  # Conv2D no Weights
+        (Op.FullyConnected, None, [0, 1, 2], 3, 3),  # FC
+        (Op.FullyConnected, None, [0, 1, -1], 3, 3),  # FC disabled Bias
+        (Op.FullyConnected, None, [0, 1], 3, 3),  # FC no Bias
+        (Op.Conv2D, None, [2, 1, 3], 0, 3),  # Conv2D
+        (Op.Conv2DBackpropInput, None, [0, 1, 2, 3], 4, 4),  # TransposeConv
+        (Op.Conv2DBackpropInput, None, [0, 1, 2], 4, 4),  # TransposeConv no Bias
+        pytest.param(Op.Conv2D, None, [0, -1, 1], 3, 3, marks=pytest.mark.xfail),  # Conv2D no Weights
     ]

     @pytest.mark.parametrize("op_type, opt_serializer, inputs, output, expected", parse_op_testdata)
@@ -56,7 +57,7 @@ class TestTFLiteSubgraph:
         # Mock a TFLiteSubGraph
         sg = TFLiteSubgraph(None, None)
         sg.graph = MagicMock()
-        sg.graph.operator_codes = [(op_type, opt_serializer)]
+        sg.graph.operator_codes = [(op_type, opt_serializer, "")]

         # Mock a couple of tensors
         sg.tensors = [MagicMock() for _ in range(5)]
diff --git a/ethosu/vela/test/testutil.py b/ethosu/vela/test/testutil.py
index 13b6bf40..adb874a0 100644
--- a/ethosu/vela/test/testutil.py
+++ b/ethosu/vela/test/testutil.py
@@ -20,7 +20,6 @@ import numpy as np
 from ethosu.vela import architecture_features
 from ethosu.vela.data_type import DataType
 from ethosu.vela.nn_graph import Subgraph
-from ethosu.vela.operation import NpuBlockType
 from ethosu.vela.operation import Operation
 from ethosu.vela.tensor import create_const_tensor
 from ethosu.vela.tensor import Tensor
@@ -52,7 +51,6 @@ def create_elemwise_op(type, name, ifm_shape, ifm2_shape, ofm_shape, datatype=Da
     op.add_input_tensor(create_const_tensor(name + "_ifm2", ifm2_shape, datatype, np.zeros(ifm2_shape), np_type))
     ofm = Tensor(ofm_shape, datatype, name + "_ofm")
     op.set_output_tensor(ofm)
-    op.attrs["npu_block_type"] = NpuBlockType.ElementWise
     return op
--
cgit v1.2.1
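
Illustrative sketch (not part of the patch): the snippet below summarizes the test-facing API change shown in the hunks above, under the assumption that the test helpers keep the signatures used there. Op types passed to testutil.create_op and testutil.create_elemwise_op are now Op enum members rather than strings, and a fused activation is set via op.activation instead of the old attrs["fused_activation_function"] entry.

    from ethosu.vela.data_type import DataType
    from ethosu.vela.operation import Op
    from ethosu.vela.tensor import Tensor
    from ethosu.vela.test import testutil

    shape = [1, 1, 1, 1]

    # Old style (before this patch): string op types, activation via attrs
    #   op0 = testutil.create_elemwise_op("AddAct", "op0", shape, shape, shape)
    #   op = testutil.create_op("Relu", [inp], out, attrs={"fused_activation_function": "Conv2D"})

    # New style (this patch): Op enum members, activation as a plain attribute
    op0 = testutil.create_elemwise_op(Op.Add, "op0", shape, shape, shape)

    inp = Tensor([1, 8, 8, 8], DataType.uint8, "in")
    out = Tensor([1, 8, 8, 8], DataType.uint8, "out")
    op = testutil.create_op(Op.Relu, [inp], out)
    op.activation = Op.Conv2D  # deliberately invalid activation; test_constraint_faf expects it to be rejected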