From b0b58d61adb32e6fb3704c60655b3a1a9d5d9afd Mon Sep 17 00:00:00 2001
From: Thibaut Goetghebuer-Planchon
Date: Wed, 23 Nov 2022 17:33:59 +0000
Subject: Add names to layers to make tests more robust

Change-Id: I64c9bc4b170f3e2147a5791c5aff477a44c06a2f
---
 tests/test_tosa_checker.py | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/tests/test_tosa_checker.py b/tests/test_tosa_checker.py
index b24073a..52378ee 100644
--- a/tests/test_tosa_checker.py
+++ b/tests/test_tosa_checker.py
@@ -33,7 +33,9 @@ def build_tosa_non_compat_model():
     scores_in = tf.keras.layers.Input(
         shape=(num_boxes), batch_size=1, dtype=tf.float32, name="scores"
     )
-    outputs = tf.keras.layers.Lambda(non_max_suppression)([boxes_in, scores_in])
+    outputs = tf.keras.layers.Lambda(non_max_suppression, name="nms")(
+        [boxes_in, scores_in]
+    )
 
     model = tf.keras.models.Model(inputs=[boxes_in, scores_in], outputs=outputs)
     return model
@@ -54,9 +56,9 @@ def build_tosa_non_compat_model_custom_op():
 
         return x
 
-    input = tf.keras.layers.Input(shape=(16,))
-    x = tf.keras.layers.Lambda(exp_log)(input)
-    x = tf.keras.layers.Dense(8, activation="relu")(x)
+    input = tf.keras.layers.Input(shape=(16,), name="input")
+    x = tf.keras.layers.Lambda(exp_log, name="exp_log")(input)
+    x = tf.keras.layers.Dense(8, activation="relu", name="dense")(x)
     model = tf.keras.models.Model(inputs=[input], outputs=x)
     return model
 
@@ -64,8 +66,8 @@ def build_tosa_non_compat_model_custom_op():
 
 @pytest.fixture(scope="module")
 def build_tosa_compat_model():
-    input = tf.keras.layers.Input(shape=(16,))
-    x = tf.keras.layers.Dense(8, activation="relu")(input)
+    input = tf.keras.layers.Input(shape=(16,), name="input")
+    x = tf.keras.layers.Dense(8, activation="relu", name="dense")(input)
     model = tf.keras.models.Model(inputs=[input], outputs=x)
     return model
 
@@ -197,9 +199,10 @@ class TestTosaCompatibilityTool:
         tfl_mlir_representation = checker._get_mlir_model_representation(
             elide_large_elements_attrs=True
         )
+        # TODO Use regular expression to make the test more robust or parse the MLIR module
         expected_mlir_representation = """\
 module attributes {tf_saved_model.semantics, tfl.description = "MLIR Converted.", tfl.schema_version = 3 : i32} {
-  func @main(%arg0: tensor<1x6x4xf32> {tf_saved_model.index_path = ["boxes"]}, %arg1: tensor<1x6xf32> {tf_saved_model.index_path = ["scores"]}) -> (tensor {tf_saved_model.index_path = ["lambda_1"]}, tensor {tf_saved_model.index_path = ["lambda"]}) attributes {tf.entry_function = {inputs = "serving_default_boxes:0,serving_default_scores:0", outputs = "PartitionedCall:1,PartitionedCall:0"}, tf_saved_model.exported_names = ["serving_default"]} {
+  func @main(%arg0: tensor<1x6x4xf32> {tf_saved_model.index_path = ["boxes"]}, %arg1: tensor<1x6xf32> {tf_saved_model.index_path = ["scores"]}) -> (tensor {tf_saved_model.index_path = ["nms_1"]}, tensor {tf_saved_model.index_path = ["nms"]}) attributes {tf.entry_function = {inputs = "serving_default_boxes:0,serving_default_scores:0", outputs = "PartitionedCall:1,PartitionedCall:0"}, tf_saved_model.exported_names = ["serving_default"]} {
     %0 = "tfl.pseudo_const"() {value = dense<0> : tensor<3xi32>} : () -> tensor<3xi32>
     %1 = "tfl.pseudo_const"() {value = dense<[1, 6, 4]> : tensor<3xi32>} : () -> tensor<3xi32>
     %2 = "tfl.pseudo_const"() {value = dense<1> : tensor<3xi32>} : () -> tensor<3xi32>
@@ -224,7 +227,7 @@ module attributes {tf_saved_model.semantics, tfl.description = "MLIR Converted.
         )
         expected_tosa_mlir_representation = """\
 module attributes {tf_saved_model.semantics, tfl.description = "MLIR Converted.", tfl.schema_version = 3 : i32} {
-  func @main(%arg0: tensor<1x6x4xf32> {tf_saved_model.index_path = ["boxes"]}, %arg1: tensor<1x6xf32> {tf_saved_model.index_path = ["scores"]}) -> (tensor {tf_saved_model.index_path = ["lambda_1"]}, tensor {tf_saved_model.index_path = ["lambda"]}) attributes {tf.entry_function = {inputs = "serving_default_boxes:0,serving_default_scores:0", outputs = "PartitionedCall:1,PartitionedCall:0"}, tf_saved_model.exported_names = ["serving_default"]} {
+  func @main(%arg0: tensor<1x6x4xf32> {tf_saved_model.index_path = ["boxes"]}, %arg1: tensor<1x6xf32> {tf_saved_model.index_path = ["scores"]}) -> (tensor {tf_saved_model.index_path = ["nms_1"]}, tensor {tf_saved_model.index_path = ["nms"]}) attributes {tf.entry_function = {inputs = "serving_default_boxes:0,serving_default_scores:0", outputs = "PartitionedCall:1,PartitionedCall:0"}, tf_saved_model.exported_names = ["serving_default"]} {
     %0 = "tosa.const"() {value = dense<5> : tensor} : () -> tensor
     %1 = "tosa.const"() {value = dense<5.000000e-01> : tensor} : () -> tensor
     %2 = "tosa.const"() {value = dense<1.000000e-01> : tensor} : () -> tensor
@@ -245,7 +248,7 @@ module attributes {tf_saved_model.semantics, tfl.description = "MLIR Converted.
         )
         expected_mlir_representation = """\
 module attributes {tf_saved_model.semantics, tfl.description = "MLIR Converted.", tfl.schema_version = 3 : i32} {
-  func @main(%arg0: tensor {tf_saved_model.index_path = ["input_1"]}) -> (tensor {tf_saved_model.index_path = ["dense"]}) attributes {tf.entry_function = {inputs = "serving_default_input_1:0", outputs = "StatefulPartitionedCall:0"}, tf_saved_model.exported_names = ["serving_default"]} {
+  func @main(%arg0: tensor {tf_saved_model.index_path = ["input"]}) -> (tensor {tf_saved_model.index_path = ["dense"]}) attributes {tf.entry_function = {inputs = "serving_default_input:0", outputs = "StatefulPartitionedCall:0"}, tf_saved_model.exported_names = ["serving_default"]} {
     %0 = "tfl.pseudo_const"() {value = opaque<"elided_large_const", "0xDEADBEEF"> : tensor<8x16xf32>} : () -> tensor<8x16xf32>
     %1 = "tfl.no_value"() {value} : () -> none
     %2 = "tfl.fully_connected"(%arg0, %0, %1) {asymmetric_quantize_inputs = false, fused_activation_function = "RELU", keep_num_dims = false, weights_format = "DEFAULT"} : (tensor, tensor<8x16xf32>, none) -> tensor
@@ -260,7 +263,7 @@ module attributes {tf_saved_model.semantics, tfl.description = "MLIR Converted.
         )
         expected_tosa_mlir_representation = """\
 module attributes {tf_saved_model.semantics, tfl.description = "MLIR Converted.", tfl.schema_version = 3 : i32} {
-  func @main(%arg0: tensor {tf_saved_model.index_path = ["input_1"]}) -> (tensor {tf_saved_model.index_path = ["dense"]}) attributes {tf.entry_function = {inputs = "serving_default_input_1:0", outputs = "StatefulPartitionedCall:0"}, tf_saved_model.exported_names = ["serving_default"]} {
+  func @main(%arg0: tensor {tf_saved_model.index_path = ["input"]}) -> (tensor {tf_saved_model.index_path = ["dense"]}) attributes {tf.entry_function = {inputs = "serving_default_input:0", outputs = "StatefulPartitionedCall:0"}, tf_saved_model.exported_names = ["serving_default"]} {
     %0 = "tosa.const"() {value = opaque<"elided_large_const", "0xDEADBEEF"> : tensor<8x16xf32>} : () -> tensor<8x16xf32>
     %1 = "tosa.const"() {value = dense<0.000000e+00> : tensor<8xf32>} : () -> tensor<8xf32>
     %2 = "tosa.fully_connected"(%arg0, %0, %1) : (tensor, tensor<8x16xf32>, tensor<8xf32>) -> tensor
--
cgit v1.2.1