author    Thibaut Goetghebuer-Planchon <thibaut.goetghebuer-planchon@arm.com>  2022-11-23 17:33:59 +0000
committer Thibaut Goetghebuer-Planchon <thibaut.goetghebuer-planchon@arm.com>  2022-12-13 10:54:03 +0000
commit    b0b58d61adb32e6fb3704c60655b3a1a9d5d9afd (patch)
tree      c63a851c3de7d991891fa058d3a0e6da97086fb0
parent    9fb745d92173abfa270e99bd5c9bd7cf85bfeb31 (diff)
download  tosa_checker-b0b58d61adb32e6fb3704c60655b3a1a9d5d9afd.tar.gz
Add names to layers to make tests more robust
Change-Id: I64c9bc4b170f3e2147a5791c5aff477a44c06a2f
-rw-r--r--  tests/test_tosa_checker.py  23
1 file changed, 13 insertions(+), 10 deletions(-)
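
Note (not part of the commit): the change follows the usual Keras pattern of passing an explicit name= to each layer. Without a name, Keras auto-generates one ("input_1", "lambda", "dense_2", ...) based on how many layers of that type were already created in the same process, so the output names baked into the converted model, and therefore the expected MLIR text in the tests below, can shift between runs. A minimal sketch of the pattern, with illustrative variable and layer names:

    import tensorflow as tf

    # Unnamed layers get counter-based names ("input_1", "dense", "dense_1", ...)
    # that depend on what was built earlier in the same process.
    anon_in = tf.keras.layers.Input(shape=(16,))
    anon_out = tf.keras.layers.Dense(8, activation="relu")(anon_in)

    # Explicitly named layers keep the same names regardless of creation order,
    # so the tensor names that survive into the converted model are stable.
    named_in = tf.keras.layers.Input(shape=(16,), name="input")
    named_out = tf.keras.layers.Dense(8, activation="relu", name="dense")(named_in)
    model = tf.keras.models.Model(inputs=[named_in], outputs=named_out)
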
diff --git a/tests/test_tosa_checker.py b/tests/test_tosa_checker.py
index b24073a..52378ee 100644
--- a/tests/test_tosa_checker.py
+++ b/tests/test_tosa_checker.py
@@ -33,7 +33,9 @@ def build_tosa_non_compat_model():
scores_in = tf.keras.layers.Input(
shape=(num_boxes), batch_size=1, dtype=tf.float32, name="scores"
)
- outputs = tf.keras.layers.Lambda(non_max_suppression)([boxes_in, scores_in])
+ outputs = tf.keras.layers.Lambda(non_max_suppression, name="nms")(
+ [boxes_in, scores_in]
+ )
model = tf.keras.models.Model(inputs=[boxes_in, scores_in], outputs=outputs)
return model
@@ -54,9 +56,9 @@ def build_tosa_non_compat_model_custom_op():
return x
- input = tf.keras.layers.Input(shape=(16,))
- x = tf.keras.layers.Lambda(exp_log)(input)
- x = tf.keras.layers.Dense(8, activation="relu")(x)
+ input = tf.keras.layers.Input(shape=(16,), name="input")
+ x = tf.keras.layers.Lambda(exp_log, name="exp_log")(input)
+ x = tf.keras.layers.Dense(8, activation="relu", name="dense")(x)
model = tf.keras.models.Model(inputs=[input], outputs=x)
return model
@@ -64,8 +66,8 @@ def build_tosa_non_compat_model_custom_op():
@pytest.fixture(scope="module")
def build_tosa_compat_model():
- input = tf.keras.layers.Input(shape=(16,))
- x = tf.keras.layers.Dense(8, activation="relu")(input)
+ input = tf.keras.layers.Input(shape=(16,), name="input")
+ x = tf.keras.layers.Dense(8, activation="relu", name="dense")(input)
model = tf.keras.models.Model(inputs=[input], outputs=x)
return model
@@ -197,9 +199,10 @@ class TestTosaCompatibilityTool:
tfl_mlir_representation = checker._get_mlir_model_representation(
elide_large_elements_attrs=True
)
+ # TODO Use regular expression to make the test more robust or parse the MLIR module
expected_mlir_representation = """\
module attributes {tf_saved_model.semantics, tfl.description = "MLIR Converted.", tfl.schema_version = 3 : i32} {
- func @main(%arg0: tensor<1x6x4xf32> {tf_saved_model.index_path = ["boxes"]}, %arg1: tensor<1x6xf32> {tf_saved_model.index_path = ["scores"]}) -> (tensor<?xf32> {tf_saved_model.index_path = ["lambda_1"]}, tensor<?xi32> {tf_saved_model.index_path = ["lambda"]}) attributes {tf.entry_function = {inputs = "serving_default_boxes:0,serving_default_scores:0", outputs = "PartitionedCall:1,PartitionedCall:0"}, tf_saved_model.exported_names = ["serving_default"]} {
+ func @main(%arg0: tensor<1x6x4xf32> {tf_saved_model.index_path = ["boxes"]}, %arg1: tensor<1x6xf32> {tf_saved_model.index_path = ["scores"]}) -> (tensor<?xf32> {tf_saved_model.index_path = ["nms_1"]}, tensor<?xi32> {tf_saved_model.index_path = ["nms"]}) attributes {tf.entry_function = {inputs = "serving_default_boxes:0,serving_default_scores:0", outputs = "PartitionedCall:1,PartitionedCall:0"}, tf_saved_model.exported_names = ["serving_default"]} {
%0 = "tfl.pseudo_const"() {value = dense<0> : tensor<3xi32>} : () -> tensor<3xi32>
%1 = "tfl.pseudo_const"() {value = dense<[1, 6, 4]> : tensor<3xi32>} : () -> tensor<3xi32>
%2 = "tfl.pseudo_const"() {value = dense<1> : tensor<3xi32>} : () -> tensor<3xi32>
@@ -224,7 +227,7 @@ module attributes {tf_saved_model.semantics, tfl.description = "MLIR Converted."
)
expected_tosa_mlir_representation = """\
module attributes {tf_saved_model.semantics, tfl.description = "MLIR Converted.", tfl.schema_version = 3 : i32} {
- func @main(%arg0: tensor<1x6x4xf32> {tf_saved_model.index_path = ["boxes"]}, %arg1: tensor<1x6xf32> {tf_saved_model.index_path = ["scores"]}) -> (tensor<?xf32> {tf_saved_model.index_path = ["lambda_1"]}, tensor<?xi32> {tf_saved_model.index_path = ["lambda"]}) attributes {tf.entry_function = {inputs = "serving_default_boxes:0,serving_default_scores:0", outputs = "PartitionedCall:1,PartitionedCall:0"}, tf_saved_model.exported_names = ["serving_default"]} {
+ func @main(%arg0: tensor<1x6x4xf32> {tf_saved_model.index_path = ["boxes"]}, %arg1: tensor<1x6xf32> {tf_saved_model.index_path = ["scores"]}) -> (tensor<?xf32> {tf_saved_model.index_path = ["nms_1"]}, tensor<?xi32> {tf_saved_model.index_path = ["nms"]}) attributes {tf.entry_function = {inputs = "serving_default_boxes:0,serving_default_scores:0", outputs = "PartitionedCall:1,PartitionedCall:0"}, tf_saved_model.exported_names = ["serving_default"]} {
%0 = "tosa.const"() {value = dense<5> : tensor<i32>} : () -> tensor<i32>
%1 = "tosa.const"() {value = dense<5.000000e-01> : tensor<f32>} : () -> tensor<f32>
%2 = "tosa.const"() {value = dense<1.000000e-01> : tensor<f32>} : () -> tensor<f32>
@@ -245,7 +248,7 @@ module attributes {tf_saved_model.semantics, tfl.description = "MLIR Converted."
)
expected_mlir_representation = """\
module attributes {tf_saved_model.semantics, tfl.description = "MLIR Converted.", tfl.schema_version = 3 : i32} {
- func @main(%arg0: tensor<?x16xf32> {tf_saved_model.index_path = ["input_1"]}) -> (tensor<?x8xf32> {tf_saved_model.index_path = ["dense"]}) attributes {tf.entry_function = {inputs = "serving_default_input_1:0", outputs = "StatefulPartitionedCall:0"}, tf_saved_model.exported_names = ["serving_default"]} {
+ func @main(%arg0: tensor<?x16xf32> {tf_saved_model.index_path = ["input"]}) -> (tensor<?x8xf32> {tf_saved_model.index_path = ["dense"]}) attributes {tf.entry_function = {inputs = "serving_default_input:0", outputs = "StatefulPartitionedCall:0"}, tf_saved_model.exported_names = ["serving_default"]} {
%0 = "tfl.pseudo_const"() {value = opaque<"elided_large_const", "0xDEADBEEF"> : tensor<8x16xf32>} : () -> tensor<8x16xf32>
%1 = "tfl.no_value"() {value} : () -> none
%2 = "tfl.fully_connected"(%arg0, %0, %1) {asymmetric_quantize_inputs = false, fused_activation_function = "RELU", keep_num_dims = false, weights_format = "DEFAULT"} : (tensor<?x16xf32>, tensor<8x16xf32>, none) -> tensor<?x8xf32>
@@ -260,7 +263,7 @@ module attributes {tf_saved_model.semantics, tfl.description = "MLIR Converted."
)
expected_tosa_mlir_representation = """\
module attributes {tf_saved_model.semantics, tfl.description = "MLIR Converted.", tfl.schema_version = 3 : i32} {
- func @main(%arg0: tensor<?x16xf32> {tf_saved_model.index_path = ["input_1"]}) -> (tensor<?x8xf32> {tf_saved_model.index_path = ["dense"]}) attributes {tf.entry_function = {inputs = "serving_default_input_1:0", outputs = "StatefulPartitionedCall:0"}, tf_saved_model.exported_names = ["serving_default"]} {
+ func @main(%arg0: tensor<?x16xf32> {tf_saved_model.index_path = ["input"]}) -> (tensor<?x8xf32> {tf_saved_model.index_path = ["dense"]}) attributes {tf.entry_function = {inputs = "serving_default_input:0", outputs = "StatefulPartitionedCall:0"}, tf_saved_model.exported_names = ["serving_default"]} {
%0 = "tosa.const"() {value = opaque<"elided_large_const", "0xDEADBEEF"> : tensor<8x16xf32>} : () -> tensor<8x16xf32>
%1 = "tosa.const"() {value = dense<0.000000e+00> : tensor<8xf32>} : () -> tensor<8xf32>
%2 = "tosa.fully_connected"(%arg0, %0, %1) : (tensor<?x16xf32>, tensor<8x16xf32>, tensor<8xf32>) -> tensor<?x8xf32>