about summary refs log tree commit diff
path: root/tests
diff options
context:
space:
mode:
author: Dmitrii Agibov <dmitrii.agibov@arm.com> 2022-10-24 15:08:08 +0100
committer: Dmitrii Agibov <dmitrii.agibov@arm.com> 2022-10-26 17:08:13 +0100
commit: 58a65fee574c00329cf92b387a6d2513dcbf6100 (patch)
tree: 47e3185f78b4298ab029785ddee68456e44cac10 /tests
parent: 9d34cb72d45a6d0a2ec1063ebf32536c1efdba75 (diff)
download: mlia-58a65fee574c00329cf92b387a6d2513dcbf6100.tar.gz
MLIA-433 Add TensorFlow Lite compatibility check
- Add ability to intercept low level TensorFlow output - Produce advice for the models that could not be converted to the TensorFlow Lite format - Refactor utility functions for TensorFlow Lite conversion - Add TensorFlow Lite compatibility checker Change-Id: I47d120d2619ced7b143bc92c5184515b81c0220d
Diffstat (limited to 'tests')
-rw-r--r--tests/test_devices_cortex_a_data_analysis.py35
-rw-r--r--tests/test_devices_cortexa_advice_generation.py (renamed from tests/test_devices_cortex_a_advice_generation.py)38
-rw-r--r--tests/test_devices_cortexa_data_analysis.py72
-rw-r--r--tests/test_devices_cortexa_data_collection.py (renamed from tests/test_devices_cortex_a_data_collection.py)0
-rw-r--r--tests/test_nn_tensorflow_tflite_compat.py210
-rw-r--r--tests/test_nn_tensorflow_utils.py44
-rw-r--r--tests/test_utils_logging.py24
7 files changed, 383 insertions, 40 deletions
diff --git a/tests/test_devices_cortex_a_data_analysis.py b/tests/test_devices_cortex_a_data_analysis.py
deleted file mode 100644
index 4724c81..0000000
--- a/tests/test_devices_cortex_a_data_analysis.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-# SPDX-License-Identifier: Apache-2.0
-"""Tests for Cortex-A data analysis module."""
-from __future__ import annotations
-
-import pytest
-
-from mlia.core.common import DataItem
-from mlia.core.data_analysis import Fact
-from mlia.devices.cortexa.data_analysis import CortexADataAnalyzer
-from mlia.devices.cortexa.data_analysis import ModelIsCortexACompatible
-from mlia.devices.cortexa.data_analysis import ModelIsNotCortexACompatible
-from mlia.devices.cortexa.operators import CortexACompatibilityInfo
-
-
-@pytest.mark.parametrize(
- "input_data, expected_facts",
- [
- [
- CortexACompatibilityInfo(True, []),
- [ModelIsCortexACompatible()],
- ],
- [
- CortexACompatibilityInfo(False, []),
- [ModelIsNotCortexACompatible()],
- ],
- ],
-)
-def test_cortex_a_data_analyzer(
- input_data: DataItem, expected_facts: list[Fact]
-) -> None:
- """Test Cortex-A data analyzer."""
- analyzer = CortexADataAnalyzer()
- analyzer.analyze_data(input_data)
- assert analyzer.get_analyzed_data() == expected_facts
diff --git a/tests/test_devices_cortex_a_advice_generation.py b/tests/test_devices_cortexa_advice_generation.py
index 69529d4..ead8ae6 100644
--- a/tests/test_devices_cortex_a_advice_generation.py
+++ b/tests/test_devices_cortexa_advice_generation.py
@@ -12,6 +12,7 @@ from mlia.core.context import ExecutionContext
from mlia.devices.cortexa.advice_generation import CortexAAdviceProducer
from mlia.devices.cortexa.data_analysis import ModelIsCortexACompatible
from mlia.devices.cortexa.data_analysis import ModelIsNotCortexACompatible
+from mlia.devices.cortexa.data_analysis import ModelIsNotTFLiteCompatible
@pytest.mark.parametrize(
@@ -34,6 +35,43 @@ from mlia.devices.cortexa.data_analysis import ModelIsNotCortexACompatible
AdviceCategory.OPERATORS,
[Advice(["Model is fully compatible with Cortex-A."])],
],
+ [
+ ModelIsNotTFLiteCompatible(
+ flex_ops=["flex_op1", "flex_op2"],
+ custom_ops=["custom_op1", "custom_op2"],
+ ),
+ AdviceCategory.OPERATORS,
+ [
+ Advice(
+ [
+ "The following operators are not natively "
+ "supported by TensorFlow Lite: flex_op1, flex_op2.",
+ "Please refer to the TensorFlow documentation for "
+ "more details.",
+ ]
+ ),
+ Advice(
+ [
+ "The following operators are custom and not natively "
+ "supported by TensorFlow Lite: custom_op1, custom_op2.",
+ "Please refer to the TensorFlow documentation for "
+ "more details.",
+ ]
+ ),
+ ],
+ ],
+ [
+ ModelIsNotTFLiteCompatible(),
+ AdviceCategory.OPERATORS,
+ [
+ Advice(
+ [
+ "Model could not be converted into TensorFlow Lite format.",
+ "Please refer to the table for more details.",
+ ]
+ ),
+ ],
+ ],
],
)
def test_cortex_a_advice_producer(
diff --git a/tests/test_devices_cortexa_data_analysis.py b/tests/test_devices_cortexa_data_analysis.py
new file mode 100644
index 0000000..b491e52
--- /dev/null
+++ b/tests/test_devices_cortexa_data_analysis.py
@@ -0,0 +1,72 @@
+# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+"""Tests for Cortex-A data analysis module."""
+from __future__ import annotations
+
+import pytest
+
+from mlia.core.common import DataItem
+from mlia.core.data_analysis import Fact
+from mlia.devices.cortexa.data_analysis import CortexADataAnalyzer
+from mlia.devices.cortexa.data_analysis import ModelIsCortexACompatible
+from mlia.devices.cortexa.data_analysis import ModelIsNotCortexACompatible
+from mlia.devices.cortexa.data_analysis import ModelIsNotTFLiteCompatible
+from mlia.devices.cortexa.operators import CortexACompatibilityInfo
+from mlia.nn.tensorflow.tflite_compat import TFLiteCompatibilityInfo
+from mlia.nn.tensorflow.tflite_compat import TFLiteConversionError
+from mlia.nn.tensorflow.tflite_compat import TFLiteConversionErrorCode
+
+
+@pytest.mark.parametrize(
+ "input_data, expected_facts",
+ [
+ [
+ CortexACompatibilityInfo(True, []),
+ [ModelIsCortexACompatible()],
+ ],
+ [
+ CortexACompatibilityInfo(False, []),
+ [ModelIsNotCortexACompatible()],
+ ],
+ [
+ TFLiteCompatibilityInfo(compatible=True),
+ [],
+ ],
+ [
+ TFLiteCompatibilityInfo(compatible=False),
+ [ModelIsNotTFLiteCompatible(custom_ops=[], flex_ops=[])],
+ ],
+ [
+ TFLiteCompatibilityInfo(
+ compatible=False,
+ conversion_errors=[
+ TFLiteConversionError(
+ "error",
+ TFLiteConversionErrorCode.NEEDS_CUSTOM_OPS,
+ "custom_op1",
+ [],
+ ),
+ TFLiteConversionError(
+ "error",
+ TFLiteConversionErrorCode.NEEDS_FLEX_OPS,
+ "flex_op1",
+ [],
+ ),
+ ],
+ ),
+ [
+ ModelIsNotTFLiteCompatible(
+ custom_ops=["custom_op1"],
+ flex_ops=["flex_op1"],
+ )
+ ],
+ ],
+ ],
+)
+def test_cortex_a_data_analyzer(
+ input_data: DataItem, expected_facts: list[Fact]
+) -> None:
+ """Test Cortex-A data analyzer."""
+ analyzer = CortexADataAnalyzer()
+ analyzer.analyze_data(input_data)
+ assert analyzer.get_analyzed_data() == expected_facts
diff --git a/tests/test_devices_cortex_a_data_collection.py b/tests/test_devices_cortexa_data_collection.py
index 7ea3e52..7ea3e52 100644
--- a/tests/test_devices_cortex_a_data_collection.py
+++ b/tests/test_devices_cortexa_data_collection.py
diff --git a/tests/test_nn_tensorflow_tflite_compat.py b/tests/test_nn_tensorflow_tflite_compat.py
new file mode 100644
index 0000000..c330fdb
--- /dev/null
+++ b/tests/test_nn_tensorflow_tflite_compat.py
@@ -0,0 +1,210 @@
+# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+"""Tests for tflite_compat module."""
+from __future__ import annotations
+
+from unittest.mock import MagicMock
+
+import pytest
+import tensorflow as tf
+from tensorflow.lite.python import convert
+from tensorflow.lite.python.metrics import converter_error_data_pb2
+
+from mlia.nn.tensorflow.tflite_compat import TFLiteChecker
+from mlia.nn.tensorflow.tflite_compat import TFLiteCompatibilityInfo
+from mlia.nn.tensorflow.tflite_compat import TFLiteConversionError
+from mlia.nn.tensorflow.tflite_compat import TFLiteConversionErrorCode
+
+
+def test_not_fully_compatible_model_flex_ops() -> None:
+ """Test models that requires TF_SELECT_OPS."""
+ model = tf.keras.models.Sequential(
+ [
+ tf.keras.layers.Dense(units=1, input_shape=[1], batch_size=1),
+ tf.keras.layers.Dense(units=16, activation="gelu"),
+ tf.keras.layers.Dense(units=1),
+ ]
+ )
+
+ checker = TFLiteChecker()
+ result = checker.check_compatibility(model)
+
+ assert result.compatible is False
+ assert isinstance(result.conversion_exception, convert.ConverterError)
+ assert result.conversion_errors is not None
+ assert len(result.conversion_errors) == 1
+
+ conv_err = result.conversion_errors[0]
+ assert isinstance(conv_err, TFLiteConversionError)
+ assert conv_err.message == "'tf.Erf' op is neither a custom op nor a flex op"
+ assert conv_err.code == TFLiteConversionErrorCode.NEEDS_FLEX_OPS
+ assert conv_err.operator == "tf.Erf"
+ assert len(conv_err.location) == 3
+
+
+def _get_tflite_conversion_error(
+ error_message: str = "Conversion error",
+ custom_op: bool = False,
+ flex_op: bool = False,
+ unsupported_flow_v1: bool = False,
+ gpu_not_compatible: bool = False,
+ unknown_reason: bool = False,
+) -> convert.ConverterError:
+ """Create TensorFlow Lite conversion error."""
+ error_data = converter_error_data_pb2.ConverterErrorData
+ convert_error = convert.ConverterError(error_message)
+
+ # pylint: disable=no-member
+ def _add_error(operator: str, error_code: int) -> None:
+ convert_error.append_error(
+ error_data(
+ operator=error_data.Operator(name=operator),
+ error_code=error_code,
+ error_message=error_message,
+ )
+ )
+
+ if custom_op:
+ _add_error("custom_op", error_data.ERROR_NEEDS_CUSTOM_OPS)
+
+ if flex_op:
+ _add_error("flex_op", error_data.ERROR_NEEDS_FLEX_OPS)
+
+ if unsupported_flow_v1:
+ _add_error("flow_op", error_data.ERROR_UNSUPPORTED_CONTROL_FLOW_V1)
+
+ if gpu_not_compatible:
+ _add_error("non_gpu_op", error_data.ERROR_GPU_NOT_COMPATIBLE)
+
+ if unknown_reason:
+ _add_error("unknown_op", None) # type: ignore
+ # pylint: enable=no-member
+
+ return convert_error
+
+
+# pylint: disable=undefined-variable,unused-variable
+@pytest.mark.parametrize(
+ "conversion_error, expected_result",
+ [
+ (None, TFLiteCompatibilityInfo(compatible=True)),
+ (
+ err := _get_tflite_conversion_error(custom_op=True),
+ TFLiteCompatibilityInfo(
+ compatible=False,
+ conversion_exception=err,
+ conversion_errors=[
+ TFLiteConversionError(
+ message="Conversion error",
+ code=TFLiteConversionErrorCode.NEEDS_CUSTOM_OPS,
+ operator="custom_op",
+ location=[],
+ )
+ ],
+ ),
+ ),
+ (
+ err := _get_tflite_conversion_error(flex_op=True),
+ TFLiteCompatibilityInfo(
+ compatible=False,
+ conversion_exception=err,
+ conversion_errors=[
+ TFLiteConversionError(
+ message="Conversion error",
+ code=TFLiteConversionErrorCode.NEEDS_FLEX_OPS,
+ operator="flex_op",
+ location=[],
+ )
+ ],
+ ),
+ ),
+ (
+ err := _get_tflite_conversion_error(unknown_reason=True),
+ TFLiteCompatibilityInfo(
+ compatible=False,
+ conversion_exception=err,
+ conversion_errors=[
+ TFLiteConversionError(
+ message="Conversion error",
+ code=TFLiteConversionErrorCode.UNKNOWN,
+ operator="unknown_op",
+ location=[],
+ )
+ ],
+ ),
+ ),
+ (
+ err := _get_tflite_conversion_error(
+ flex_op=True,
+ custom_op=True,
+ gpu_not_compatible=True,
+ unsupported_flow_v1=True,
+ ),
+ TFLiteCompatibilityInfo(
+ compatible=False,
+ conversion_exception=err,
+ conversion_errors=[
+ TFLiteConversionError(
+ message="Conversion error",
+ code=TFLiteConversionErrorCode.NEEDS_CUSTOM_OPS,
+ operator="custom_op",
+ location=[],
+ ),
+ TFLiteConversionError(
+ message="Conversion error",
+ code=TFLiteConversionErrorCode.NEEDS_FLEX_OPS,
+ operator="flex_op",
+ location=[],
+ ),
+ TFLiteConversionError(
+ message="Conversion error",
+ code=TFLiteConversionErrorCode.UNSUPPORTED_CONTROL_FLOW_V1,
+ operator="flow_op",
+ location=[],
+ ),
+ TFLiteConversionError(
+ message="Conversion error",
+ code=TFLiteConversionErrorCode.GPU_NOT_COMPATIBLE,
+ operator="non_gpu_op",
+ location=[],
+ ),
+ ],
+ ),
+ ),
+ (
+ err := _get_tflite_conversion_error(),
+ TFLiteCompatibilityInfo(
+ compatible=False,
+ conversion_exception=err,
+ conversion_errors=[],
+ ),
+ ),
+ (
+ err := ValueError("Some unknown issue"),
+ TFLiteCompatibilityInfo(
+ compatible=False,
+ conversion_exception=err,
+ ),
+ ),
+ ],
+)
+# pylint: enable=undefined-variable,unused-variable
+def test_tflite_compatibility(
+ conversion_error: convert.ConverterError | ValueError | None,
+ expected_result: TFLiteCompatibilityInfo,
+ monkeypatch: pytest.MonkeyPatch,
+) -> None:
+ """Test TensorFlow Lite compatibility."""
+ converter_mock = MagicMock()
+
+ if conversion_error is not None:
+ converter_mock.convert.side_effect = conversion_error
+
+ monkeypatch.setattr(
+ "mlia.nn.tensorflow.tflite_compat.get_tflite_converter",
+ lambda *args, **kwargs: converter_mock,
+ )
+
+ checker = TFLiteChecker()
+ result = checker.check_compatibility(MagicMock())
+ assert result == expected_result
diff --git a/tests/test_nn_tensorflow_utils.py b/tests/test_nn_tensorflow_utils.py
index 199c7db..5131171 100644
--- a/tests/test_nn_tensorflow_utils.py
+++ b/tests/test_nn_tensorflow_utils.py
@@ -3,6 +3,7 @@
"""Test for module utils/test_utils."""
from pathlib import Path
+import numpy as np
import pytest
import tensorflow as tf
@@ -10,16 +11,43 @@ from mlia.nn.tensorflow.utils import convert_to_tflite
from mlia.nn.tensorflow.utils import get_tf_tensor_shape
from mlia.nn.tensorflow.utils import is_keras_model
from mlia.nn.tensorflow.utils import is_tflite_model
+from mlia.nn.tensorflow.utils import representative_dataset
from mlia.nn.tensorflow.utils import save_keras_model
from mlia.nn.tensorflow.utils import save_tflite_model
-def test_convert_to_tflite(test_keras_model: Path) -> None:
+def test_generate_representative_dataset() -> None:
+ """Test function for generating representative dataset."""
+ dataset = representative_dataset([1, 3, 3], 5)
+ data = list(dataset())
+
+ assert len(data) == 5
+ for elem in data:
+ assert isinstance(elem, list)
+ assert len(elem) == 1
+
+ ndarray = elem[0]
+ assert ndarray.dtype == np.float32
+ assert isinstance(ndarray, np.ndarray)
+
+
+def test_generate_representative_dataset_wrong_shape() -> None:
+ """Test that only shape with batch size=1 is supported."""
+ with pytest.raises(Exception, match="Only the input batch_size=1 is supported!"):
+ representative_dataset([2, 3, 3], 5)
+
+
+def test_convert_saved_model_to_tflite(test_tf_model: Path) -> None:
+ """Test converting SavedModel to TensorFlow Lite."""
+ result = convert_to_tflite(test_tf_model.as_posix())
+ assert isinstance(result, bytes)
+
+
+def test_convert_keras_to_tflite(test_keras_model: Path) -> None:
"""Test converting Keras model to TensorFlow Lite."""
keras_model = tf.keras.models.load_model(str(test_keras_model))
- tflite_model = convert_to_tflite(keras_model)
-
- assert tflite_model
+ result = convert_to_tflite(keras_model)
+ assert isinstance(result, bytes)
def test_save_keras_model(tmp_path: Path, test_keras_model: Path) -> None:
@@ -46,6 +74,14 @@ def test_save_tflite_model(tmp_path: Path, test_keras_model: Path) -> None:
assert interpreter
+def test_convert_unknown_model_to_tflite() -> None:
+ """Test that unknown model type cannot be converted to TensorFlow Lite."""
+ with pytest.raises(
+ ValueError, match="Unable to create TensorFlow Lite converter for 123"
+ ):
+ convert_to_tflite(123)
+
+
@pytest.mark.parametrize(
"model_path, expected_result",
[
diff --git a/tests/test_utils_logging.py b/tests/test_utils_logging.py
index 1e212b2..ac835c6 100644
--- a/tests/test_utils_logging.py
+++ b/tests/test_utils_logging.py
@@ -8,10 +8,14 @@ import sys
from contextlib import ExitStack as does_not_raise
from pathlib import Path
from typing import Any
+from typing import Callable
+from unittest.mock import MagicMock
import pytest
-from mlia.cli.logging import create_log_handler
+from mlia.utils.logging import create_log_handler
+from mlia.utils.logging import redirect_output
+from mlia.utils.logging import redirect_raw_output
@pytest.mark.parametrize(
@@ -62,3 +66,21 @@ def test_create_log_handler(
delay=delay,
)
assert isinstance(handler, expected_class)
+
+
+@pytest.mark.parametrize(
+ "redirect_context_manager",
+ [
+ redirect_raw_output,
+ redirect_output,
+ ],
+)
+def test_output_redirection(redirect_context_manager: Callable) -> None:
+ """Test output redirection via context manager."""
+ print("before redirect")
+ logger_mock = MagicMock()
+ with redirect_context_manager(logger_mock):
+ print("output redirected")
+ print("after redirect")
+
+ logger_mock.log.assert_called_once_with(logging.INFO, "output redirected")