From 6a88ee5315b4ce5b023370c1e55e48bf9f2b6f67 Mon Sep 17 00:00:00 2001 From: Dmitrii Agibov Date: Fri, 18 Nov 2022 17:21:09 +0000 Subject: Rename modules - Rename module "mlia.devices" into "mlia.target" - Rename module "mlia.target.ethosu" into "mlia.target.ethos_u" - Rename module "mlia.target.cortexa" into "mlia.target.cortex_a" - Rename and update tests Change-Id: I6dca7c8646d881f739fb6b5914d1cc7e45e63dc2 --- src/mlia/api.py | 12 +- src/mlia/devices/__init__.py | 3 - src/mlia/devices/config.py | 11 - src/mlia/devices/cortexa/__init__.py | 3 - src/mlia/devices/cortexa/advice_generation.py | 153 ------- src/mlia/devices/cortexa/advisor.py | 92 ---- src/mlia/devices/cortexa/config.py | 20 - src/mlia/devices/cortexa/data_analysis.py | 128 ------ src/mlia/devices/cortexa/data_collection.py | 51 --- src/mlia/devices/cortexa/events.py | 24 - src/mlia/devices/cortexa/handlers.py | 39 -- src/mlia/devices/cortexa/operator_compatibility.py | 184 -------- src/mlia/devices/cortexa/operators.py | 148 ------- src/mlia/devices/cortexa/reporters.py | 140 ------ src/mlia/devices/ethosu/__init__.py | 3 - src/mlia/devices/ethosu/advice_generation.py | 206 --------- src/mlia/devices/ethosu/advisor.py | 194 --------- src/mlia/devices/ethosu/config.py | 90 ---- src/mlia/devices/ethosu/data_analysis.py | 153 ------- src/mlia/devices/ethosu/data_collection.py | 187 -------- src/mlia/devices/ethosu/events.py | 24 - src/mlia/devices/ethosu/handlers.py | 55 --- src/mlia/devices/ethosu/operators.py | 14 - src/mlia/devices/ethosu/performance.py | 261 ----------- src/mlia/devices/ethosu/reporters.py | 385 ---------------- src/mlia/devices/tosa/__init__.py | 3 - src/mlia/devices/tosa/advice_generation.py | 40 -- src/mlia/devices/tosa/advisor.py | 94 ---- src/mlia/devices/tosa/config.py | 19 - src/mlia/devices/tosa/data_analysis.py | 36 -- src/mlia/devices/tosa/data_collection.py | 30 -- src/mlia/devices/tosa/events.py | 24 - src/mlia/devices/tosa/handlers.py | 36 -- 
src/mlia/devices/tosa/operators.py | 11 - src/mlia/devices/tosa/reporters.py | 83 ---- src/mlia/target/__init__.py | 3 + src/mlia/target/config.py | 11 + src/mlia/target/cortex_a/__init__.py | 3 + src/mlia/target/cortex_a/advice_generation.py | 153 +++++++ src/mlia/target/cortex_a/advisor.py | 92 ++++ src/mlia/target/cortex_a/config.py | 20 + src/mlia/target/cortex_a/data_analysis.py | 128 ++++++ src/mlia/target/cortex_a/data_collection.py | 51 +++ src/mlia/target/cortex_a/events.py | 24 + src/mlia/target/cortex_a/handlers.py | 39 ++ src/mlia/target/cortex_a/operator_compatibility.py | 184 ++++++++ src/mlia/target/cortex_a/operators.py | 148 +++++++ src/mlia/target/cortex_a/reporters.py | 140 ++++++ src/mlia/target/ethos_u/__init__.py | 3 + src/mlia/target/ethos_u/advice_generation.py | 206 +++++++++ src/mlia/target/ethos_u/advisor.py | 194 +++++++++ src/mlia/target/ethos_u/config.py | 90 ++++ src/mlia/target/ethos_u/data_analysis.py | 153 +++++++ src/mlia/target/ethos_u/data_collection.py | 187 ++++++++ src/mlia/target/ethos_u/events.py | 24 + src/mlia/target/ethos_u/handlers.py | 55 +++ src/mlia/target/ethos_u/operators.py | 14 + src/mlia/target/ethos_u/performance.py | 261 +++++++++++ src/mlia/target/ethos_u/reporters.py | 385 ++++++++++++++++ src/mlia/target/tosa/__init__.py | 3 + src/mlia/target/tosa/advice_generation.py | 40 ++ src/mlia/target/tosa/advisor.py | 94 ++++ src/mlia/target/tosa/config.py | 19 + src/mlia/target/tosa/data_analysis.py | 36 ++ src/mlia/target/tosa/data_collection.py | 30 ++ src/mlia/target/tosa/events.py | 24 + src/mlia/target/tosa/handlers.py | 36 ++ src/mlia/target/tosa/operators.py | 11 + src/mlia/target/tosa/reporters.py | 83 ++++ tests/conftest.py | 2 +- tests/test_api.py | 8 +- tests/test_backend_vela_compat.py | 2 +- tests/test_backend_vela_compiler.py | 2 +- tests/test_backend_vela_performance.py | 2 +- tests/test_cli_commands.py | 10 +- tests/test_devices_cortexa_advice_generation.py | 196 --------- 
tests/test_devices_cortexa_advisor.py | 34 -- tests/test_devices_cortexa_data_analysis.py | 162 ------- tests/test_devices_cortexa_data_collection.py | 52 --- tests/test_devices_cortexa_operators.py | 73 ---- tests/test_devices_cortexa_reporters.py | 53 --- tests/test_devices_ethosu_advice_generation.py | 482 --------------------- tests/test_devices_ethosu_advisor.py | 9 - tests/test_devices_ethosu_config.py | 125 ------ tests/test_devices_ethosu_data_analysis.py | 147 ------- tests/test_devices_ethosu_data_collection.py | 151 ------- tests/test_devices_ethosu_performance.py | 28 -- tests/test_devices_ethosu_reporters.py | 353 --------------- tests/test_devices_tosa_advice_generation.py | 56 --- tests/test_devices_tosa_advisor.py | 29 -- tests/test_devices_tosa_data_analysis.py | 33 -- tests/test_devices_tosa_data_collection.py | 28 -- tests/test_mlia_utils_py_manager.py | 73 ---- tests/test_target_cortex_a_advice_generation.py | 196 +++++++++ tests/test_target_cortex_a_advisor.py | 34 ++ tests/test_target_cortex_a_data_analysis.py | 162 +++++++ tests/test_target_cortex_a_data_collection.py | 52 +++ tests/test_target_cortex_a_operators.py | 73 ++++ tests/test_target_cortex_a_reporters.py | 53 +++ tests/test_target_ethos_u_advice_generation.py | 482 +++++++++++++++++++++ tests/test_target_ethos_u_advisor.py | 9 + tests/test_target_ethos_u_config.py | 125 ++++++ tests/test_target_ethos_u_data_analysis.py | 147 +++++++ tests/test_target_ethos_u_data_collection.py | 151 +++++++ tests/test_target_ethos_u_performance.py | 28 ++ tests/test_target_ethos_u_reporters.py | 353 +++++++++++++++ tests/test_target_tosa_advice_generation.py | 56 +++ tests/test_target_tosa_advisor.py | 29 ++ tests/test_target_tosa_data_analysis.py | 33 ++ tests/test_target_tosa_data_collection.py | 28 ++ tests/test_utils_py_manager.py | 73 ++++ 111 files changed, 5047 insertions(+), 5047 deletions(-) delete mode 100644 src/mlia/devices/__init__.py delete mode 100644 src/mlia/devices/config.py 
delete mode 100644 src/mlia/devices/cortexa/__init__.py delete mode 100644 src/mlia/devices/cortexa/advice_generation.py delete mode 100644 src/mlia/devices/cortexa/advisor.py delete mode 100644 src/mlia/devices/cortexa/config.py delete mode 100644 src/mlia/devices/cortexa/data_analysis.py delete mode 100644 src/mlia/devices/cortexa/data_collection.py delete mode 100644 src/mlia/devices/cortexa/events.py delete mode 100644 src/mlia/devices/cortexa/handlers.py delete mode 100644 src/mlia/devices/cortexa/operator_compatibility.py delete mode 100644 src/mlia/devices/cortexa/operators.py delete mode 100644 src/mlia/devices/cortexa/reporters.py delete mode 100644 src/mlia/devices/ethosu/__init__.py delete mode 100644 src/mlia/devices/ethosu/advice_generation.py delete mode 100644 src/mlia/devices/ethosu/advisor.py delete mode 100644 src/mlia/devices/ethosu/config.py delete mode 100644 src/mlia/devices/ethosu/data_analysis.py delete mode 100644 src/mlia/devices/ethosu/data_collection.py delete mode 100644 src/mlia/devices/ethosu/events.py delete mode 100644 src/mlia/devices/ethosu/handlers.py delete mode 100644 src/mlia/devices/ethosu/operators.py delete mode 100644 src/mlia/devices/ethosu/performance.py delete mode 100644 src/mlia/devices/ethosu/reporters.py delete mode 100644 src/mlia/devices/tosa/__init__.py delete mode 100644 src/mlia/devices/tosa/advice_generation.py delete mode 100644 src/mlia/devices/tosa/advisor.py delete mode 100644 src/mlia/devices/tosa/config.py delete mode 100644 src/mlia/devices/tosa/data_analysis.py delete mode 100644 src/mlia/devices/tosa/data_collection.py delete mode 100644 src/mlia/devices/tosa/events.py delete mode 100644 src/mlia/devices/tosa/handlers.py delete mode 100644 src/mlia/devices/tosa/operators.py delete mode 100644 src/mlia/devices/tosa/reporters.py create mode 100644 src/mlia/target/__init__.py create mode 100644 src/mlia/target/config.py create mode 100644 src/mlia/target/cortex_a/__init__.py create mode 100644 
src/mlia/target/cortex_a/advice_generation.py create mode 100644 src/mlia/target/cortex_a/advisor.py create mode 100644 src/mlia/target/cortex_a/config.py create mode 100644 src/mlia/target/cortex_a/data_analysis.py create mode 100644 src/mlia/target/cortex_a/data_collection.py create mode 100644 src/mlia/target/cortex_a/events.py create mode 100644 src/mlia/target/cortex_a/handlers.py create mode 100644 src/mlia/target/cortex_a/operator_compatibility.py create mode 100644 src/mlia/target/cortex_a/operators.py create mode 100644 src/mlia/target/cortex_a/reporters.py create mode 100644 src/mlia/target/ethos_u/__init__.py create mode 100644 src/mlia/target/ethos_u/advice_generation.py create mode 100644 src/mlia/target/ethos_u/advisor.py create mode 100644 src/mlia/target/ethos_u/config.py create mode 100644 src/mlia/target/ethos_u/data_analysis.py create mode 100644 src/mlia/target/ethos_u/data_collection.py create mode 100644 src/mlia/target/ethos_u/events.py create mode 100644 src/mlia/target/ethos_u/handlers.py create mode 100644 src/mlia/target/ethos_u/operators.py create mode 100644 src/mlia/target/ethos_u/performance.py create mode 100644 src/mlia/target/ethos_u/reporters.py create mode 100644 src/mlia/target/tosa/__init__.py create mode 100644 src/mlia/target/tosa/advice_generation.py create mode 100644 src/mlia/target/tosa/advisor.py create mode 100644 src/mlia/target/tosa/config.py create mode 100644 src/mlia/target/tosa/data_analysis.py create mode 100644 src/mlia/target/tosa/data_collection.py create mode 100644 src/mlia/target/tosa/events.py create mode 100644 src/mlia/target/tosa/handlers.py create mode 100644 src/mlia/target/tosa/operators.py create mode 100644 src/mlia/target/tosa/reporters.py delete mode 100644 tests/test_devices_cortexa_advice_generation.py delete mode 100644 tests/test_devices_cortexa_advisor.py delete mode 100644 tests/test_devices_cortexa_data_analysis.py delete mode 100644 tests/test_devices_cortexa_data_collection.py delete 
mode 100644 tests/test_devices_cortexa_operators.py delete mode 100644 tests/test_devices_cortexa_reporters.py delete mode 100644 tests/test_devices_ethosu_advice_generation.py delete mode 100644 tests/test_devices_ethosu_advisor.py delete mode 100644 tests/test_devices_ethosu_config.py delete mode 100644 tests/test_devices_ethosu_data_analysis.py delete mode 100644 tests/test_devices_ethosu_data_collection.py delete mode 100644 tests/test_devices_ethosu_performance.py delete mode 100644 tests/test_devices_ethosu_reporters.py delete mode 100644 tests/test_devices_tosa_advice_generation.py delete mode 100644 tests/test_devices_tosa_advisor.py delete mode 100644 tests/test_devices_tosa_data_analysis.py delete mode 100644 tests/test_devices_tosa_data_collection.py delete mode 100644 tests/test_mlia_utils_py_manager.py create mode 100644 tests/test_target_cortex_a_advice_generation.py create mode 100644 tests/test_target_cortex_a_advisor.py create mode 100644 tests/test_target_cortex_a_data_analysis.py create mode 100644 tests/test_target_cortex_a_data_collection.py create mode 100644 tests/test_target_cortex_a_operators.py create mode 100644 tests/test_target_cortex_a_reporters.py create mode 100644 tests/test_target_ethos_u_advice_generation.py create mode 100644 tests/test_target_ethos_u_advisor.py create mode 100644 tests/test_target_ethos_u_config.py create mode 100644 tests/test_target_ethos_u_data_analysis.py create mode 100644 tests/test_target_ethos_u_data_collection.py create mode 100644 tests/test_target_ethos_u_performance.py create mode 100644 tests/test_target_ethos_u_reporters.py create mode 100644 tests/test_target_tosa_advice_generation.py create mode 100644 tests/test_target_tosa_advisor.py create mode 100644 tests/test_target_tosa_data_analysis.py create mode 100644 tests/test_target_tosa_data_collection.py create mode 100644 tests/test_utils_py_manager.py diff --git a/src/mlia/api.py b/src/mlia/api.py index 6af7db2..c7be9ec 100644 --- 
a/src/mlia/api.py +++ b/src/mlia/api.py @@ -12,12 +12,12 @@ from mlia.core.advisor import InferenceAdvisor from mlia.core.common import AdviceCategory from mlia.core.context import ExecutionContext from mlia.core.typing import PathOrFileLike -from mlia.devices.cortexa.advisor import configure_and_get_cortexa_advisor -from mlia.devices.cortexa.operators import report as cortex_a_report -from mlia.devices.ethosu.advisor import configure_and_get_ethosu_advisor -from mlia.devices.ethosu.operators import report as ethos_u_report -from mlia.devices.tosa.advisor import configure_and_get_tosa_advisor -from mlia.devices.tosa.operators import report as tosa_report +from mlia.target.cortex_a.advisor import configure_and_get_cortexa_advisor +from mlia.target.cortex_a.operators import report as cortex_a_report +from mlia.target.ethos_u.advisor import configure_and_get_ethosu_advisor +from mlia.target.ethos_u.operators import report as ethos_u_report +from mlia.target.tosa.advisor import configure_and_get_tosa_advisor +from mlia.target.tosa.operators import report as tosa_report from mlia.utils.filesystem import get_target logger = logging.getLogger(__name__) diff --git a/src/mlia/devices/__init__.py b/src/mlia/devices/__init__.py deleted file mode 100644 index d533f4a..0000000 --- a/src/mlia/devices/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Devices module.""" diff --git a/src/mlia/devices/config.py b/src/mlia/devices/config.py deleted file mode 100644 index 7ab6b43..0000000 --- a/src/mlia/devices/config.py +++ /dev/null @@ -1,11 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""IP configuration module.""" - - -class IPConfiguration: # pylint: disable=too-few-public-methods - """Base class for IP configuration.""" - - def __init__(self, target: str) -> None: - """Init IP configuration instance.""" - self.target = target diff --git a/src/mlia/devices/cortexa/__init__.py b/src/mlia/devices/cortexa/__init__.py deleted file mode 100644 index 3a987e7..0000000 --- a/src/mlia/devices/cortexa/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Cortex-A devices module.""" diff --git a/src/mlia/devices/cortexa/advice_generation.py b/src/mlia/devices/cortexa/advice_generation.py deleted file mode 100644 index bab9530..0000000 --- a/src/mlia/devices/cortexa/advice_generation.py +++ /dev/null @@ -1,153 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Cortex-A advice generation.""" -from functools import singledispatchmethod - -from mlia.core.advice_generation import advice_category -from mlia.core.advice_generation import FactBasedAdviceProducer -from mlia.core.common import AdviceCategory -from mlia.core.common import DataItem -from mlia.devices.cortexa.data_analysis import ModelHasCustomOperators -from mlia.devices.cortexa.data_analysis import ModelIsCortexACompatible -from mlia.devices.cortexa.data_analysis import ModelIsNotCortexACompatible -from mlia.devices.cortexa.data_analysis import ModelIsNotTFLiteCompatible -from mlia.devices.cortexa.data_analysis import TFLiteCompatibilityCheckFailed - - -class CortexAAdviceProducer(FactBasedAdviceProducer): - """Cortex-A advice producer.""" - - cortex_a_disclaimer = ( - "Note that the provided compatibility information is general. 
" - "At runtime individual operators in the given model might fall back to " - "the TensorFlow Lite reference or might produce errors based on the " - "specific parameters." - ) - - @singledispatchmethod - def produce_advice(self, _data_item: DataItem) -> None: # type: ignore - """Produce advice.""" - - @produce_advice.register - @advice_category(AdviceCategory.ALL, AdviceCategory.OPERATORS) - def handle_model_is_cortex_a_compatible( - self, data_item: ModelIsCortexACompatible - ) -> None: - """Advice for Cortex-A compatibility.""" - self.add_advice( - [ - f"Model is fully compatible with {data_item.backend_info} for " - "Cortex-A.", - self.cortex_a_disclaimer, - ] - ) - - @produce_advice.register - @advice_category(AdviceCategory.ALL, AdviceCategory.OPERATORS) - def handle_model_is_not_cortex_a_compatible( - self, data_item: ModelIsNotCortexACompatible - ) -> None: - """Advice for Cortex-A compatibility.""" - if data_item.unsupported_ops: - self.add_advice( - [ - "The following operators are not supported by " - f"{data_item.backend_info} and will fall back to the " - "TensorFlow Lite runtime:", - "\n".join(f" - {op}" for op in data_item.unsupported_ops), - ] - ) - - if data_item.activation_func_support: - self.add_advice( - [ - "The fused activation functions of the following operators " - f"are not supported by {data_item.backend_info}. 
Please " - "consider using one of the supported activation functions " - "instead:", - "\n".join( - f" - {op}\n" - f" - Used unsupported: {act.used_unsupported}\n" - f" - Supported: {act.supported}" - for op, act in data_item.activation_func_support.items() - ), - ] - ) - - self.add_advice( - [ - "Please, refer to the full table of operators above for more " - "information.", - self.cortex_a_disclaimer, - ] - ) - - @produce_advice.register - @advice_category(AdviceCategory.ALL, AdviceCategory.OPERATORS) - def handle_model_is_not_tflite_compatible( - self, data_item: ModelIsNotTFLiteCompatible - ) -> None: - """Advice for TensorFlow Lite compatibility.""" - if data_item.flex_ops: - self.add_advice( - [ - "The following operators are not natively " - "supported by TensorFlow Lite: " - f"{', '.join(data_item.flex_ops)}.", - "Using select TensorFlow operators in TensorFlow Lite model " - "requires special initialization of TFLiteConverter and " - "TensorFlow Lite run-time.", - "Please refer to the TensorFlow documentation for more " - "details: https://www.tensorflow.org/lite/guide/ops_select", - "Note, such models are not supported by the ML Inference Advisor.", - ] - ) - - if data_item.custom_ops: - self.add_advice( - [ - "The following operators appear to be custom and not natively " - "supported by TensorFlow Lite: " - f"{', '.join(data_item.custom_ops)}.", - "Using custom operators in TensorFlow Lite model " - "requires special initialization of TFLiteConverter and " - "TensorFlow Lite run-time.", - "Please refer to the TensorFlow documentation for more " - "details: https://www.tensorflow.org/lite/guide/ops_custom", - "Note, such models are not supported by the ML Inference Advisor.", - ] - ) - - if not data_item.flex_ops and not data_item.custom_ops: - self.add_advice( - [ - "Model could not be converted into TensorFlow Lite format.", - "Please refer to the table for more details.", - ] - ) - - @produce_advice.register - @advice_category(AdviceCategory.ALL, 
AdviceCategory.OPERATORS) - def handle_tflite_check_failed( - self, _data_item: TFLiteCompatibilityCheckFailed - ) -> None: - """Advice for the failed TensorFlow Lite compatibility checks.""" - self.add_advice( - [ - "Model could not be converted into TensorFlow Lite format.", - "Please refer to the table for more details.", - ] - ) - - @produce_advice.register - @advice_category(AdviceCategory.ALL, AdviceCategory.OPERATORS) - def handle_model_has_custom_operators( - self, _data_item: ModelHasCustomOperators - ) -> None: - """Advice for the models with custom operators.""" - self.add_advice( - [ - "Models with custom operators require special initialization " - "and currently are not supported by the ML Inference Advisor.", - ] - ) diff --git a/src/mlia/devices/cortexa/advisor.py b/src/mlia/devices/cortexa/advisor.py deleted file mode 100644 index ffbbea5..0000000 --- a/src/mlia/devices/cortexa/advisor.py +++ /dev/null @@ -1,92 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""Cortex-A MLIA module.""" -from __future__ import annotations - -from pathlib import Path -from typing import Any - -from mlia.core.advice_generation import AdviceProducer -from mlia.core.advisor import DefaultInferenceAdvisor -from mlia.core.advisor import InferenceAdvisor -from mlia.core.common import AdviceCategory -from mlia.core.context import Context -from mlia.core.context import ExecutionContext -from mlia.core.data_analysis import DataAnalyzer -from mlia.core.data_collection import DataCollector -from mlia.core.events import Event -from mlia.core.typing import PathOrFileLike -from mlia.devices.cortexa.advice_generation import CortexAAdviceProducer -from mlia.devices.cortexa.config import CortexAConfiguration -from mlia.devices.cortexa.data_analysis import CortexADataAnalyzer -from mlia.devices.cortexa.data_collection import CortexAOperatorCompatibility -from mlia.devices.cortexa.events import CortexAAdvisorStartedEvent -from mlia.devices.cortexa.handlers import CortexAEventHandler - - -class CortexAInferenceAdvisor(DefaultInferenceAdvisor): - """Cortex-A Inference Advisor.""" - - @classmethod - def name(cls) -> str: - """Return name of the advisor.""" - return "cortex_a_inference_advisor" - - def get_collectors(self, context: Context) -> list[DataCollector]: - """Return list of the data collectors.""" - model = self.get_model(context) - - collectors: list[DataCollector] = [] - - if AdviceCategory.OPERATORS in context.advice_category: - collectors.append(CortexAOperatorCompatibility(model)) - - return collectors - - def get_analyzers(self, context: Context) -> list[DataAnalyzer]: - """Return list of the data analyzers.""" - return [ - CortexADataAnalyzer(), - ] - - def get_producers(self, context: Context) -> list[AdviceProducer]: - """Return list of the advice producers.""" - return [CortexAAdviceProducer()] - - def get_events(self, context: Context) -> list[Event]: - """Return list of the startup events.""" - 
model = self.get_model(context) - target_profile = self.get_target_profile(context) - - return [ - CortexAAdvisorStartedEvent(model, CortexAConfiguration(target_profile)), - ] - - -def configure_and_get_cortexa_advisor( - context: ExecutionContext, - target_profile: str, - model: str | Path, - output: PathOrFileLike | None = None, - **_extra_args: Any, -) -> InferenceAdvisor: - """Create and configure Cortex-A advisor.""" - if context.event_handlers is None: - context.event_handlers = [CortexAEventHandler(output)] - - if context.config_parameters is None: - context.config_parameters = _get_config_parameters(model, target_profile) - - return CortexAInferenceAdvisor() - - -def _get_config_parameters(model: str | Path, target_profile: str) -> dict[str, Any]: - """Get configuration parameters for the advisor.""" - advisor_parameters: dict[str, Any] = { - "cortex_a_inference_advisor": { - "model": str(model), - "target_profile": target_profile, - }, - } - - return advisor_parameters diff --git a/src/mlia/devices/cortexa/config.py b/src/mlia/devices/cortexa/config.py deleted file mode 100644 index ec0cf0a..0000000 --- a/src/mlia/devices/cortexa/config.py +++ /dev/null @@ -1,20 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""Cortex-A configuration.""" -from __future__ import annotations - -from mlia.devices.config import IPConfiguration -from mlia.utils.filesystem import get_profile - - -class CortexAConfiguration(IPConfiguration): # pylint: disable=too-few-public-methods - """Cortex-A configuration.""" - - def __init__(self, target_profile: str) -> None: - """Init Cortex-A target configuration.""" - target_data = get_profile(target_profile) - - target = target_data["target"] - if target != "cortex-a": - raise Exception(f"Wrong target {target} for Cortex-A configuration") - super().__init__(target) diff --git a/src/mlia/devices/cortexa/data_analysis.py b/src/mlia/devices/cortexa/data_analysis.py deleted file mode 100644 index 04bc819..0000000 --- a/src/mlia/devices/cortexa/data_analysis.py +++ /dev/null @@ -1,128 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Cortex-A data analysis module.""" -from __future__ import annotations - -from collections import defaultdict -from dataclasses import dataclass -from dataclasses import field -from functools import singledispatchmethod - -from mlia.core.common import DataItem -from mlia.core.data_analysis import Fact -from mlia.core.data_analysis import FactExtractor -from mlia.devices.cortexa.operators import CortexACompatibilityInfo -from mlia.devices.cortexa.operators import Operator -from mlia.nn.tensorflow.tflite_compat import TFLiteCompatibilityInfo - - -class CortexADataAnalyzer(FactExtractor): - """Cortex-A data analyzer.""" - - @singledispatchmethod - def analyze_data(self, data_item: DataItem) -> None: # type: ignore - """Analyse the data.""" - - @analyze_data.register - def analyze_operator_compatibility( - self, data_item: CortexACompatibilityInfo - ) -> None: - """Analyse operator compatibility information.""" - if data_item.cortex_a_compatible: - self.add_fact(ModelIsCortexACompatible(data_item.backend_info)) - 
else: - unsupported_ops = set() - activation_func_support: defaultdict[ - str, ModelIsNotCortexACompatible.ActivationFunctionSupport - ] = defaultdict(ModelIsNotCortexACompatible.ActivationFunctionSupport) - for oper in data_item.operators: - if oper.support_type == Operator.SupportType.OP_NOT_SUPPORTED: - unsupported_ops.add(oper.full_name) - - if oper.support_type == Operator.SupportType.ACTIVATION_NOT_SUPPORTED: - # Add used but unsupported actication functions - activation_func_support[oper.full_name].used_unsupported.add( - oper.activation_func.name - ) - # Add supported activation functions - activation_func_support[oper.full_name].supported.update( - oper.supported_activation_functions - ) - - assert ( - unsupported_ops or activation_func_support or not data_item.operators - ), ( - "The model is marked as not compatible with Cortex-A but there " - "are no unsupported ops activation functions listed." - ) - - self.add_fact( - ModelIsNotCortexACompatible( - data_item.backend_info, unsupported_ops, activation_func_support - ) - ) - - @analyze_data.register - def analyze_tflite_compatibility(self, data_item: TFLiteCompatibilityInfo) -> None: - """Analyze TensorFlow Lite compatibility information.""" - if data_item.compatible: - return - - if data_item.conversion_failed_with_errors: - self.add_fact( - ModelIsNotTFLiteCompatible( - custom_ops=data_item.required_custom_ops, - flex_ops=data_item.required_flex_ops, - ) - ) - - if data_item.check_failed_with_unknown_error: - self.add_fact(TFLiteCompatibilityCheckFailed()) - - if data_item.conversion_failed_for_model_with_custom_ops: - self.add_fact(ModelHasCustomOperators()) - - -@dataclass -class CortexACompatibility(Fact): - """Base class for Cortex-A compatibility providing backend info.""" - - backend_info: str - - -@dataclass -class ModelIsCortexACompatible(CortexACompatibility): - """Model is completely compatible with Cortex-A.""" - - -@dataclass -class ModelIsNotCortexACompatible(CortexACompatibility): - 
"""Model is not compatible with Cortex-A.""" - - @dataclass - class ActivationFunctionSupport: - """Activation function support per operator.""" - - used_unsupported: set[str] = field(default_factory=set) - supported: set[str] = field(default_factory=set) - - unsupported_ops: set[str] - activation_func_support: dict[str, ActivationFunctionSupport] - - -@dataclass -class ModelIsNotTFLiteCompatible(Fact): - """Model could not be converted into TensorFlow Lite format.""" - - custom_ops: list[str] | None = None - flex_ops: list[str] | None = None - - -@dataclass -class TFLiteCompatibilityCheckFailed(Fact): - """TensorFlow Lite compatibility check failed by unknown reason.""" - - -@dataclass -class ModelHasCustomOperators(Fact): - """Model could not be loaded because it contains custom ops.""" diff --git a/src/mlia/devices/cortexa/data_collection.py b/src/mlia/devices/cortexa/data_collection.py deleted file mode 100644 index f4d5a82..0000000 --- a/src/mlia/devices/cortexa/data_collection.py +++ /dev/null @@ -1,51 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""Data collection module for Cortex-A.""" -from __future__ import annotations - -import logging -from pathlib import Path - -from mlia.core.data_collection import ContextAwareDataCollector -from mlia.devices.cortexa.operators import CortexACompatibilityInfo -from mlia.devices.cortexa.operators import get_cortex_a_compatibility_info -from mlia.nn.tensorflow.config import get_tflite_model -from mlia.nn.tensorflow.tflite_compat import TFLiteChecker -from mlia.nn.tensorflow.tflite_compat import TFLiteCompatibilityInfo -from mlia.nn.tensorflow.utils import is_tflite_model -from mlia.utils.logging import log_action - - -logger = logging.getLogger(__name__) - - -class CortexAOperatorCompatibility(ContextAwareDataCollector): - """Collect operator compatibility information.""" - - def __init__(self, model: Path) -> None: - """Init operator compatibility data collector.""" - self.model = model - - def collect_data(self) -> TFLiteCompatibilityInfo | CortexACompatibilityInfo | None: - """Collect operator compatibility information.""" - if not is_tflite_model(self.model): - with log_action("Checking TensorFlow Lite compatibility ..."): - tflite_checker = TFLiteChecker() - tflite_compat = tflite_checker.check_compatibility(self.model) - - if not tflite_compat.compatible: - return tflite_compat - - tflite_model = get_tflite_model(self.model, self.context) - - with log_action("Checking operator compatibility ..."): - return ( - get_cortex_a_compatibility_info( # pylint: disable=assignment-from-none - Path(tflite_model.model_path) - ) - ) - - @classmethod - def name(cls) -> str: - """Return name of the collector.""" - return "cortex_a_operator_compatibility" diff --git a/src/mlia/devices/cortexa/events.py b/src/mlia/devices/cortexa/events.py deleted file mode 100644 index dece4c7..0000000 --- a/src/mlia/devices/cortexa/events.py +++ /dev/null @@ -1,24 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""Cortex-A MLIA module events.""" -from dataclasses import dataclass -from pathlib import Path - -from mlia.core.events import Event -from mlia.core.events import EventDispatcher -from mlia.devices.cortexa.config import CortexAConfiguration - - -@dataclass -class CortexAAdvisorStartedEvent(Event): - """Event with Cortex-A advisor parameters.""" - - model: Path - device: CortexAConfiguration - - -class CortexAAdvisorEventHandler(EventDispatcher): - """Event handler for the Cortex-A inference advisor.""" - - def on_cortex_a_advisor_started(self, event: CortexAAdvisorStartedEvent) -> None: - """Handle CortexAAdvisorStarted event.""" diff --git a/src/mlia/devices/cortexa/handlers.py b/src/mlia/devices/cortexa/handlers.py deleted file mode 100644 index 7ed2b75..0000000 --- a/src/mlia/devices/cortexa/handlers.py +++ /dev/null @@ -1,39 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Event handler.""" -from __future__ import annotations - -import logging - -from mlia.core.events import CollectedDataEvent -from mlia.core.handlers import WorkflowEventsHandler -from mlia.core.typing import PathOrFileLike -from mlia.devices.cortexa.events import CortexAAdvisorEventHandler -from mlia.devices.cortexa.events import CortexAAdvisorStartedEvent -from mlia.devices.cortexa.operators import CortexACompatibilityInfo -from mlia.devices.cortexa.reporters import cortex_a_formatters -from mlia.nn.tensorflow.tflite_compat import TFLiteCompatibilityInfo - -logger = logging.getLogger(__name__) - - -class CortexAEventHandler(WorkflowEventsHandler, CortexAAdvisorEventHandler): - """CLI event handler.""" - - def __init__(self, output: PathOrFileLike | None = None) -> None: - """Init event handler.""" - super().__init__(cortex_a_formatters, output) - - def on_collected_data(self, event: CollectedDataEvent) -> None: - """Handle CollectedDataEvent event.""" - data_item = 
event.data_item - - if isinstance(data_item, CortexACompatibilityInfo): - self.reporter.submit(data_item.operators, delay_print=True) - - if isinstance(data_item, TFLiteCompatibilityInfo) and not data_item.compatible: - self.reporter.submit(data_item, delay_print=True) - - def on_cortex_a_advisor_started(self, event: CortexAAdvisorStartedEvent) -> None: - """Handle CortexAAdvisorStarted event.""" - self.reporter.submit(event.device) diff --git a/src/mlia/devices/cortexa/operator_compatibility.py b/src/mlia/devices/cortexa/operator_compatibility.py deleted file mode 100644 index c474e75..0000000 --- a/src/mlia/devices/cortexa/operator_compatibility.py +++ /dev/null @@ -1,184 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Collection of Cortex-A operator compatibility information.""" -from __future__ import annotations - -from typing import Any - -ARMNN_TFLITE_DELEGATE: dict[str, dict[str, Any]] = { - "metadata": { - "backend": "Arm NN TensorFlow Lite delegate", - "version": "22.08", - }, - # BUILTIN OPERATORS - "builtin_ops": { - "ABS": {}, - "ADD": {}, - "ARG_MAX": {}, - "ARG_MIN": {}, - "AVERAGE_POOL_2D": { - "supported_fused_activation": [ - "RELU", - "RELU6", - "RELU_N1_TO_1", - "SIGMOID", - "TANH", - "NONE", - ] - }, - "BATCH_TO_SPACE_ND": {}, - "CAST": {}, - "CONCATENATION": { - "supported_fused_activation": [ - "RELU", - "RELU6", - "RELU_N1_TO_1", - "SIGMOID", - "TANH", - "NONE", - ] - }, - "CONV_2D": { - "supported_fused_activation": [ - "RELU", - "RELU6", - "RELU_N1_TO_1", - "SIGMOID", - "TANH", - "NONE", - ] - }, - "CONV_3D": { - "supported_fused_activation": [ - "RELU", - "RELU6", - "RELU_N1_TO_1", - "SIGMOID", - "TANH", - "NONE", - ] - }, - "DEPTH_TO_SPACE": {}, - "DEPTHWISE_CONV_2D": { - "supported_fused_activation": [ - "RELU", - "RELU6", - "RELU_N1_TO_1", - "SIGMOID", - "TANH", - "NONE", - ] - }, - "DEQUANTIZE": {}, - "DIV": {}, - "EQUAL": {}, - "ELU": {}, - "EXP": {}, - 
"EXPAND_DIMS": {}, - "FILL": {}, - "FLOOR": {}, - "FLOOR_DIV": {}, - "FULLY_CONNECTED": { - "supported_fused_activation": [ - "RELU", - "RELU6", - "RELU_N1_TO_1", - "SIGMOID", - "TANH", - "NONE", - ] - }, - "GATHER": {}, - "GATHER_ND": {}, - "GREATER": {}, - "GREATER_EQUAL": {}, - "HARD_SWISH": {}, - "L2_NORMALIZATION": {}, - "L2_POOL_2D": {}, - "LESS": {}, - "LESS_EQUAL": {}, - "LOCAL_RESPONSE_NORMALIZATION": {}, - "LOG": {}, - "LOGICAL_AND": {}, - "LOGICAL_NOT": {}, - "LOGICAL_OR": {}, - "LOGISTIC": {}, - "LOG_SOFTMAX": {}, - "LSTM": {}, - "MAXIMUM": {}, - "MAX_POOL_2D": { - "supported_fused_activation": [ - "RELU", - "RELU6", - "RELU_N1_TO_1", - "SIGMOID", - "TANH", - "NONE", - ] - }, - "MEAN": {}, - "MINIMUM": {}, - "MIRROR_PAD": {}, - "MUL": {}, - "NEG": {}, - "NOT_EQUAL": {}, - "PACK": {}, - "PAD": {}, - "PADV2": {}, - "PRELU": {}, - "QUANTIZE": {}, - "RANK": {}, - "REDUCE_MAX": {}, - "REDUCE_MIN": {}, - "REDUCE_PROD": {}, - "RELU": {}, - "RELU6": {}, - "RELU_N1_TO_1": {}, - "RESHAPE": {}, - "RESIZE_BILINEAR": {}, - "RESIZE_NEAREST_NEIGHBOR": {}, - "RSQRT": {}, - "SHAPE": {}, - "SIN": {}, - "SOFTMAX": {}, - "SPACE_TO_BATCH_ND": {}, - "SPACE_TO_DEPTH": {}, - "SPLIT": {}, - "SPLIT_V": {}, - "SQRT": {}, - "SQUEEZE": {}, - "STRIDED_SLICE": {}, - "SUB": {}, - "SUM": {}, - "TANH": {}, - "TRANSPOSE": {}, - "TRANSPOSE_CONV": {}, - "UNIDIRECTIONAL_SEQUENCE_LSTM": {}, - "UNPACK": {}, - }, - # CUSTOM OPERATORS - "custom_ops": { - "AveragePool3D": { - "supported_fused_activation": [ - "RELU", - "RELU6", - "RELU_N1_TO_1", - "SIGMOID", - "SIGN_BIT", - "TANH", - "NONE", - ] - }, - "MaxPool3D": { - "supported_fused_activation": [ - "RELU", - "RELU6", - "RELU_N1_TO_1", - "SIGMOID", - "SIGN_BIT", - "TANH", - "NONE", - ] - }, - }, -} diff --git a/src/mlia/devices/cortexa/operators.py b/src/mlia/devices/cortexa/operators.py deleted file mode 100644 index 3e84d64..0000000 --- a/src/mlia/devices/cortexa/operators.py +++ /dev/null @@ -1,148 +0,0 @@ -# SPDX-FileCopyrightText: 
Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Cortex-A tools module.""" -from __future__ import annotations - -from dataclasses import dataclass -from enum import Enum -from pathlib import Path -from typing import Any -from typing import ClassVar - -from mlia.devices.cortexa.operator_compatibility import ( - ARMNN_TFLITE_DELEGATE as TFLITE_DELEGATE_COMPAT, -) -from mlia.nn.tensorflow.tflite_graph import Op -from mlia.nn.tensorflow.tflite_graph import parse_subgraphs -from mlia.nn.tensorflow.tflite_graph import TFL_ACTIVATION_FUNCTION - - -@dataclass -class Operator: - """Cortex-A compatibility information of the operator.""" - - BUILTIN_COMPATIBILITY = TFLITE_DELEGATE_COMPAT["builtin_ops"] - CUSTOM_COMPATIBILITY = TFLITE_DELEGATE_COMPAT["custom_ops"] - - class SupportType(Enum): - """Type of operator support.""" - - COMPATIBLE = "Compatible" - OP_NOT_SUPPORTED = "Operator not supported" - ACTIVATION_NOT_SUPPORTED = "Activation not supported" - - name: str - location: str - support_type: SupportType - activation_func: TFL_ACTIVATION_FUNCTION - custom_name: str | None = None - - @property - def is_cortex_a_compatible(self) -> bool: - """Check if this operator is compatible.""" - return self.support_type == Operator.SupportType.COMPATIBLE - - @property - def full_name(self) -> str: - """Returun the full name including the custom name if applicable.""" - return self.name + (f" - '{self.custom_name}'" if self.custom_name else "") - - @property - def is_custom(self) -> bool: - """Check if this is a custom operator.""" - return bool(self.custom_name) - - @property - def compatibility_data(self) -> dict[str, dict[str, Any]]: - """Get the compatibility data (builtin or custom ops).""" - return ( - Operator.CUSTOM_COMPATIBILITY - if self.is_custom - else Operator.BUILTIN_COMPATIBILITY - ) - - @property - def supported_activation_functions(self) -> list[str]: - """Return a list of fused activation functions supported by this 
op.""" - op_name = self.custom_name if self.custom_name else self.name - return self.compatibility_data[op_name].get("supported_fused_activation", []) - - @classmethod - def from_tflite_op(cls, tfl_op: Op, location: str) -> Operator: - """Create a new instance from TensorFlow Lite operator and location.""" - support_type = cls._get_support_type(tfl_op) - activation_func = ( - tfl_op.builtin_options["fused_activation_function"] - if ( - tfl_op.builtin_options - and "fused_activation_function" in tfl_op.builtin_options - ) - else TFL_ACTIVATION_FUNCTION.NONE - ) - return Operator( - tfl_op.type, - location, - support_type, - activation_func=activation_func, - custom_name=(tfl_op.custom_type if tfl_op.is_custom else None), - ) - - @staticmethod - def _get_support_type(tfl_op: Op) -> Operator.SupportType: - """Get the support type from the TensorFlow Lite operator.""" - compat_data = ( - Operator.CUSTOM_COMPATIBILITY - if tfl_op.is_custom - else Operator.BUILTIN_COMPATIBILITY - ) - op_type = tfl_op.custom_type if tfl_op.is_custom else tfl_op.type - - if op_type not in compat_data: - return Operator.SupportType.OP_NOT_SUPPORTED - - compat_op = compat_data[op_type] - if "supported_fused_activation" in compat_op: - assert tfl_op.builtin_options - assert "fused_activation_function" in tfl_op.builtin_options - if ( - tfl_op.builtin_options["fused_activation_function"] - not in compat_op["supported_fused_activation"] - ): - return Operator.SupportType.ACTIVATION_NOT_SUPPORTED - - return Operator.SupportType.COMPATIBLE - - -@dataclass -class CortexACompatibilityInfo: - """Model's operators.""" - - cortex_a_compatible: bool - operators: list[Operator] - backend_info: ClassVar[str] = ( - f"{TFLITE_DELEGATE_COMPAT['metadata']['backend']} " - f"{TFLITE_DELEGATE_COMPAT['metadata']['version']}" - ) - - -def get_cortex_a_compatibility_info(model_path: Path) -> CortexACompatibilityInfo: - """Return list of model's operators.""" - model = parse_subgraphs(model_path) - - op_list = [ - 
Operator.from_tflite_op(oper, f"subgraph:{g_idx},oper:{op_idx}") - for g_idx, g in enumerate(model) - for op_idx, oper in enumerate(g) - ] - all_compatible = all(oper.is_cortex_a_compatible for oper in op_list) - compat_info = CortexACompatibilityInfo(all_compatible, op_list) - - return compat_info - - -def report() -> None: - """Generate supported operators report.""" - raise Exception( - "Generating a supported operators report is not " - "currently supported with Cortex-A target profile." - ) diff --git a/src/mlia/devices/cortexa/reporters.py b/src/mlia/devices/cortexa/reporters.py deleted file mode 100644 index 84de10b..0000000 --- a/src/mlia/devices/cortexa/reporters.py +++ /dev/null @@ -1,140 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Reports module.""" -from __future__ import annotations - -from typing import Any -from typing import Callable -from typing import cast - -from mlia.core.advice_generation import Advice -from mlia.core.reporters import report_advice -from mlia.core.reporting import Cell -from mlia.core.reporting import Column -from mlia.core.reporting import Format -from mlia.core.reporting import NestedReport -from mlia.core.reporting import Report -from mlia.core.reporting import ReportItem -from mlia.core.reporting import Table -from mlia.devices.cortexa.config import CortexAConfiguration -from mlia.devices.cortexa.operators import Operator -from mlia.nn.tensorflow.tflite_compat import TFLiteCompatibilityInfo -from mlia.utils.console import style_improvement -from mlia.utils.types import is_list_of - - -def report_device(device: CortexAConfiguration) -> Report: - """Generate report for the device.""" - return NestedReport( - "Device information", - "device", - [ - ReportItem("Target", alias="target", value=device.target), - ], - ) - - -def report_tflite_compatiblity(compat_info: TFLiteCompatibilityInfo) -> Report: - """Generate report for the TensorFlow Lite 
compatibility information.""" - if compat_info.conversion_errors: - return Table( - [ - Column("#", only_for=["plain_text"]), - Column("Operator", alias="operator"), - Column( - "Operator location", - alias="operator_location", - fmt=Format(wrap_width=25), - ), - Column("Error code", alias="error_code"), - Column( - "Error message", alias="error_message", fmt=Format(wrap_width=25) - ), - ], - [ - ( - index + 1, - err.operator, - ", ".join(err.location), - err.code.name, - err.message, - ) - for index, err in enumerate(compat_info.conversion_errors) - ], - name="TensorFlow Lite conversion errors", - alias="tensorflow_lite_conversion_errors", - ) - - return Table( - columns=[ - Column("Reason", alias="reason"), - Column( - "Exception details", - alias="exception_details", - fmt=Format(wrap_width=40), - ), - ], - rows=[ - ( - "TensorFlow Lite compatibility check failed with exception", - str(compat_info.conversion_exception), - ), - ], - name="TensorFlow Lite compatibility errors", - alias="tflite_compatibility", - ) - - -def report_cortex_a_operators(ops: list[Operator]) -> Report: - """Generate report for the operators.""" - return Table( - [ - Column("#", only_for=["plain_text"]), - Column( - "Operator location", - alias="operator_location", - fmt=Format(wrap_width=30), - ), - Column("Operator name", alias="operator_name", fmt=Format(wrap_width=20)), - Column( - "Arm NN TFLite Delegate compatibility", - alias="cortex_a_compatible", - fmt=Format(wrap_width=40), - ), - ], - [ - ( - index + 1, - op.location, - op.full_name, - Cell( - op.support_type, - Format( - wrap_width=30, - style=style_improvement(op.is_cortex_a_compatible), - str_fmt=lambda v: cast(str, v.value), - ), - ), - ) - for index, op in enumerate(ops) - ], - name="Operators", - alias="operators", - ) - - -def cortex_a_formatters(data: Any) -> Callable[[Any], Report]: - """Find appropriate formatter for the provided data.""" - if is_list_of(data, Advice): - return report_advice - - if isinstance(data, 
CortexAConfiguration): - return report_device - - if isinstance(data, TFLiteCompatibilityInfo): - return report_tflite_compatiblity - - if is_list_of(data, Operator): - return report_cortex_a_operators - - raise Exception(f"Unable to find appropriate formatter for {data}") diff --git a/src/mlia/devices/ethosu/__init__.py b/src/mlia/devices/ethosu/__init__.py deleted file mode 100644 index 73925e1..0000000 --- a/src/mlia/devices/ethosu/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Ethos-U devices module.""" diff --git a/src/mlia/devices/ethosu/advice_generation.py b/src/mlia/devices/ethosu/advice_generation.py deleted file mode 100644 index 1910460..0000000 --- a/src/mlia/devices/ethosu/advice_generation.py +++ /dev/null @@ -1,206 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Ethos-U advice generation.""" -from __future__ import annotations - -from functools import singledispatchmethod - -from mlia.core.advice_generation import Advice -from mlia.core.advice_generation import advice_category -from mlia.core.advice_generation import ContextAwareAdviceProducer -from mlia.core.advice_generation import FactBasedAdviceProducer -from mlia.core.common import AdviceCategory -from mlia.core.common import DataItem -from mlia.devices.ethosu.data_analysis import AllOperatorsSupportedOnNPU -from mlia.devices.ethosu.data_analysis import HasCPUOnlyOperators -from mlia.devices.ethosu.data_analysis import HasUnsupportedOnNPUOperators -from mlia.devices.ethosu.data_analysis import OptimizationResults -from mlia.nn.tensorflow.optimizations.select import OptimizationSettings - - -class EthosUAdviceProducer(FactBasedAdviceProducer): - """Ethos-U advice producer.""" - - @singledispatchmethod - def produce_advice(self, data_item: DataItem) -> None: # type: ignore - """Produce advice.""" - - 
@produce_advice.register - @advice_category(AdviceCategory.OPERATORS, AdviceCategory.ALL) - def handle_cpu_only_ops(self, data_item: HasCPUOnlyOperators) -> None: - """Advice for CPU only operators.""" - cpu_only_ops = ",".join(sorted(set(data_item.cpu_only_ops))) - cpu_only_ops_num = len(data_item.cpu_only_ops) - - self.add_advice( - [ - f"You have at least {cpu_only_ops_num} " - f"operator{'s' if cpu_only_ops_num > 1 else ''} that is CPU " - f"only: {cpu_only_ops}.", - "Using operators that are supported by the NPU will " - "improve performance.", - ] - + self.context.action_resolver.supported_operators_info() - ) - - @produce_advice.register - @advice_category(AdviceCategory.OPERATORS, AdviceCategory.ALL) - def handle_unsupported_operators( - self, data_item: HasUnsupportedOnNPUOperators - ) -> None: - """Advice for the unsupported operators.""" - self.add_advice( - [ - f"You have {data_item.npu_unsupported_ratio*100:.0f}% of operators " - "that cannot be placed on the NPU.", - "For better performance, please review the reasons reported " - "in the table, and adjust the model accordingly " - "where possible.", - ] - ) - - @produce_advice.register - @advice_category(AdviceCategory.OPERATORS, AdviceCategory.ALL) - def handle_all_operators_supported( - self, _data_item: AllOperatorsSupportedOnNPU - ) -> None: - """Advice if all operators supported.""" - self.add_advice( - [ - "You don't have any unsupported operators, your model will " - "run completely on NPU." 
- ] - + self.context.action_resolver.check_performance() - ) - - @produce_advice.register - @advice_category(AdviceCategory.OPTIMIZATION, AdviceCategory.ALL) - def handle_optimization_results(self, data_item: OptimizationResults) -> None: - """Advice based on optimization results.""" - if not data_item.diffs or len(data_item.diffs) != 1: - return - - optim_details = data_item.diffs[0] - metrics = [ - (metric_name, optim_details.opt_diffs[metric_key]) - for (metric_name, metric_key) in ( - ("DRAM used (KB)", "dram"), - ("SRAM used (KB)", "sram"), - ("On chip flash used (KB)", "on_chip_flash"), - ("Off chip flash used (KB)", "off_chip_flash"), - ("NPU total cycles", "npu_total_cycles"), - ) - if metric_key in optim_details.opt_diffs - and not optim_details.opt_diffs[metric_key].same - ] - - improved = [ - f"- You have achieved {abs(metric_value.diff):.2f}% performance " - f"improvement in {metric_name}" - for metric_name, metric_value in metrics - if metric_value.improved - ] - - degraded = [ - f"- {metric_name} have degraded by {abs(metric_value.diff):.2f}%" - for metric_name, metric_value in metrics - if metric_value.degraded - ] - - opts = ", ".join(str(s) for s in optim_details.opt_type) - messages = [f"With the selected optimization ({opts})", *improved, *degraded] - - if improved: - if next_optimization_target := self.get_next_optimization_targets( - optim_details.opt_type - ): - next_optimization_target_as_str = " and/or ".join( - str(item) for item in next_optimization_target - ) - - messages.append( - "You can try to push the optimization target higher " - f"(e.g. {next_optimization_target_as_str}) " - "to check if those results can be further improved." - ) - messages += self.context.action_resolver.apply_optimizations( - opt_settings=next_optimization_target - ) - - elif degraded: - messages.append( - "The performance seems to have degraded after " - "applying the selected optimizations, " - "try exploring different optimization types/targets." 
- ) - - self.add_advice(messages) - - self.add_advice( - [ - "The applied tooling techniques have an impact " - "on accuracy. Additional hyperparameter tuning may be required " - "after any optimization." - ] - ) - - @staticmethod - def get_next_optimization_targets( - opt_type: list[OptimizationSettings], - ) -> list[OptimizationSettings]: - """Get next optimization targets.""" - next_targets = (item.next_target() for item in opt_type) - - # filter out targets that have not been changed - valid_targets = [ - next_ - for next_, old in zip(next_targets, opt_type) - if ( - old.optimization_type == "pruning" - and old.optimization_target < next_.optimization_target - ) - or ( - old.optimization_type == "clustering" - and old.optimization_target > next_.optimization_target - ) - ] - return valid_targets - - -class EthosUStaticAdviceProducer(ContextAwareAdviceProducer): - """Advice producer that not depends on input data.""" - - def produce_advice(self, data_item: DataItem) -> None: - """Do not process passed data items.""" - - def get_advice(self) -> Advice | list[Advice]: - """Return predefined advice based on category.""" - advice_per_category = { - AdviceCategory.PERFORMANCE: [ - Advice( - [ - "You can improve the inference time by using only operators " - "that are supported by the NPU.", - ] - + self.context.action_resolver.check_operator_compatibility() - ), - Advice( - [ - "Check if you can improve the performance by applying " - "tooling techniques to your model." 
- ] - + self.context.action_resolver.apply_optimizations() - ), - ], - AdviceCategory.OPTIMIZATION: [ - Advice( - [ - "For better performance, make sure that all the operators " - "of your final TensorFlow Lite model are supported by the NPU.", - ] - + self.context.action_resolver.operator_compatibility_details() - ) - ], - } - - return advice_per_category.get(self.context.advice_category, []) diff --git a/src/mlia/devices/ethosu/advisor.py b/src/mlia/devices/ethosu/advisor.py deleted file mode 100644 index 2c25f6c..0000000 --- a/src/mlia/devices/ethosu/advisor.py +++ /dev/null @@ -1,194 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Ethos-U MLIA module.""" -from __future__ import annotations - -from pathlib import Path -from typing import Any - -from mlia.core.advice_generation import AdviceProducer -from mlia.core.advisor import DefaultInferenceAdvisor -from mlia.core.advisor import InferenceAdvisor -from mlia.core.common import AdviceCategory -from mlia.core.context import Context -from mlia.core.context import ExecutionContext -from mlia.core.data_analysis import DataAnalyzer -from mlia.core.data_collection import DataCollector -from mlia.core.events import Event -from mlia.core.typing import PathOrFileLike -from mlia.devices.ethosu.advice_generation import EthosUAdviceProducer -from mlia.devices.ethosu.advice_generation import EthosUStaticAdviceProducer -from mlia.devices.ethosu.config import EthosUConfiguration -from mlia.devices.ethosu.config import get_target -from mlia.devices.ethosu.data_analysis import EthosUDataAnalyzer -from mlia.devices.ethosu.data_collection import EthosUOperatorCompatibility -from mlia.devices.ethosu.data_collection import EthosUOptimizationPerformance -from mlia.devices.ethosu.data_collection import EthosUPerformance -from mlia.devices.ethosu.events import EthosUAdvisorStartedEvent -from mlia.devices.ethosu.handlers import EthosUEventHandler -from 
mlia.nn.tensorflow.utils import is_tflite_model -from mlia.utils.types import is_list_of - - -class EthosUInferenceAdvisor(DefaultInferenceAdvisor): - """Ethos-U Inference Advisor.""" - - @classmethod - def name(cls) -> str: - """Return name of the advisor.""" - return "ethos_u_inference_advisor" - - def get_collectors(self, context: Context) -> list[DataCollector]: - """Return list of the data collectors.""" - model = self.get_model(context) - device = self._get_device(context) - backends = self._get_backends(context) - - collectors: list[DataCollector] = [] - - if AdviceCategory.OPERATORS in context.advice_category: - collectors.append(EthosUOperatorCompatibility(model, device)) - - # Performance and optimization are mutually exclusive. - # Decide which one to use (taking into account the model format). - if is_tflite_model(model): - # TensorFlow Lite models do not support optimization (only performance)! - if context.advice_category == AdviceCategory.OPTIMIZATION: - raise Exception( - "Command 'optimization' is not supported for TensorFlow Lite files." 
- ) - if AdviceCategory.PERFORMANCE in context.advice_category: - collectors.append(EthosUPerformance(model, device, backends)) - else: - # Keras/SavedModel: Prefer optimization - if AdviceCategory.OPTIMIZATION in context.advice_category: - optimization_settings = self._get_optimization_settings(context) - collectors.append( - EthosUOptimizationPerformance( - model, device, optimization_settings, backends - ) - ) - elif AdviceCategory.PERFORMANCE in context.advice_category: - collectors.append(EthosUPerformance(model, device, backends)) - - return collectors - - def get_analyzers(self, context: Context) -> list[DataAnalyzer]: - """Return list of the data analyzers.""" - return [ - EthosUDataAnalyzer(), - ] - - def get_producers(self, context: Context) -> list[AdviceProducer]: - """Return list of the advice producers.""" - return [ - EthosUAdviceProducer(), - EthosUStaticAdviceProducer(), - ] - - def get_events(self, context: Context) -> list[Event]: - """Return list of the startup events.""" - model = self.get_model(context) - device = self._get_device(context) - - return [ - EthosUAdvisorStartedEvent(device=device, model=model), - ] - - def _get_device(self, context: Context) -> EthosUConfiguration: - """Get device.""" - target_profile = self.get_target_profile(context) - - return get_target(target_profile) - - def _get_optimization_settings(self, context: Context) -> list[list[dict]]: - """Get optimization settings.""" - return self.get_parameter( # type: ignore - EthosUOptimizationPerformance.name(), - "optimizations", - expected_type=list, - expected=False, - context=context, - ) - - def _get_backends(self, context: Context) -> list[str] | None: - """Get list of backends.""" - return self.get_parameter( # type: ignore - self.name(), - "backends", - expected_type=list, - expected=False, - context=context, - ) - - -def configure_and_get_ethosu_advisor( - context: ExecutionContext, - target_profile: str, - model: str | Path, - output: PathOrFileLike | None = None, 
- **extra_args: Any, -) -> InferenceAdvisor: - """Create and configure Ethos-U advisor.""" - if context.event_handlers is None: - context.event_handlers = [EthosUEventHandler(output)] - - if context.config_parameters is None: - context.config_parameters = _get_config_parameters( - model, target_profile, **extra_args - ) - - return EthosUInferenceAdvisor() - - -_DEFAULT_OPTIMIZATION_TARGETS = [ - { - "optimization_type": "pruning", - "optimization_target": 0.5, - "layers_to_optimize": None, - }, - { - "optimization_type": "clustering", - "optimization_target": 32, - "layers_to_optimize": None, - }, -] - - -def _get_config_parameters( - model: str | Path, - target_profile: str, - **extra_args: Any, -) -> dict[str, Any]: - """Get configuration parameters for the advisor.""" - advisor_parameters: dict[str, Any] = { - "ethos_u_inference_advisor": { - "model": model, - "target_profile": target_profile, - }, - } - - # Specifying backends is optional (default is used) - backends = extra_args.get("backends") - if backends is not None: - if not is_list_of(backends, str): - raise Exception("Backends value has wrong format") - - advisor_parameters["ethos_u_inference_advisor"]["backends"] = backends - - optimization_targets = extra_args.get("optimization_targets") - if not optimization_targets: - optimization_targets = _DEFAULT_OPTIMIZATION_TARGETS - - if not is_list_of(optimization_targets, dict): - raise Exception("Optimization targets value has wrong format") - - advisor_parameters.update( - { - "ethos_u_model_optimizations": { - "optimizations": [optimization_targets], - }, - } - ) - - return advisor_parameters diff --git a/src/mlia/devices/ethosu/config.py b/src/mlia/devices/ethosu/config.py deleted file mode 100644 index f2e867e..0000000 --- a/src/mlia/devices/ethosu/config.py +++ /dev/null @@ -1,90 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""Ethos-U configuration.""" -from __future__ import annotations - -import logging -from typing import Any - -from mlia.backend.vela.compiler import resolve_compiler_config -from mlia.backend.vela.compiler import VelaCompilerOptions -from mlia.devices.config import IPConfiguration -from mlia.utils.filesystem import get_profile -from mlia.utils.filesystem import get_vela_config - - -logger = logging.getLogger(__name__) - - -class EthosUConfiguration(IPConfiguration): - """Ethos-U configuration.""" - - def __init__(self, target_profile: str) -> None: - """Init Ethos-U target configuration.""" - target_data = get_profile(target_profile) - _check_target_data_complete(target_data) - - target = target_data["target"] - super().__init__(target) - - mac = target_data["mac"] - _check_device_options_valid(target, mac) - - self.mac = mac - self.compiler_options = VelaCompilerOptions( - system_config=target_data["system_config"], - memory_mode=target_data["memory_mode"], - config_files=str(get_vela_config()), - accelerator_config=f"{self.target}-{mac}", # type: ignore - ) - - @property - def resolved_compiler_config(self) -> dict[str, Any]: - """Resolve compiler configuration.""" - return resolve_compiler_config(self.compiler_options) - - def __str__(self) -> str: - """Return string representation.""" - return ( - f"Ethos-U target={self.target} " - f"mac={self.mac} " - f"compiler_options={self.compiler_options}" - ) - - def __repr__(self) -> str: - """Return string representation.""" - return f"" - - -def get_target(target_profile: str) -> EthosUConfiguration: - """Get target instance based on provided params.""" - if not target_profile: - raise Exception("No target profile given") - - return EthosUConfiguration(target_profile) - - -def _check_target_data_complete(target_data: dict[str, Any]) -> None: - """Check if profile contains all needed data.""" - mandatory_keys = {"target", "mac", "system_config", "memory_mode"} - missing_keys = 
sorted(mandatory_keys - target_data.keys()) - - if missing_keys: - raise Exception(f"Mandatory fields missing from target profile: {missing_keys}") - - -def _check_device_options_valid(target: str, mac: int) -> None: - """Check if mac is valid for selected device.""" - target_mac_ranges = { - "ethos-u55": [32, 64, 128, 256], - "ethos-u65": [256, 512], - } - - if target not in target_mac_ranges: - raise Exception(f"Unsupported target: {target}") - - target_mac_range = target_mac_ranges[target] - if mac not in target_mac_range: - raise Exception( - f"Mac value for selected device should be in {target_mac_range}" - ) diff --git a/src/mlia/devices/ethosu/data_analysis.py b/src/mlia/devices/ethosu/data_analysis.py deleted file mode 100644 index db89a5f..0000000 --- a/src/mlia/devices/ethosu/data_analysis.py +++ /dev/null @@ -1,153 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Ethos-U data analysis module.""" -from __future__ import annotations - -from dataclasses import dataclass -from functools import singledispatchmethod - -from mlia.backend.vela.compat import Operators -from mlia.core.common import DataItem -from mlia.core.data_analysis import Fact -from mlia.core.data_analysis import FactExtractor -from mlia.devices.ethosu.performance import OptimizationPerformanceMetrics -from mlia.nn.tensorflow.optimizations.select import OptimizationSettings - - -@dataclass -class HasCPUOnlyOperators(Fact): - """Model has CPU only operators.""" - - cpu_only_ops: list[str] - - -@dataclass -class HasUnsupportedOnNPUOperators(Fact): - """Model has unsupported on NPU operators.""" - - npu_unsupported_ratio: float - - -@dataclass -class AllOperatorsSupportedOnNPU(Fact): - """All model's operators supported on NPU.""" - - -@dataclass -class PerfMetricDiff: - """Performance metric difference.""" - - original_value: int | float - optimized_value: int | float - - @property - def diff(self) -> float: - 
"""Difference between metrics.""" - if self.original_value == 0: - return 0 - - return 100 - ((self.optimized_value / self.original_value) * 100) - - @property - def improved(self) -> bool: - """Return true if metric improved.""" - return self.diff > 0 - - @property - def degraded(self) -> bool: - """Return true if metric degraded.""" - return self.diff < 0 - - @property - def same(self) -> bool: - """Return true if metric stays the same.""" - return self.diff == 0 - - -@dataclass -class OptimizationDiff: - """Optimization performance impact.""" - - opt_type: list[OptimizationSettings] - opt_diffs: dict[str, PerfMetricDiff] - - -@dataclass -class OptimizationResults(Fact): - """Optimization results.""" - - diffs: list[OptimizationDiff] - - -class EthosUDataAnalyzer(FactExtractor): - """Ethos-U data analyzer.""" - - @singledispatchmethod - def analyze_data(self, data_item: DataItem) -> None: # type: ignore - """Analyse the data.""" - - @analyze_data.register - def analyze_operator_compatibility(self, operators: Operators) -> None: - """Analyse operator compatibility information.""" - cpu_only = [op.op_type for op in operators.ops if op.cpu_only] - if cpu_only: - self.add_fact(HasCPUOnlyOperators(cpu_only)) - - if operators.npu_unsupported_ratio != 0: - self.add_fact(HasUnsupportedOnNPUOperators(operators.npu_unsupported_ratio)) - - if operators.npu_unsupported_ratio == 0: - self.add_fact(AllOperatorsSupportedOnNPU()) - - @analyze_data.register - def analyze_optimization_results( - self, optimization_results: OptimizationPerformanceMetrics - ) -> None: - """Analyse optimization performance metrics.""" - optimizations = optimization_results.optimizations_perf_metrics - if not optimizations: - return - - orig = optimization_results.original_perf_metrics.in_kilobytes() - orig_memory = orig.memory_usage - orig_cycles = orig.npu_cycles - - diffs: list[OptimizationDiff] = [] - for opt_type, opt_perf_metrics in optimizations: - opt = opt_perf_metrics.in_kilobytes() - 
opt_memory = opt.memory_usage - opt_cycles = opt.npu_cycles - - opt_diffs: dict[str, PerfMetricDiff] = {} - - if orig_memory and opt_memory: - opt_diffs.update( - { - "sram": PerfMetricDiff( - orig_memory.sram_memory_area_size, - opt_memory.sram_memory_area_size, - ), - "dram": PerfMetricDiff( - orig_memory.dram_memory_area_size, - opt_memory.dram_memory_area_size, - ), - "on_chip_flash": PerfMetricDiff( - orig_memory.on_chip_flash_memory_area_size, - opt_memory.on_chip_flash_memory_area_size, - ), - "off_chip_flash": PerfMetricDiff( - orig_memory.off_chip_flash_memory_area_size, - opt_memory.off_chip_flash_memory_area_size, - ), - } - ) - if orig_cycles and opt_cycles: - opt_diffs["npu_total_cycles"] = PerfMetricDiff( - orig_cycles.npu_total_cycles, - opt_cycles.npu_total_cycles, - ) - - diff = OptimizationDiff(opt_type=opt_type, opt_diffs=opt_diffs) - diffs.append(diff) - - self.add_fact(OptimizationResults(diffs)) diff --git a/src/mlia/devices/ethosu/data_collection.py b/src/mlia/devices/ethosu/data_collection.py deleted file mode 100644 index d68eadb..0000000 --- a/src/mlia/devices/ethosu/data_collection.py +++ /dev/null @@ -1,187 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""Data collection module for Ethos-U.""" -from __future__ import annotations - -import logging -from pathlib import Path - -from mlia.backend.vela.compat import Operators -from mlia.backend.vela.compat import supported_operators -from mlia.core.context import Context -from mlia.core.data_collection import ContextAwareDataCollector -from mlia.core.errors import FunctionalityNotSupportedError -from mlia.core.performance import estimate_performance -from mlia.devices.ethosu.config import EthosUConfiguration -from mlia.devices.ethosu.performance import EthosUPerformanceEstimator -from mlia.devices.ethosu.performance import OptimizationPerformanceMetrics -from mlia.devices.ethosu.performance import PerformanceMetrics -from mlia.nn.tensorflow.config import get_keras_model -from mlia.nn.tensorflow.config import get_tflite_model -from mlia.nn.tensorflow.config import KerasModel -from mlia.nn.tensorflow.optimizations.select import get_optimizer -from mlia.nn.tensorflow.optimizations.select import OptimizationSettings -from mlia.nn.tensorflow.utils import save_keras_model -from mlia.utils.logging import log_action -from mlia.utils.types import is_list_of - -logger = logging.getLogger(__name__) - - -class EthosUOperatorCompatibility(ContextAwareDataCollector): - """Collect operator compatibility information.""" - - def __init__(self, model: Path, device: EthosUConfiguration) -> None: - """Init operator compatibility data collector.""" - self.model = model - self.device = device - - def collect_data(self) -> Operators: - """Collect operator compatibility information.""" - tflite_model = get_tflite_model(self.model, self.context) - - with log_action("Checking operator compatibility ..."): - return supported_operators( - Path(tflite_model.model_path), self.device.compiler_options - ) - - @classmethod - def name(cls) -> str: - """Return name of the collector.""" - return "ethos_u_operator_compatibility" - - -class 
EthosUPerformance(ContextAwareDataCollector): - """Collect performance metrics.""" - - def __init__( - self, - model: Path, - device: EthosUConfiguration, - backends: list[str] | None = None, - ) -> None: - """Init performance data collector.""" - self.model = model - self.device = device - self.backends = backends - - def collect_data(self) -> PerformanceMetrics: - """Collect model performance metrics.""" - tflite_model = get_tflite_model(self.model, self.context) - estimator = EthosUPerformanceEstimator( - self.context, - self.device, - self.backends, - ) - - return estimator.estimate(tflite_model) - - @classmethod - def name(cls) -> str: - """Return name of the collector.""" - return "ethos_u_performance" - - -class OptimizeModel: - """Helper class for model optimization.""" - - def __init__( - self, context: Context, opt_settings: list[OptimizationSettings] - ) -> None: - """Init helper.""" - self.context = context - self.opt_settings = opt_settings - - def __call__(self, keras_model: KerasModel) -> KerasModel: - """Run optimization.""" - optimizer = get_optimizer(keras_model, self.opt_settings) - - opts_as_str = ", ".join(str(opt) for opt in self.opt_settings) - logger.info("Applying model optimizations - [%s]", opts_as_str) - optimizer.apply_optimization() - - model = optimizer.get_model() - model_path = self.context.get_model_path("optimized_model.h5") - save_keras_model(model, model_path) - - return KerasModel(model_path) - - -class EthosUOptimizationPerformance(ContextAwareDataCollector): - """Collect performance metrics for the optimizations.""" - - def __init__( - self, - model: Path, - device: EthosUConfiguration, - optimizations: list[list[dict]], - backends: list[str] | None = None, - ) -> None: - """Init performance optimizations data collector.""" - self.model = model - self.device = device - self.optimizations = optimizations - self.backends = backends - - def collect_data(self) -> OptimizationPerformanceMetrics | None: - """Collect performance 
metrics for the optimizations.""" - logger.info("Estimate performance ...") - - if not self.optimizations: - raise FunctionalityNotSupportedError( - reason="Unable to estimate model optimizations impact", - description="No optimization targets provided", - ) - - opt_settings = self._parse_optimization_params(self.optimizations) - - try: - keras_model = get_keras_model(self.model, self.context) - except NotImplementedError as err: - raise FunctionalityNotSupportedError( - reason="Unable to run model optimizations", - description=f"{self.model} is not a Keras model and " - "could not be converted to a Keras model", - ) from err - - optimizers = [OptimizeModel(self.context, opts) for opts in opt_settings] - - estimator = EthosUPerformanceEstimator( - self.context, - self.device, - self.backends, - ) - original_metrics, *optimized_metrics = estimate_performance( - keras_model, estimator, optimizers # type: ignore - ) - - result = OptimizationPerformanceMetrics( - original_perf_metrics=original_metrics, - optimizations_perf_metrics=list(zip(opt_settings, optimized_metrics)), - ) - return result - - @staticmethod - def _parse_optimization_params( - optimizations: list[list[dict]], - ) -> list[list[OptimizationSettings]]: - """Parse optimization parameters.""" - if not is_list_of(optimizations, list): - raise Exception("Optimization parameters expected to be a list") - - return [ - [ - OptimizationSettings( - item.get("optimization_type"), # type: ignore - item.get("optimization_target"), # type: ignore - item.get("layers_to_optimized"), - ) - for item in opt_configuration - ] - for opt_configuration in optimizations - ] - - @classmethod - def name(cls) -> str: - """Return name of the collector.""" - return "ethos_u_model_optimizations" diff --git a/src/mlia/devices/ethosu/events.py b/src/mlia/devices/ethosu/events.py deleted file mode 100644 index d5408b0..0000000 --- a/src/mlia/devices/ethosu/events.py +++ /dev/null @@ -1,24 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 
2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Ethos-U MLIA module events.""" -from dataclasses import dataclass -from pathlib import Path - -from mlia.core.events import Event -from mlia.core.events import EventDispatcher -from mlia.devices.ethosu.config import EthosUConfiguration - - -@dataclass -class EthosUAdvisorStartedEvent(Event): - """Event with Ethos-U advisor parameters.""" - - model: Path - device: EthosUConfiguration - - -class EthosUAdvisorEventHandler(EventDispatcher): - """Event handler for the Ethos-U inference advisor.""" - - def on_ethos_u_advisor_started(self, event: EthosUAdvisorStartedEvent) -> None: - """Handle EthosUAdvisorStarted event.""" diff --git a/src/mlia/devices/ethosu/handlers.py b/src/mlia/devices/ethosu/handlers.py deleted file mode 100644 index f010bdb..0000000 --- a/src/mlia/devices/ethosu/handlers.py +++ /dev/null @@ -1,55 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Event handler.""" -from __future__ import annotations - -import logging - -from mlia.backend.vela.compat import Operators -from mlia.core.events import CollectedDataEvent -from mlia.core.handlers import WorkflowEventsHandler -from mlia.core.typing import PathOrFileLike -from mlia.devices.ethosu.events import EthosUAdvisorEventHandler -from mlia.devices.ethosu.events import EthosUAdvisorStartedEvent -from mlia.devices.ethosu.performance import OptimizationPerformanceMetrics -from mlia.devices.ethosu.performance import PerformanceMetrics -from mlia.devices.ethosu.reporters import ethos_u_formatters - -logger = logging.getLogger(__name__) - - -class EthosUEventHandler(WorkflowEventsHandler, EthosUAdvisorEventHandler): - """CLI event handler.""" - - def __init__(self, output: PathOrFileLike | None = None) -> None: - """Init event handler.""" - super().__init__(ethos_u_formatters, output) - - def on_collected_data(self, event: CollectedDataEvent) -> 
None: - """Handle CollectedDataEvent event.""" - data_item = event.data_item - - if isinstance(data_item, Operators): - self.reporter.submit([data_item.ops, data_item], delay_print=True) - - if isinstance(data_item, PerformanceMetrics): - self.reporter.submit(data_item, delay_print=True, space=True) - - if isinstance(data_item, OptimizationPerformanceMetrics): - original_metrics = data_item.original_perf_metrics - if not data_item.optimizations_perf_metrics: - return - - _opt_settings, optimized_metrics = data_item.optimizations_perf_metrics[0] - - self.reporter.submit( - [original_metrics, optimized_metrics], - delay_print=True, - columns_name="Metrics", - title="Performance metrics", - space=True, - ) - - def on_ethos_u_advisor_started(self, event: EthosUAdvisorStartedEvent) -> None: - """Handle EthosUAdvisorStarted event.""" - self.reporter.submit(event.device) diff --git a/src/mlia/devices/ethosu/operators.py b/src/mlia/devices/ethosu/operators.py deleted file mode 100644 index 97c2b17..0000000 --- a/src/mlia/devices/ethosu/operators.py +++ /dev/null @@ -1,14 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Operators module.""" -import logging - -from mlia.backend.vela.compat import generate_supported_operators_report - - -logger = logging.getLogger(__name__) - - -def report() -> None: - """Generate supported operators report.""" - generate_supported_operators_report() diff --git a/src/mlia/devices/ethosu/performance.py b/src/mlia/devices/ethosu/performance.py deleted file mode 100644 index 8051d6e..0000000 --- a/src/mlia/devices/ethosu/performance.py +++ /dev/null @@ -1,261 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""Performance estimation.""" -from __future__ import annotations - -import logging -from dataclasses import dataclass -from enum import Enum -from pathlib import Path -from typing import Union - -import mlia.backend.vela.compiler as vela_comp -import mlia.backend.vela.performance as vela_perf -from mlia.backend.corstone.performance import DeviceInfo -from mlia.backend.corstone.performance import estimate_performance -from mlia.backend.corstone.performance import ModelInfo -from mlia.backend.install import is_supported -from mlia.backend.install import supported_backends -from mlia.core.context import Context -from mlia.core.performance import PerformanceEstimator -from mlia.devices.ethosu.config import EthosUConfiguration -from mlia.nn.tensorflow.config import get_tflite_model -from mlia.nn.tensorflow.config import ModelConfiguration -from mlia.nn.tensorflow.optimizations.select import OptimizationSettings -from mlia.utils.logging import log_action - - -logger = logging.getLogger(__name__) - - -@dataclass -class NPUCycles: - """NPU cycles metrics.""" - - npu_active_cycles: int - npu_idle_cycles: int - npu_total_cycles: int - npu_axi0_rd_data_beat_received: int - npu_axi0_wr_data_beat_written: int - npu_axi1_rd_data_beat_received: int - - -BYTES_PER_KILOBYTE = 1024 - - -class MemorySizeType(Enum): - """Memory size type enumeration.""" - - BYTES = 0 - KILOBYTES = 1 - - -@dataclass -class MemoryUsage: - """Memory usage metrics.""" - - sram_memory_area_size: int | float - dram_memory_area_size: int | float - unknown_memory_area_size: int | float - on_chip_flash_memory_area_size: int | float - off_chip_flash_memory_area_size: int | float - memory_size_type: MemorySizeType = MemorySizeType.BYTES - - _default_columns = [ - "SRAM used", - "DRAM used", - "Unknown memory used", - "On chip flash used", - "Off chip flash used", - ] - - def in_kilobytes(self) -> MemoryUsage: - """Return memory usage with values in kilobytes.""" - if 
self.memory_size_type == MemorySizeType.KILOBYTES: - return self - - kilobytes = [ - value / BYTES_PER_KILOBYTE - for value in [ - self.sram_memory_area_size, - self.dram_memory_area_size, - self.unknown_memory_area_size, - self.on_chip_flash_memory_area_size, - self.off_chip_flash_memory_area_size, - ] - ] - - return MemoryUsage( - *kilobytes, # type: ignore - memory_size_type=MemorySizeType.KILOBYTES, - ) - - -@dataclass -class PerformanceMetrics: - """Performance metrics.""" - - device: EthosUConfiguration - npu_cycles: NPUCycles | None - memory_usage: MemoryUsage | None - - def in_kilobytes(self) -> PerformanceMetrics: - """Return metrics with memory usage in KiB.""" - if self.memory_usage is None: - return PerformanceMetrics(self.device, self.npu_cycles, self.memory_usage) - - return PerformanceMetrics( - self.device, self.npu_cycles, self.memory_usage.in_kilobytes() - ) - - -@dataclass -class OptimizationPerformanceMetrics: - """Optimization performance metrics.""" - - original_perf_metrics: PerformanceMetrics - optimizations_perf_metrics: list[ - tuple[list[OptimizationSettings], PerformanceMetrics] - ] - - -class VelaPerformanceEstimator( - PerformanceEstimator[Union[Path, ModelConfiguration], MemoryUsage] -): - """Vela based performance estimator.""" - - def __init__(self, context: Context, device: EthosUConfiguration) -> None: - """Init Vela based performance estimator.""" - self.context = context - self.device = device - - def estimate(self, model: Path | ModelConfiguration) -> MemoryUsage: - """Estimate performance.""" - with log_action("Getting the memory usage metrics ..."): - model_path = ( - Path(model.model_path) - if isinstance(model, ModelConfiguration) - else model - ) - - vela_perf_metrics = vela_perf.estimate_performance( - model_path, self.device.compiler_options - ) - - return MemoryUsage( - vela_perf_metrics.sram_memory_area_size, - vela_perf_metrics.dram_memory_area_size, - vela_perf_metrics.unknown_memory_area_size, - 
vela_perf_metrics.on_chip_flash_memory_area_size, - vela_perf_metrics.off_chip_flash_memory_area_size, - ) - - -class CorstonePerformanceEstimator( - PerformanceEstimator[Union[Path, ModelConfiguration], NPUCycles] -): - """Corstone-based performance estimator.""" - - def __init__( - self, context: Context, device: EthosUConfiguration, backend: str - ) -> None: - """Init Corstone-based performance estimator.""" - self.context = context - self.device = device - self.backend = backend - - def estimate(self, model: Path | ModelConfiguration) -> NPUCycles: - """Estimate performance.""" - with log_action(f"Getting the performance metrics for '{self.backend}' ..."): - logger.info( - "WARNING: This task may require several minutes " - "(press ctrl-c to interrupt)" - ) - - model_path = ( - Path(model.model_path) - if isinstance(model, ModelConfiguration) - else model - ) - - optimized_model_path = self.context.get_model_path( - f"{model_path.stem}_vela.tflite" - ) - - vela_comp.optimize_model( - model_path, self.device.compiler_options, optimized_model_path - ) - - model_info = ModelInfo(model_path=optimized_model_path) - device_info = DeviceInfo( - device_type=self.device.target, # type: ignore - mac=self.device.mac, - ) - - corstone_perf_metrics = estimate_performance( - model_info, device_info, self.backend - ) - - return NPUCycles( - corstone_perf_metrics.npu_active_cycles, - corstone_perf_metrics.npu_idle_cycles, - corstone_perf_metrics.npu_total_cycles, - corstone_perf_metrics.npu_axi0_rd_data_beat_received, - corstone_perf_metrics.npu_axi0_wr_data_beat_written, - corstone_perf_metrics.npu_axi1_rd_data_beat_received, - ) - - -class EthosUPerformanceEstimator( - PerformanceEstimator[Union[Path, ModelConfiguration], PerformanceMetrics] -): - """Ethos-U performance estimator.""" - - def __init__( - self, - context: Context, - device: EthosUConfiguration, - backends: list[str] | None = None, - ) -> None: - """Init performance estimator.""" - self.context = context - 
self.device = device - if backends is None: - backends = ["Vela"] # Only Vela is always available as default - for backend in backends: - if backend != "Vela" and not is_supported(backend): - raise ValueError( - f"Unsupported backend '{backend}'. " - f"Only 'Vela' and {supported_backends()} " - "are supported." - ) - self.backends = set(backends) - - def estimate(self, model: Path | ModelConfiguration) -> PerformanceMetrics: - """Estimate performance.""" - model_path = ( - Path(model.model_path) if isinstance(model, ModelConfiguration) else model - ) - - tflite_model = get_tflite_model(model_path, self.context) - - memory_usage = None - npu_cycles = None - - for backend in self.backends: - if backend == "Vela": - vela_estimator = VelaPerformanceEstimator(self.context, self.device) - memory_usage = vela_estimator.estimate(tflite_model) - elif backend in supported_backends(): - corstone_estimator = CorstonePerformanceEstimator( - self.context, self.device, backend - ) - npu_cycles = corstone_estimator.estimate(tflite_model) - else: - logger.warning( - "Backend '%s' is not supported for Ethos-U performance " - "estimation.", - backend, - ) - - return PerformanceMetrics(self.device, npu_cycles, memory_usage) diff --git a/src/mlia/devices/ethosu/reporters.py b/src/mlia/devices/ethosu/reporters.py deleted file mode 100644 index 7ecaab1..0000000 --- a/src/mlia/devices/ethosu/reporters.py +++ /dev/null @@ -1,385 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""Reports module.""" -from __future__ import annotations - -from collections import defaultdict -from typing import Any -from typing import Callable - -from mlia.backend.vela.compat import Operator -from mlia.backend.vela.compat import Operators -from mlia.core.advice_generation import Advice -from mlia.core.reporters import report_advice -from mlia.core.reporting import BytesCell -from mlia.core.reporting import Cell -from mlia.core.reporting import ClockCell -from mlia.core.reporting import Column -from mlia.core.reporting import CompoundFormatter -from mlia.core.reporting import CyclesCell -from mlia.core.reporting import Format -from mlia.core.reporting import NestedReport -from mlia.core.reporting import Report -from mlia.core.reporting import ReportItem -from mlia.core.reporting import SingleRow -from mlia.core.reporting import Table -from mlia.devices.ethosu.config import EthosUConfiguration -from mlia.devices.ethosu.performance import PerformanceMetrics -from mlia.utils.console import style_improvement -from mlia.utils.types import is_list_of - - -def report_operators_stat(operators: Operators) -> Report: - """Return table representation for the ops stats.""" - columns = [ - Column("Number of operators", alias="num_of_operators"), - Column("Number of NPU supported operators", "num_of_npu_supported_operators"), - Column("Unsupported ops ratio", "npu_unsupported_ratio"), - ] - rows = [ - ( - operators.total_number, - operators.npu_supported_number, - Cell( - operators.npu_unsupported_ratio * 100, - fmt=Format(str_fmt="{:.0f}%".format), - ), - ) - ] - - return SingleRow( - columns, rows, name="Operators statistics", alias="operators_stats" - ) - - -def report_operators(ops: list[Operator]) -> Report: - """Return table representation for the list of operators.""" - columns = [ - Column("#", only_for=["plain_text"]), - Column( - "Operator name", - alias="operator_name", - fmt=Format(wrap_width=30), - ), - Column( - 
"Operator type", - alias="operator_type", - fmt=Format(wrap_width=25), - ), - Column( - "Placement", - alias="placement", - fmt=Format(wrap_width=20), - ), - Column( - "Notes", - alias="notes", - fmt=Format(wrap_width=35), - ), - ] - - rows = [ - ( - i + 1, - op.name, - op.op_type, - Cell( - "NPU" if (npu := op.run_on_npu.supported) else "CPU", - Format(style=style_improvement(npu)), - ), - Table( - columns=[ - Column( - "Note", - alias="note", - fmt=Format(wrap_width=35), - ) - ], - rows=[ - (Cell(item, Format(str_fmt=lambda x: f"* {x}")),) - for reason in op.run_on_npu.reasons - for item in reason - if item - ], - name="Notes", - ), - ) - for i, op in enumerate(ops) - ] - - return Table(columns, rows, name="Operators", alias="operators") - - -def report_device_details(device: EthosUConfiguration) -> Report: - """Return table representation for the device.""" - compiler_config = device.resolved_compiler_config - - memory_settings = [ - ReportItem( - "Const mem area", - "const_mem_area", - compiler_config["const_mem_area"], - ), - ReportItem( - "Arena mem area", - "arena_mem_area", - compiler_config["arena_mem_area"], - ), - ReportItem( - "Cache mem area", - "cache_mem_area", - compiler_config["cache_mem_area"], - ), - ReportItem( - "Arena cache size", - "arena_cache_size", - BytesCell(compiler_config["arena_cache_size"]), - ), - ] - - mem_areas_settings = [ - ReportItem( - f"{mem_area_name}", - mem_area_name, - None, - nested_items=[ - ReportItem( - "Clock scales", - "clock_scales", - mem_area_settings["clock_scales"], - ), - ReportItem( - "Burst length", - "burst_length", - BytesCell(mem_area_settings["burst_length"]), - ), - ReportItem( - "Read latency", - "read_latency", - CyclesCell(mem_area_settings["read_latency"]), - ), - ReportItem( - "Write latency", - "write_latency", - CyclesCell(mem_area_settings["write_latency"]), - ), - ], - ) - for mem_area_name, mem_area_settings in compiler_config["memory_area"].items() - ] - - system_settings = [ - ReportItem( - 
"Accelerator clock", - "accelerator_clock", - ClockCell(compiler_config["core_clock"]), - ), - ReportItem( - "AXI0 port", - "axi0_port", - compiler_config["axi0_port"], - ), - ReportItem( - "AXI1 port", - "axi1_port", - compiler_config["axi1_port"], - ), - ReportItem( - "Memory area settings", "memory_area", None, nested_items=mem_areas_settings - ), - ] - - arch_settings = [ - ReportItem( - "Permanent storage mem area", - "permanent_storage_mem_area", - compiler_config["permanent_storage_mem_area"], - ), - ReportItem( - "Feature map storage mem area", - "feature_map_storage_mem_area", - compiler_config["feature_map_storage_mem_area"], - ), - ReportItem( - "Fast storage mem area", - "fast_storage_mem_area", - compiler_config["fast_storage_mem_area"], - ), - ] - - return NestedReport( - "Device information", - "device", - [ - ReportItem("Target", alias="target", value=device.target), - ReportItem("MAC", alias="mac", value=device.mac), - ReportItem( - "Memory mode", - alias="memory_mode", - value=compiler_config["memory_mode"], - nested_items=memory_settings, - ), - ReportItem( - "System config", - alias="system_config", - value=compiler_config["system_config"], - nested_items=system_settings, - ), - ReportItem( - "Architecture settings", - "arch_settings", - None, - nested_items=arch_settings, - ), - ], - ) - - -def metrics_as_records(perf_metrics: list[PerformanceMetrics]) -> list[tuple]: - """Convert perf metrics object into list of records.""" - perf_metrics = [item.in_kilobytes() for item in perf_metrics] - - def _cycles_as_records(perf_metrics: list[PerformanceMetrics]) -> list[tuple]: - metric_map = defaultdict(list) - for metrics in perf_metrics: - if not metrics.npu_cycles: - return [] - metric_map["NPU active cycles"].append(metrics.npu_cycles.npu_active_cycles) - metric_map["NPU idle cycles"].append(metrics.npu_cycles.npu_idle_cycles) - metric_map["NPU total cycles"].append(metrics.npu_cycles.npu_total_cycles) - - return [ - (name, *(Cell(value, 
Format(str_fmt="12,d")) for value in values), "cycles") - for name, values in metric_map.items() - ] - - def _memory_usage_as_records(perf_metrics: list[PerformanceMetrics]) -> list[tuple]: - metric_map = defaultdict(list) - for metrics in perf_metrics: - if not metrics.memory_usage: - return [] - metric_map["SRAM used"].append(metrics.memory_usage.sram_memory_area_size) - metric_map["DRAM used"].append(metrics.memory_usage.dram_memory_area_size) - metric_map["Unknown memory area used"].append( - metrics.memory_usage.unknown_memory_area_size - ) - metric_map["On-chip flash used"].append( - metrics.memory_usage.on_chip_flash_memory_area_size - ) - metric_map["Off-chip flash used"].append( - metrics.memory_usage.off_chip_flash_memory_area_size - ) - - return [ - (name, *(Cell(value, Format(str_fmt="12.2f")) for value in values), "KiB") - for name, values in metric_map.items() - if all(val > 0 for val in values) - ] - - def _data_beats_as_records(perf_metrics: list[PerformanceMetrics]) -> list[tuple]: - metric_map = defaultdict(list) - for metrics in perf_metrics: - if not metrics.npu_cycles: - return [] - metric_map["NPU AXI0 RD data beat received"].append( - metrics.npu_cycles.npu_axi0_rd_data_beat_received - ) - metric_map["NPU AXI0 WR data beat written"].append( - metrics.npu_cycles.npu_axi0_wr_data_beat_written - ) - metric_map["NPU AXI1 RD data beat received"].append( - metrics.npu_cycles.npu_axi1_rd_data_beat_received - ) - - return [ - (name, *(Cell(value, Format(str_fmt="12,d")) for value in values), "beats") - for name, values in metric_map.items() - ] - - return [ - metrics - for metrics_func in ( - _memory_usage_as_records, - _cycles_as_records, - _data_beats_as_records, - ) - for metrics in metrics_func(perf_metrics) - ] - - -def report_perf_metrics( - perf_metrics: PerformanceMetrics | list[PerformanceMetrics], -) -> Report: - """Return comparison table for the performance metrics.""" - if isinstance(perf_metrics, PerformanceMetrics): - perf_metrics = 
[perf_metrics] - - rows = metrics_as_records(perf_metrics) - - if len(perf_metrics) == 2: - return Table( - columns=[ - Column("Metric", alias="metric", fmt=Format(wrap_width=30)), - Column("Original", alias="original", fmt=Format(wrap_width=15)), - Column("Optimized", alias="optimized", fmt=Format(wrap_width=15)), - Column("Unit", alias="unit", fmt=Format(wrap_width=15)), - Column("Improvement (%)", alias="improvement"), - ], - rows=[ - ( - metric, - original_value, - optimized_value, - unit, - Cell( - ( - diff := 100 - - (optimized_value.value / original_value.value * 100) - ), - Format(str_fmt="15.2f", style=style_improvement(diff > 0)), - ) - if original_value.value != 0 - else None, - ) - for metric, original_value, optimized_value, unit in rows - ], - name="Performance metrics", - alias="performance_metrics", - notes="IMPORTANT: The performance figures above refer to NPU only", - ) - - return Table( - columns=[ - Column("Metric", alias="metric", fmt=Format(wrap_width=30)), - Column("Value", alias="value", fmt=Format(wrap_width=15)), - Column("Unit", alias="unit", fmt=Format(wrap_width=15)), - ], - rows=rows, - name="Performance metrics", - alias="performance_metrics", - notes="IMPORTANT: The performance figures above refer to NPU only", - ) - - -def ethos_u_formatters(data: Any) -> Callable[[Any], Report]: - """Find appropriate formatter for the provided data.""" - if isinstance(data, PerformanceMetrics) or is_list_of(data, PerformanceMetrics, 2): - return report_perf_metrics - - if is_list_of(data, Advice): - return report_advice - - if is_list_of(data, Operator): - return report_operators - - if isinstance(data, Operators): - return report_operators_stat - - if isinstance(data, EthosUConfiguration): - return report_device_details - - if isinstance(data, (list, tuple)): - formatters = [ethos_u_formatters(item) for item in data] - return CompoundFormatter(formatters) - - raise Exception(f"Unable to find appropriate formatter for {data}") diff --git 
a/src/mlia/devices/tosa/__init__.py b/src/mlia/devices/tosa/__init__.py deleted file mode 100644 index 762c831..0000000 --- a/src/mlia/devices/tosa/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""TOSA target module.""" diff --git a/src/mlia/devices/tosa/advice_generation.py b/src/mlia/devices/tosa/advice_generation.py deleted file mode 100644 index a3d8011..0000000 --- a/src/mlia/devices/tosa/advice_generation.py +++ /dev/null @@ -1,40 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""TOSA advice generation.""" -from functools import singledispatchmethod - -from mlia.core.advice_generation import advice_category -from mlia.core.advice_generation import FactBasedAdviceProducer -from mlia.core.common import AdviceCategory -from mlia.core.common import DataItem -from mlia.devices.tosa.data_analysis import ModelIsNotTOSACompatible -from mlia.devices.tosa.data_analysis import ModelIsTOSACompatible - - -class TOSAAdviceProducer(FactBasedAdviceProducer): - """TOSA advice producer.""" - - @singledispatchmethod - def produce_advice(self, _data_item: DataItem) -> None: # type: ignore - """Produce advice.""" - - @produce_advice.register - @advice_category(AdviceCategory.ALL, AdviceCategory.OPERATORS) - def handle_model_is_tosa_compatible( - self, _data_item: ModelIsTOSACompatible - ) -> None: - """Advice for TOSA compatibility.""" - self.add_advice(["Model is fully TOSA compatible."]) - - @produce_advice.register - @advice_category(AdviceCategory.ALL, AdviceCategory.OPERATORS) - def handle_model_is_not_tosa_compatible( - self, _data_item: ModelIsNotTOSACompatible - ) -> None: - """Advice for TOSA compatibility.""" - self.add_advice( - [ - "Some operators in the model are not TOSA compatible. " - "Please, refer to the operators table for more information." 
- ] - ) diff --git a/src/mlia/devices/tosa/advisor.py b/src/mlia/devices/tosa/advisor.py deleted file mode 100644 index 53dfa87..0000000 --- a/src/mlia/devices/tosa/advisor.py +++ /dev/null @@ -1,94 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""TOSA advisor.""" -from __future__ import annotations - -from pathlib import Path -from typing import Any - -from mlia.core.advice_generation import AdviceCategory -from mlia.core.advice_generation import AdviceProducer -from mlia.core.advisor import DefaultInferenceAdvisor -from mlia.core.advisor import InferenceAdvisor -from mlia.core.context import Context -from mlia.core.context import ExecutionContext -from mlia.core.data_analysis import DataAnalyzer -from mlia.core.data_collection import DataCollector -from mlia.core.events import Event -from mlia.core.typing import PathOrFileLike -from mlia.devices.tosa.advice_generation import TOSAAdviceProducer -from mlia.devices.tosa.config import TOSAConfiguration -from mlia.devices.tosa.data_analysis import TOSADataAnalyzer -from mlia.devices.tosa.data_collection import TOSAOperatorCompatibility -from mlia.devices.tosa.events import TOSAAdvisorStartedEvent -from mlia.devices.tosa.handlers import TOSAEventHandler - - -class TOSAInferenceAdvisor(DefaultInferenceAdvisor): - """TOSA inference advisor.""" - - @classmethod - def name(cls) -> str: - """Return name of the advisor.""" - return "tosa_inference_advisor" - - def get_collectors(self, context: Context) -> list[DataCollector]: - """Return list of the data collectors.""" - model = self.get_model(context) - - collectors: list[DataCollector] = [] - - if AdviceCategory.OPERATORS in context.advice_category: - collectors.append(TOSAOperatorCompatibility(model)) - - return collectors - - def get_analyzers(self, context: Context) -> list[DataAnalyzer]: - """Return list of the data analyzers.""" - return [ - TOSADataAnalyzer(), - ] - - def 
get_producers(self, context: Context) -> list[AdviceProducer]: - """Return list of the advice producers.""" - return [ - TOSAAdviceProducer(), - ] - - def get_events(self, context: Context) -> list[Event]: - """Return list of the startup events.""" - model = self.get_model(context) - target_profile = self.get_target_profile(context) - - return [ - TOSAAdvisorStartedEvent(model, TOSAConfiguration(target_profile)), - ] - - -def configure_and_get_tosa_advisor( - context: ExecutionContext, - target_profile: str, - model: str | Path, - output: PathOrFileLike | None = None, - **_extra_args: Any, -) -> InferenceAdvisor: - """Create and configure TOSA advisor.""" - if context.event_handlers is None: - context.event_handlers = [TOSAEventHandler(output)] - - if context.config_parameters is None: - context.config_parameters = _get_config_parameters(model, target_profile) - - return TOSAInferenceAdvisor() - - -def _get_config_parameters(model: str | Path, target_profile: str) -> dict[str, Any]: - """Get configuration parameters for the advisor.""" - advisor_parameters: dict[str, Any] = { - "tosa_inference_advisor": { - "model": str(model), - "target_profile": target_profile, - } - } - - return advisor_parameters diff --git a/src/mlia/devices/tosa/config.py b/src/mlia/devices/tosa/config.py deleted file mode 100644 index c3879a7..0000000 --- a/src/mlia/devices/tosa/config.py +++ /dev/null @@ -1,19 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""TOSA target configuration.""" -from mlia.devices.config import IPConfiguration -from mlia.utils.filesystem import get_profile - - -class TOSAConfiguration(IPConfiguration): # pylint: disable=too-few-public-methods - """TOSA configuration.""" - - def __init__(self, target_profile: str) -> None: - """Init configuration.""" - target_data = get_profile(target_profile) - target = target_data["target"] - - if target != "tosa": - raise Exception(f"Wrong target {target} for TOSA configuration") - - super().__init__(target) diff --git a/src/mlia/devices/tosa/data_analysis.py b/src/mlia/devices/tosa/data_analysis.py deleted file mode 100644 index 7cbd61d..0000000 --- a/src/mlia/devices/tosa/data_analysis.py +++ /dev/null @@ -1,36 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""TOSA data analysis module.""" -from dataclasses import dataclass -from functools import singledispatchmethod - -from mlia.backend.tosa_checker.compat import TOSACompatibilityInfo -from mlia.core.common import DataItem -from mlia.core.data_analysis import Fact -from mlia.core.data_analysis import FactExtractor - - -@dataclass -class ModelIsTOSACompatible(Fact): - """Model is completely TOSA compatible.""" - - -@dataclass -class ModelIsNotTOSACompatible(Fact): - """Model is not TOSA compatible.""" - - -class TOSADataAnalyzer(FactExtractor): - """TOSA data analyzer.""" - - @singledispatchmethod - def analyze_data(self, data_item: DataItem) -> None: # type: ignore - """Analyse the data.""" - - @analyze_data.register - def analyze_tosa_compatibility(self, data_item: TOSACompatibilityInfo) -> None: - """Analyse TOSA compatibility information.""" - if data_item.tosa_compatible: - self.add_fact(ModelIsTOSACompatible()) - else: - self.add_fact(ModelIsNotTOSACompatible()) diff --git a/src/mlia/devices/tosa/data_collection.py b/src/mlia/devices/tosa/data_collection.py deleted file mode 
100644 index 105c501..0000000 --- a/src/mlia/devices/tosa/data_collection.py +++ /dev/null @@ -1,30 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""TOSA data collection module.""" -from pathlib import Path - -from mlia.backend.tosa_checker.compat import get_tosa_compatibility_info -from mlia.backend.tosa_checker.compat import TOSACompatibilityInfo -from mlia.core.data_collection import ContextAwareDataCollector -from mlia.nn.tensorflow.config import get_tflite_model -from mlia.utils.logging import log_action - - -class TOSAOperatorCompatibility(ContextAwareDataCollector): - """Collect operator compatibility information.""" - - def __init__(self, model: Path) -> None: - """Init the data collector.""" - self.model = model - - def collect_data(self) -> TOSACompatibilityInfo: - """Collect TOSA compatibility information.""" - tflite_model = get_tflite_model(self.model, self.context) - - with log_action("Checking operator compatibility ..."): - return get_tosa_compatibility_info(tflite_model.model_path) - - @classmethod - def name(cls) -> str: - """Return name of the collector.""" - return "tosa_operator_compatibility" diff --git a/src/mlia/devices/tosa/events.py b/src/mlia/devices/tosa/events.py deleted file mode 100644 index ceaba57..0000000 --- a/src/mlia/devices/tosa/events.py +++ /dev/null @@ -1,24 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""TOSA advisor events.""" -from dataclasses import dataclass -from pathlib import Path - -from mlia.core.events import Event -from mlia.core.events import EventDispatcher -from mlia.devices.tosa.config import TOSAConfiguration - - -@dataclass -class TOSAAdvisorStartedEvent(Event): - """Event with TOSA advisor parameters.""" - - model: Path - device: TOSAConfiguration - - -class TOSAAdvisorEventHandler(EventDispatcher): - """Event handler for the TOSA inference advisor.""" - - def on_tosa_advisor_started(self, event: TOSAAdvisorStartedEvent) -> None: - """Handle TOSAAdvisorStartedEvent event.""" diff --git a/src/mlia/devices/tosa/handlers.py b/src/mlia/devices/tosa/handlers.py deleted file mode 100644 index fc82657..0000000 --- a/src/mlia/devices/tosa/handlers.py +++ /dev/null @@ -1,36 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""TOSA Advisor event handlers.""" -# pylint: disable=R0801 -from __future__ import annotations - -import logging - -from mlia.backend.tosa_checker.compat import TOSACompatibilityInfo -from mlia.core.events import CollectedDataEvent -from mlia.core.handlers import WorkflowEventsHandler -from mlia.core.typing import PathOrFileLike -from mlia.devices.tosa.events import TOSAAdvisorEventHandler -from mlia.devices.tosa.events import TOSAAdvisorStartedEvent -from mlia.devices.tosa.reporters import tosa_formatters - -logger = logging.getLogger(__name__) - - -class TOSAEventHandler(WorkflowEventsHandler, TOSAAdvisorEventHandler): - """Event handler for TOSA advisor.""" - - def __init__(self, output: PathOrFileLike | None = None) -> None: - """Init event handler.""" - super().__init__(tosa_formatters, output) - - def on_tosa_advisor_started(self, event: TOSAAdvisorStartedEvent) -> None: - """Handle TOSAAdvisorStartedEvent event.""" - self.reporter.submit(event.device) - - def on_collected_data(self, event: CollectedDataEvent) -> 
None: - """Handle CollectedDataEvent event.""" - data_item = event.data_item - - if isinstance(data_item, TOSACompatibilityInfo): - self.reporter.submit(data_item.operators, delay_print=True) diff --git a/src/mlia/devices/tosa/operators.py b/src/mlia/devices/tosa/operators.py deleted file mode 100644 index b75ceb0..0000000 --- a/src/mlia/devices/tosa/operators.py +++ /dev/null @@ -1,11 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Operators module.""" - - -def report() -> None: - """Generate supported operators report.""" - raise Exception( - "Generating a supported operators report is not " - "currently supported with TOSA target profile." - ) diff --git a/src/mlia/devices/tosa/reporters.py b/src/mlia/devices/tosa/reporters.py deleted file mode 100644 index e5559ee..0000000 --- a/src/mlia/devices/tosa/reporters.py +++ /dev/null @@ -1,83 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""Reports module.""" -from __future__ import annotations - -from typing import Any -from typing import Callable - -from mlia.backend.tosa_checker.compat import Operator -from mlia.core.advice_generation import Advice -from mlia.core.reporters import report_advice -from mlia.core.reporting import Cell -from mlia.core.reporting import Column -from mlia.core.reporting import Format -from mlia.core.reporting import NestedReport -from mlia.core.reporting import Report -from mlia.core.reporting import ReportItem -from mlia.core.reporting import Table -from mlia.devices.tosa.config import TOSAConfiguration -from mlia.utils.console import style_improvement -from mlia.utils.types import is_list_of - - -def report_device(device: TOSAConfiguration) -> Report: - """Generate report for the device.""" - return NestedReport( - "Device information", - "device", - [ - ReportItem("Target", alias="target", value=device.target), - ], - ) - - -def report_tosa_operators(ops: list[Operator]) -> Report: - """Generate report for the operators.""" - return Table( - [ - Column("#", only_for=["plain_text"]), - Column( - "Operator location", - alias="operator_location", - fmt=Format(wrap_width=30), - ), - Column("Operator name", alias="operator_name", fmt=Format(wrap_width=20)), - Column( - "TOSA compatibility", - alias="is_tosa_compatible", - fmt=Format(wrap_width=25), - ), - ], - [ - ( - index + 1, - op.location, - op.name, - Cell( - op.is_tosa_compatible, - Format( - style=style_improvement(op.is_tosa_compatible), - str_fmt=lambda v: "Compatible" if v else "Not compatible", - ), - ), - ) - for index, op in enumerate(ops) - ], - name="Operators", - alias="operators", - ) - - -def tosa_formatters(data: Any) -> Callable[[Any], Report]: - """Find appropriate formatter for the provided data.""" - if is_list_of(data, Advice): - return report_advice - - if isinstance(data, TOSAConfiguration): - return report_device - - if is_list_of(data, Operator): - 
return report_tosa_operators - - raise Exception(f"Unable to find appropriate formatter for {data}") diff --git a/src/mlia/target/__init__.py b/src/mlia/target/__init__.py new file mode 100644 index 0000000..2370221 --- /dev/null +++ b/src/mlia/target/__init__.py @@ -0,0 +1,3 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Target module.""" diff --git a/src/mlia/target/config.py b/src/mlia/target/config.py new file mode 100644 index 0000000..7ab6b43 --- /dev/null +++ b/src/mlia/target/config.py @@ -0,0 +1,11 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""IP configuration module.""" + + +class IPConfiguration: # pylint: disable=too-few-public-methods + """Base class for IP configuration.""" + + def __init__(self, target: str) -> None: + """Init IP configuration instance.""" + self.target = target diff --git a/src/mlia/target/cortex_a/__init__.py b/src/mlia/target/cortex_a/__init__.py new file mode 100644 index 0000000..fe01835 --- /dev/null +++ b/src/mlia/target/cortex_a/__init__.py @@ -0,0 +1,3 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Cortex-A target module.""" diff --git a/src/mlia/target/cortex_a/advice_generation.py b/src/mlia/target/cortex_a/advice_generation.py new file mode 100644 index 0000000..b68106e --- /dev/null +++ b/src/mlia/target/cortex_a/advice_generation.py @@ -0,0 +1,153 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +"""Cortex-A advice generation.""" +from functools import singledispatchmethod + +from mlia.core.advice_generation import advice_category +from mlia.core.advice_generation import FactBasedAdviceProducer +from mlia.core.common import AdviceCategory +from mlia.core.common import DataItem +from mlia.target.cortex_a.data_analysis import ModelHasCustomOperators +from mlia.target.cortex_a.data_analysis import ModelIsCortexACompatible +from mlia.target.cortex_a.data_analysis import ModelIsNotCortexACompatible +from mlia.target.cortex_a.data_analysis import ModelIsNotTFLiteCompatible +from mlia.target.cortex_a.data_analysis import TFLiteCompatibilityCheckFailed + + +class CortexAAdviceProducer(FactBasedAdviceProducer): + """Cortex-A advice producer.""" + + cortex_a_disclaimer = ( + "Note that the provided compatibility information is general. " + "At runtime individual operators in the given model might fall back to " + "the TensorFlow Lite reference or might produce errors based on the " + "specific parameters." 
+ ) + + @singledispatchmethod + def produce_advice(self, _data_item: DataItem) -> None: # type: ignore + """Produce advice.""" + + @produce_advice.register + @advice_category(AdviceCategory.ALL, AdviceCategory.OPERATORS) + def handle_model_is_cortex_a_compatible( + self, data_item: ModelIsCortexACompatible + ) -> None: + """Advice for Cortex-A compatibility.""" + self.add_advice( + [ + f"Model is fully compatible with {data_item.backend_info} for " + "Cortex-A.", + self.cortex_a_disclaimer, + ] + ) + + @produce_advice.register + @advice_category(AdviceCategory.ALL, AdviceCategory.OPERATORS) + def handle_model_is_not_cortex_a_compatible( + self, data_item: ModelIsNotCortexACompatible + ) -> None: + """Advice for Cortex-A compatibility.""" + if data_item.unsupported_ops: + self.add_advice( + [ + "The following operators are not supported by " + f"{data_item.backend_info} and will fall back to the " + "TensorFlow Lite runtime:", + "\n".join(f" - {op}" for op in data_item.unsupported_ops), + ] + ) + + if data_item.activation_func_support: + self.add_advice( + [ + "The fused activation functions of the following operators " + f"are not supported by {data_item.backend_info}. 
Please " + "consider using one of the supported activation functions " + "instead:", + "\n".join( + f" - {op}\n" + f" - Used unsupported: {act.used_unsupported}\n" + f" - Supported: {act.supported}" + for op, act in data_item.activation_func_support.items() + ), + ] + ) + + self.add_advice( + [ + "Please, refer to the full table of operators above for more " + "information.", + self.cortex_a_disclaimer, + ] + ) + + @produce_advice.register + @advice_category(AdviceCategory.ALL, AdviceCategory.OPERATORS) + def handle_model_is_not_tflite_compatible( + self, data_item: ModelIsNotTFLiteCompatible + ) -> None: + """Advice for TensorFlow Lite compatibility.""" + if data_item.flex_ops: + self.add_advice( + [ + "The following operators are not natively " + "supported by TensorFlow Lite: " + f"{', '.join(data_item.flex_ops)}.", + "Using select TensorFlow operators in TensorFlow Lite model " + "requires special initialization of TFLiteConverter and " + "TensorFlow Lite run-time.", + "Please refer to the TensorFlow documentation for more " + "details: https://www.tensorflow.org/lite/guide/ops_select", + "Note, such models are not supported by the ML Inference Advisor.", + ] + ) + + if data_item.custom_ops: + self.add_advice( + [ + "The following operators appear to be custom and not natively " + "supported by TensorFlow Lite: " + f"{', '.join(data_item.custom_ops)}.", + "Using custom operators in TensorFlow Lite model " + "requires special initialization of TFLiteConverter and " + "TensorFlow Lite run-time.", + "Please refer to the TensorFlow documentation for more " + "details: https://www.tensorflow.org/lite/guide/ops_custom", + "Note, such models are not supported by the ML Inference Advisor.", + ] + ) + + if not data_item.flex_ops and not data_item.custom_ops: + self.add_advice( + [ + "Model could not be converted into TensorFlow Lite format.", + "Please refer to the table for more details.", + ] + ) + + @produce_advice.register + @advice_category(AdviceCategory.ALL, 
AdviceCategory.OPERATORS) + def handle_tflite_check_failed( + self, _data_item: TFLiteCompatibilityCheckFailed + ) -> None: + """Advice for the failed TensorFlow Lite compatibility checks.""" + self.add_advice( + [ + "Model could not be converted into TensorFlow Lite format.", + "Please refer to the table for more details.", + ] + ) + + @produce_advice.register + @advice_category(AdviceCategory.ALL, AdviceCategory.OPERATORS) + def handle_model_has_custom_operators( + self, _data_item: ModelHasCustomOperators + ) -> None: + """Advice for the models with custom operators.""" + self.add_advice( + [ + "Models with custom operators require special initialization " + "and currently are not supported by the ML Inference Advisor.", + ] + ) diff --git a/src/mlia/target/cortex_a/advisor.py b/src/mlia/target/cortex_a/advisor.py new file mode 100644 index 0000000..5912e38 --- /dev/null +++ b/src/mlia/target/cortex_a/advisor.py @@ -0,0 +1,92 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +"""Cortex-A MLIA module.""" +from __future__ import annotations + +from pathlib import Path +from typing import Any + +from mlia.core.advice_generation import AdviceProducer +from mlia.core.advisor import DefaultInferenceAdvisor +from mlia.core.advisor import InferenceAdvisor +from mlia.core.common import AdviceCategory +from mlia.core.context import Context +from mlia.core.context import ExecutionContext +from mlia.core.data_analysis import DataAnalyzer +from mlia.core.data_collection import DataCollector +from mlia.core.events import Event +from mlia.core.typing import PathOrFileLike +from mlia.target.cortex_a.advice_generation import CortexAAdviceProducer +from mlia.target.cortex_a.config import CortexAConfiguration +from mlia.target.cortex_a.data_analysis import CortexADataAnalyzer +from mlia.target.cortex_a.data_collection import CortexAOperatorCompatibility +from mlia.target.cortex_a.events import CortexAAdvisorStartedEvent +from mlia.target.cortex_a.handlers import CortexAEventHandler + + +class CortexAInferenceAdvisor(DefaultInferenceAdvisor): + """Cortex-A Inference Advisor.""" + + @classmethod + def name(cls) -> str: + """Return name of the advisor.""" + return "cortex_a_inference_advisor" + + def get_collectors(self, context: Context) -> list[DataCollector]: + """Return list of the data collectors.""" + model = self.get_model(context) + + collectors: list[DataCollector] = [] + + if AdviceCategory.OPERATORS in context.advice_category: + collectors.append(CortexAOperatorCompatibility(model)) + + return collectors + + def get_analyzers(self, context: Context) -> list[DataAnalyzer]: + """Return list of the data analyzers.""" + return [ + CortexADataAnalyzer(), + ] + + def get_producers(self, context: Context) -> list[AdviceProducer]: + """Return list of the advice producers.""" + return [CortexAAdviceProducer()] + + def get_events(self, context: Context) -> list[Event]: + """Return list of the startup events.""" + 
model = self.get_model(context) + target_profile = self.get_target_profile(context) + + return [ + CortexAAdvisorStartedEvent(model, CortexAConfiguration(target_profile)), + ] + + +def configure_and_get_cortexa_advisor( + context: ExecutionContext, + target_profile: str, + model: str | Path, + output: PathOrFileLike | None = None, + **_extra_args: Any, +) -> InferenceAdvisor: + """Create and configure Cortex-A advisor.""" + if context.event_handlers is None: + context.event_handlers = [CortexAEventHandler(output)] + + if context.config_parameters is None: + context.config_parameters = _get_config_parameters(model, target_profile) + + return CortexAInferenceAdvisor() + + +def _get_config_parameters(model: str | Path, target_profile: str) -> dict[str, Any]: + """Get configuration parameters for the advisor.""" + advisor_parameters: dict[str, Any] = { + "cortex_a_inference_advisor": { + "model": str(model), + "target_profile": target_profile, + }, + } + + return advisor_parameters diff --git a/src/mlia/target/cortex_a/config.py b/src/mlia/target/cortex_a/config.py new file mode 100644 index 0000000..b2b51ea --- /dev/null +++ b/src/mlia/target/cortex_a/config.py @@ -0,0 +1,20 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +"""Cortex-A configuration.""" +from __future__ import annotations + +from mlia.target.config import IPConfiguration +from mlia.utils.filesystem import get_profile + + +class CortexAConfiguration(IPConfiguration): # pylint: disable=too-few-public-methods + """Cortex-A configuration.""" + + def __init__(self, target_profile: str) -> None: + """Init Cortex-A target configuration.""" + target_data = get_profile(target_profile) + + target = target_data["target"] + if target != "cortex-a": + raise Exception(f"Wrong target {target} for Cortex-A configuration") + super().__init__(target) diff --git a/src/mlia/target/cortex_a/data_analysis.py b/src/mlia/target/cortex_a/data_analysis.py new file mode 100644 index 0000000..4a3a068 --- /dev/null +++ b/src/mlia/target/cortex_a/data_analysis.py @@ -0,0 +1,128 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Cortex-A data analysis module.""" +from __future__ import annotations + +from collections import defaultdict +from dataclasses import dataclass +from dataclasses import field +from functools import singledispatchmethod + +from mlia.core.common import DataItem +from mlia.core.data_analysis import Fact +from mlia.core.data_analysis import FactExtractor +from mlia.nn.tensorflow.tflite_compat import TFLiteCompatibilityInfo +from mlia.target.cortex_a.operators import CortexACompatibilityInfo +from mlia.target.cortex_a.operators import Operator + + +class CortexADataAnalyzer(FactExtractor): + """Cortex-A data analyzer.""" + + @singledispatchmethod + def analyze_data(self, data_item: DataItem) -> None: # type: ignore + """Analyse the data.""" + + @analyze_data.register + def analyze_operator_compatibility( + self, data_item: CortexACompatibilityInfo + ) -> None: + """Analyse operator compatibility information.""" + if data_item.cortex_a_compatible: + self.add_fact(ModelIsCortexACompatible(data_item.backend_info)) + 
else: + unsupported_ops = set() + activation_func_support: defaultdict[ + str, ModelIsNotCortexACompatible.ActivationFunctionSupport + ] = defaultdict(ModelIsNotCortexACompatible.ActivationFunctionSupport) + for oper in data_item.operators: + if oper.support_type == Operator.SupportType.OP_NOT_SUPPORTED: + unsupported_ops.add(oper.full_name) + + if oper.support_type == Operator.SupportType.ACTIVATION_NOT_SUPPORTED: + # Add used but unsupported actication functions + activation_func_support[oper.full_name].used_unsupported.add( + oper.activation_func.name + ) + # Add supported activation functions + activation_func_support[oper.full_name].supported.update( + oper.supported_activation_functions + ) + + assert ( + unsupported_ops or activation_func_support or not data_item.operators + ), ( + "The model is marked as not compatible with Cortex-A but there " + "are no unsupported ops activation functions listed." + ) + + self.add_fact( + ModelIsNotCortexACompatible( + data_item.backend_info, unsupported_ops, activation_func_support + ) + ) + + @analyze_data.register + def analyze_tflite_compatibility(self, data_item: TFLiteCompatibilityInfo) -> None: + """Analyze TensorFlow Lite compatibility information.""" + if data_item.compatible: + return + + if data_item.conversion_failed_with_errors: + self.add_fact( + ModelIsNotTFLiteCompatible( + custom_ops=data_item.required_custom_ops, + flex_ops=data_item.required_flex_ops, + ) + ) + + if data_item.check_failed_with_unknown_error: + self.add_fact(TFLiteCompatibilityCheckFailed()) + + if data_item.conversion_failed_for_model_with_custom_ops: + self.add_fact(ModelHasCustomOperators()) + + +@dataclass +class CortexACompatibility(Fact): + """Base class for Cortex-A compatibility providing backend info.""" + + backend_info: str + + +@dataclass +class ModelIsCortexACompatible(CortexACompatibility): + """Model is completely compatible with Cortex-A.""" + + +@dataclass +class ModelIsNotCortexACompatible(CortexACompatibility): + 
"""Model is not compatible with Cortex-A.""" + + @dataclass + class ActivationFunctionSupport: + """Activation function support per operator.""" + + used_unsupported: set[str] = field(default_factory=set) + supported: set[str] = field(default_factory=set) + + unsupported_ops: set[str] + activation_func_support: dict[str, ActivationFunctionSupport] + + +@dataclass +class ModelIsNotTFLiteCompatible(Fact): + """Model could not be converted into TensorFlow Lite format.""" + + custom_ops: list[str] | None = None + flex_ops: list[str] | None = None + + +@dataclass +class TFLiteCompatibilityCheckFailed(Fact): + """TensorFlow Lite compatibility check failed by unknown reason.""" + + +@dataclass +class ModelHasCustomOperators(Fact): + """Model could not be loaded because it contains custom ops.""" diff --git a/src/mlia/target/cortex_a/data_collection.py b/src/mlia/target/cortex_a/data_collection.py new file mode 100644 index 0000000..3ec63e2 --- /dev/null +++ b/src/mlia/target/cortex_a/data_collection.py @@ -0,0 +1,51 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +"""Data collection module for Cortex-A.""" +from __future__ import annotations + +import logging +from pathlib import Path + +from mlia.core.data_collection import ContextAwareDataCollector +from mlia.nn.tensorflow.config import get_tflite_model +from mlia.nn.tensorflow.tflite_compat import TFLiteChecker +from mlia.nn.tensorflow.tflite_compat import TFLiteCompatibilityInfo +from mlia.nn.tensorflow.utils import is_tflite_model +from mlia.target.cortex_a.operators import CortexACompatibilityInfo +from mlia.target.cortex_a.operators import get_cortex_a_compatibility_info +from mlia.utils.logging import log_action + + +logger = logging.getLogger(__name__) + + +class CortexAOperatorCompatibility(ContextAwareDataCollector): + """Collect operator compatibility information.""" + + def __init__(self, model: Path) -> None: + """Init operator compatibility data collector.""" + self.model = model + + def collect_data(self) -> TFLiteCompatibilityInfo | CortexACompatibilityInfo | None: + """Collect operator compatibility information.""" + if not is_tflite_model(self.model): + with log_action("Checking TensorFlow Lite compatibility ..."): + tflite_checker = TFLiteChecker() + tflite_compat = tflite_checker.check_compatibility(self.model) + + if not tflite_compat.compatible: + return tflite_compat + + tflite_model = get_tflite_model(self.model, self.context) + + with log_action("Checking operator compatibility ..."): + return ( + get_cortex_a_compatibility_info( # pylint: disable=assignment-from-none + Path(tflite_model.model_path) + ) + ) + + @classmethod + def name(cls) -> str: + """Return name of the collector.""" + return "cortex_a_operator_compatibility" diff --git a/src/mlia/target/cortex_a/events.py b/src/mlia/target/cortex_a/events.py new file mode 100644 index 0000000..a172d0d --- /dev/null +++ b/src/mlia/target/cortex_a/events.py @@ -0,0 +1,24 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +"""Cortex-A MLIA module events.""" +from dataclasses import dataclass +from pathlib import Path + +from mlia.core.events import Event +from mlia.core.events import EventDispatcher +from mlia.target.cortex_a.config import CortexAConfiguration + + +@dataclass +class CortexAAdvisorStartedEvent(Event): + """Event with Cortex-A advisor parameters.""" + + model: Path + device: CortexAConfiguration + + +class CortexAAdvisorEventHandler(EventDispatcher): + """Event handler for the Cortex-A inference advisor.""" + + def on_cortex_a_advisor_started(self, event: CortexAAdvisorStartedEvent) -> None: + """Handle CortexAAdvisorStarted event.""" diff --git a/src/mlia/target/cortex_a/handlers.py b/src/mlia/target/cortex_a/handlers.py new file mode 100644 index 0000000..b2d5faa --- /dev/null +++ b/src/mlia/target/cortex_a/handlers.py @@ -0,0 +1,39 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Event handler.""" +from __future__ import annotations + +import logging + +from mlia.core.events import CollectedDataEvent +from mlia.core.handlers import WorkflowEventsHandler +from mlia.core.typing import PathOrFileLike +from mlia.nn.tensorflow.tflite_compat import TFLiteCompatibilityInfo +from mlia.target.cortex_a.events import CortexAAdvisorEventHandler +from mlia.target.cortex_a.events import CortexAAdvisorStartedEvent +from mlia.target.cortex_a.operators import CortexACompatibilityInfo +from mlia.target.cortex_a.reporters import cortex_a_formatters + +logger = logging.getLogger(__name__) + + +class CortexAEventHandler(WorkflowEventsHandler, CortexAAdvisorEventHandler): + """CLI event handler.""" + + def __init__(self, output: PathOrFileLike | None = None) -> None: + """Init event handler.""" + super().__init__(cortex_a_formatters, output) + + def on_collected_data(self, event: CollectedDataEvent) -> None: + """Handle CollectedDataEvent event.""" + data_item = 
event.data_item + + if isinstance(data_item, CortexACompatibilityInfo): + self.reporter.submit(data_item.operators, delay_print=True) + + if isinstance(data_item, TFLiteCompatibilityInfo) and not data_item.compatible: + self.reporter.submit(data_item, delay_print=True) + + def on_cortex_a_advisor_started(self, event: CortexAAdvisorStartedEvent) -> None: + """Handle CortexAAdvisorStarted event.""" + self.reporter.submit(event.device) diff --git a/src/mlia/target/cortex_a/operator_compatibility.py b/src/mlia/target/cortex_a/operator_compatibility.py new file mode 100644 index 0000000..c474e75 --- /dev/null +++ b/src/mlia/target/cortex_a/operator_compatibility.py @@ -0,0 +1,184 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Collection of Cortex-A operator compatibility information.""" +from __future__ import annotations + +from typing import Any + +ARMNN_TFLITE_DELEGATE: dict[str, dict[str, Any]] = { + "metadata": { + "backend": "Arm NN TensorFlow Lite delegate", + "version": "22.08", + }, + # BUILTIN OPERATORS + "builtin_ops": { + "ABS": {}, + "ADD": {}, + "ARG_MAX": {}, + "ARG_MIN": {}, + "AVERAGE_POOL_2D": { + "supported_fused_activation": [ + "RELU", + "RELU6", + "RELU_N1_TO_1", + "SIGMOID", + "TANH", + "NONE", + ] + }, + "BATCH_TO_SPACE_ND": {}, + "CAST": {}, + "CONCATENATION": { + "supported_fused_activation": [ + "RELU", + "RELU6", + "RELU_N1_TO_1", + "SIGMOID", + "TANH", + "NONE", + ] + }, + "CONV_2D": { + "supported_fused_activation": [ + "RELU", + "RELU6", + "RELU_N1_TO_1", + "SIGMOID", + "TANH", + "NONE", + ] + }, + "CONV_3D": { + "supported_fused_activation": [ + "RELU", + "RELU6", + "RELU_N1_TO_1", + "SIGMOID", + "TANH", + "NONE", + ] + }, + "DEPTH_TO_SPACE": {}, + "DEPTHWISE_CONV_2D": { + "supported_fused_activation": [ + "RELU", + "RELU6", + "RELU_N1_TO_1", + "SIGMOID", + "TANH", + "NONE", + ] + }, + "DEQUANTIZE": {}, + "DIV": {}, + "EQUAL": {}, + "ELU": {}, + "EXP": {}, + 
"EXPAND_DIMS": {}, + "FILL": {}, + "FLOOR": {}, + "FLOOR_DIV": {}, + "FULLY_CONNECTED": { + "supported_fused_activation": [ + "RELU", + "RELU6", + "RELU_N1_TO_1", + "SIGMOID", + "TANH", + "NONE", + ] + }, + "GATHER": {}, + "GATHER_ND": {}, + "GREATER": {}, + "GREATER_EQUAL": {}, + "HARD_SWISH": {}, + "L2_NORMALIZATION": {}, + "L2_POOL_2D": {}, + "LESS": {}, + "LESS_EQUAL": {}, + "LOCAL_RESPONSE_NORMALIZATION": {}, + "LOG": {}, + "LOGICAL_AND": {}, + "LOGICAL_NOT": {}, + "LOGICAL_OR": {}, + "LOGISTIC": {}, + "LOG_SOFTMAX": {}, + "LSTM": {}, + "MAXIMUM": {}, + "MAX_POOL_2D": { + "supported_fused_activation": [ + "RELU", + "RELU6", + "RELU_N1_TO_1", + "SIGMOID", + "TANH", + "NONE", + ] + }, + "MEAN": {}, + "MINIMUM": {}, + "MIRROR_PAD": {}, + "MUL": {}, + "NEG": {}, + "NOT_EQUAL": {}, + "PACK": {}, + "PAD": {}, + "PADV2": {}, + "PRELU": {}, + "QUANTIZE": {}, + "RANK": {}, + "REDUCE_MAX": {}, + "REDUCE_MIN": {}, + "REDUCE_PROD": {}, + "RELU": {}, + "RELU6": {}, + "RELU_N1_TO_1": {}, + "RESHAPE": {}, + "RESIZE_BILINEAR": {}, + "RESIZE_NEAREST_NEIGHBOR": {}, + "RSQRT": {}, + "SHAPE": {}, + "SIN": {}, + "SOFTMAX": {}, + "SPACE_TO_BATCH_ND": {}, + "SPACE_TO_DEPTH": {}, + "SPLIT": {}, + "SPLIT_V": {}, + "SQRT": {}, + "SQUEEZE": {}, + "STRIDED_SLICE": {}, + "SUB": {}, + "SUM": {}, + "TANH": {}, + "TRANSPOSE": {}, + "TRANSPOSE_CONV": {}, + "UNIDIRECTIONAL_SEQUENCE_LSTM": {}, + "UNPACK": {}, + }, + # CUSTOM OPERATORS + "custom_ops": { + "AveragePool3D": { + "supported_fused_activation": [ + "RELU", + "RELU6", + "RELU_N1_TO_1", + "SIGMOID", + "SIGN_BIT", + "TANH", + "NONE", + ] + }, + "MaxPool3D": { + "supported_fused_activation": [ + "RELU", + "RELU6", + "RELU_N1_TO_1", + "SIGMOID", + "SIGN_BIT", + "TANH", + "NONE", + ] + }, + }, +} diff --git a/src/mlia/target/cortex_a/operators.py b/src/mlia/target/cortex_a/operators.py new file mode 100644 index 0000000..91f1886 --- /dev/null +++ b/src/mlia/target/cortex_a/operators.py @@ -0,0 +1,148 @@ +# SPDX-FileCopyrightText: Copyright 
2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Cortex-A tools module.""" +from __future__ import annotations + +from dataclasses import dataclass +from enum import Enum +from pathlib import Path +from typing import Any +from typing import ClassVar + +from mlia.nn.tensorflow.tflite_graph import Op +from mlia.nn.tensorflow.tflite_graph import parse_subgraphs +from mlia.nn.tensorflow.tflite_graph import TFL_ACTIVATION_FUNCTION +from mlia.target.cortex_a.operator_compatibility import ( + ARMNN_TFLITE_DELEGATE as TFLITE_DELEGATE_COMPAT, +) + + +@dataclass +class Operator: + """Cortex-A compatibility information of the operator.""" + + BUILTIN_COMPATIBILITY = TFLITE_DELEGATE_COMPAT["builtin_ops"] + CUSTOM_COMPATIBILITY = TFLITE_DELEGATE_COMPAT["custom_ops"] + + class SupportType(Enum): + """Type of operator support.""" + + COMPATIBLE = "Compatible" + OP_NOT_SUPPORTED = "Operator not supported" + ACTIVATION_NOT_SUPPORTED = "Activation not supported" + + name: str + location: str + support_type: SupportType + activation_func: TFL_ACTIVATION_FUNCTION + custom_name: str | None = None + + @property + def is_cortex_a_compatible(self) -> bool: + """Check if this operator is compatible.""" + return self.support_type == Operator.SupportType.COMPATIBLE + + @property + def full_name(self) -> str: + """Returun the full name including the custom name if applicable.""" + return self.name + (f" - '{self.custom_name}'" if self.custom_name else "") + + @property + def is_custom(self) -> bool: + """Check if this is a custom operator.""" + return bool(self.custom_name) + + @property + def compatibility_data(self) -> dict[str, dict[str, Any]]: + """Get the compatibility data (builtin or custom ops).""" + return ( + Operator.CUSTOM_COMPATIBILITY + if self.is_custom + else Operator.BUILTIN_COMPATIBILITY + ) + + @property + def supported_activation_functions(self) -> list[str]: + """Return a list of fused activation functions supported by this op.""" + 
op_name = self.custom_name if self.custom_name else self.name + return self.compatibility_data[op_name].get("supported_fused_activation", []) + + @classmethod + def from_tflite_op(cls, tfl_op: Op, location: str) -> Operator: + """Create a new instance from TensorFlow Lite operator and location.""" + support_type = cls._get_support_type(tfl_op) + activation_func = ( + tfl_op.builtin_options["fused_activation_function"] + if ( + tfl_op.builtin_options + and "fused_activation_function" in tfl_op.builtin_options + ) + else TFL_ACTIVATION_FUNCTION.NONE + ) + return Operator( + tfl_op.type, + location, + support_type, + activation_func=activation_func, + custom_name=(tfl_op.custom_type if tfl_op.is_custom else None), + ) + + @staticmethod + def _get_support_type(tfl_op: Op) -> Operator.SupportType: + """Get the support type from the TensorFlow Lite operator.""" + compat_data = ( + Operator.CUSTOM_COMPATIBILITY + if tfl_op.is_custom + else Operator.BUILTIN_COMPATIBILITY + ) + op_type = tfl_op.custom_type if tfl_op.is_custom else tfl_op.type + + if op_type not in compat_data: + return Operator.SupportType.OP_NOT_SUPPORTED + + compat_op = compat_data[op_type] + if "supported_fused_activation" in compat_op: + assert tfl_op.builtin_options + assert "fused_activation_function" in tfl_op.builtin_options + if ( + tfl_op.builtin_options["fused_activation_function"] + not in compat_op["supported_fused_activation"] + ): + return Operator.SupportType.ACTIVATION_NOT_SUPPORTED + + return Operator.SupportType.COMPATIBLE + + +@dataclass +class CortexACompatibilityInfo: + """Model's operators.""" + + cortex_a_compatible: bool + operators: list[Operator] + backend_info: ClassVar[str] = ( + f"{TFLITE_DELEGATE_COMPAT['metadata']['backend']} " + f"{TFLITE_DELEGATE_COMPAT['metadata']['version']}" + ) + + +def get_cortex_a_compatibility_info(model_path: Path) -> CortexACompatibilityInfo: + """Return list of model's operators.""" + model = parse_subgraphs(model_path) + + op_list = [ + 
Operator.from_tflite_op(oper, f"subgraph:{g_idx},oper:{op_idx}") + for g_idx, g in enumerate(model) + for op_idx, oper in enumerate(g) + ] + all_compatible = all(oper.is_cortex_a_compatible for oper in op_list) + compat_info = CortexACompatibilityInfo(all_compatible, op_list) + + return compat_info + + +def report() -> None: + """Generate supported operators report.""" + raise Exception( + "Generating a supported operators report is not " + "currently supported with Cortex-A target profile." + ) diff --git a/src/mlia/target/cortex_a/reporters.py b/src/mlia/target/cortex_a/reporters.py new file mode 100644 index 0000000..d43d6c3 --- /dev/null +++ b/src/mlia/target/cortex_a/reporters.py @@ -0,0 +1,140 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Reports module.""" +from __future__ import annotations + +from typing import Any +from typing import Callable +from typing import cast + +from mlia.core.advice_generation import Advice +from mlia.core.reporters import report_advice +from mlia.core.reporting import Cell +from mlia.core.reporting import Column +from mlia.core.reporting import Format +from mlia.core.reporting import NestedReport +from mlia.core.reporting import Report +from mlia.core.reporting import ReportItem +from mlia.core.reporting import Table +from mlia.nn.tensorflow.tflite_compat import TFLiteCompatibilityInfo +from mlia.target.cortex_a.config import CortexAConfiguration +from mlia.target.cortex_a.operators import Operator +from mlia.utils.console import style_improvement +from mlia.utils.types import is_list_of + + +def report_device(device: CortexAConfiguration) -> Report: + """Generate report for the device.""" + return NestedReport( + "Device information", + "device", + [ + ReportItem("Target", alias="target", value=device.target), + ], + ) + + +def report_tflite_compatiblity(compat_info: TFLiteCompatibilityInfo) -> Report: + """Generate report for the TensorFlow Lite 
compatibility information.""" + if compat_info.conversion_errors: + return Table( + [ + Column("#", only_for=["plain_text"]), + Column("Operator", alias="operator"), + Column( + "Operator location", + alias="operator_location", + fmt=Format(wrap_width=25), + ), + Column("Error code", alias="error_code"), + Column( + "Error message", alias="error_message", fmt=Format(wrap_width=25) + ), + ], + [ + ( + index + 1, + err.operator, + ", ".join(err.location), + err.code.name, + err.message, + ) + for index, err in enumerate(compat_info.conversion_errors) + ], + name="TensorFlow Lite conversion errors", + alias="tensorflow_lite_conversion_errors", + ) + + return Table( + columns=[ + Column("Reason", alias="reason"), + Column( + "Exception details", + alias="exception_details", + fmt=Format(wrap_width=40), + ), + ], + rows=[ + ( + "TensorFlow Lite compatibility check failed with exception", + str(compat_info.conversion_exception), + ), + ], + name="TensorFlow Lite compatibility errors", + alias="tflite_compatibility", + ) + + +def report_cortex_a_operators(ops: list[Operator]) -> Report: + """Generate report for the operators.""" + return Table( + [ + Column("#", only_for=["plain_text"]), + Column( + "Operator location", + alias="operator_location", + fmt=Format(wrap_width=30), + ), + Column("Operator name", alias="operator_name", fmt=Format(wrap_width=20)), + Column( + "Arm NN TFLite Delegate compatibility", + alias="cortex_a_compatible", + fmt=Format(wrap_width=40), + ), + ], + [ + ( + index + 1, + op.location, + op.full_name, + Cell( + op.support_type, + Format( + wrap_width=30, + style=style_improvement(op.is_cortex_a_compatible), + str_fmt=lambda v: cast(str, v.value), + ), + ), + ) + for index, op in enumerate(ops) + ], + name="Operators", + alias="operators", + ) + + +def cortex_a_formatters(data: Any) -> Callable[[Any], Report]: + """Find appropriate formatter for the provided data.""" + if is_list_of(data, Advice): + return report_advice + + if isinstance(data, 
CortexAConfiguration): + return report_device + + if isinstance(data, TFLiteCompatibilityInfo): + return report_tflite_compatiblity + + if is_list_of(data, Operator): + return report_cortex_a_operators + + raise Exception(f"Unable to find appropriate formatter for {data}") diff --git a/src/mlia/target/ethos_u/__init__.py b/src/mlia/target/ethos_u/__init__.py new file mode 100644 index 0000000..503919d --- /dev/null +++ b/src/mlia/target/ethos_u/__init__.py @@ -0,0 +1,3 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Ethos-U target module.""" diff --git a/src/mlia/target/ethos_u/advice_generation.py b/src/mlia/target/ethos_u/advice_generation.py new file mode 100644 index 0000000..edd78fd --- /dev/null +++ b/src/mlia/target/ethos_u/advice_generation.py @@ -0,0 +1,206 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Ethos-U advice generation.""" +from __future__ import annotations + +from functools import singledispatchmethod + +from mlia.core.advice_generation import Advice +from mlia.core.advice_generation import advice_category +from mlia.core.advice_generation import ContextAwareAdviceProducer +from mlia.core.advice_generation import FactBasedAdviceProducer +from mlia.core.common import AdviceCategory +from mlia.core.common import DataItem +from mlia.nn.tensorflow.optimizations.select import OptimizationSettings +from mlia.target.ethos_u.data_analysis import AllOperatorsSupportedOnNPU +from mlia.target.ethos_u.data_analysis import HasCPUOnlyOperators +from mlia.target.ethos_u.data_analysis import HasUnsupportedOnNPUOperators +from mlia.target.ethos_u.data_analysis import OptimizationResults + + +class EthosUAdviceProducer(FactBasedAdviceProducer): + """Ethos-U advice producer.""" + + @singledispatchmethod + def produce_advice(self, data_item: DataItem) -> None: # type: ignore + """Produce advice.""" + + 
@produce_advice.register + @advice_category(AdviceCategory.OPERATORS, AdviceCategory.ALL) + def handle_cpu_only_ops(self, data_item: HasCPUOnlyOperators) -> None: + """Advice for CPU only operators.""" + cpu_only_ops = ",".join(sorted(set(data_item.cpu_only_ops))) + cpu_only_ops_num = len(data_item.cpu_only_ops) + + self.add_advice( + [ + f"You have at least {cpu_only_ops_num} " + f"operator{'s' if cpu_only_ops_num > 1 else ''} that is CPU " + f"only: {cpu_only_ops}.", + "Using operators that are supported by the NPU will " + "improve performance.", + ] + + self.context.action_resolver.supported_operators_info() + ) + + @produce_advice.register + @advice_category(AdviceCategory.OPERATORS, AdviceCategory.ALL) + def handle_unsupported_operators( + self, data_item: HasUnsupportedOnNPUOperators + ) -> None: + """Advice for the unsupported operators.""" + self.add_advice( + [ + f"You have {data_item.npu_unsupported_ratio*100:.0f}% of operators " + "that cannot be placed on the NPU.", + "For better performance, please review the reasons reported " + "in the table, and adjust the model accordingly " + "where possible.", + ] + ) + + @produce_advice.register + @advice_category(AdviceCategory.OPERATORS, AdviceCategory.ALL) + def handle_all_operators_supported( + self, _data_item: AllOperatorsSupportedOnNPU + ) -> None: + """Advice if all operators supported.""" + self.add_advice( + [ + "You don't have any unsupported operators, your model will " + "run completely on NPU." 
+ ] + + self.context.action_resolver.check_performance() + ) + + @produce_advice.register + @advice_category(AdviceCategory.OPTIMIZATION, AdviceCategory.ALL) + def handle_optimization_results(self, data_item: OptimizationResults) -> None: + """Advice based on optimization results.""" + if not data_item.diffs or len(data_item.diffs) != 1: + return + + optim_details = data_item.diffs[0] + metrics = [ + (metric_name, optim_details.opt_diffs[metric_key]) + for (metric_name, metric_key) in ( + ("DRAM used (KB)", "dram"), + ("SRAM used (KB)", "sram"), + ("On chip flash used (KB)", "on_chip_flash"), + ("Off chip flash used (KB)", "off_chip_flash"), + ("NPU total cycles", "npu_total_cycles"), + ) + if metric_key in optim_details.opt_diffs + and not optim_details.opt_diffs[metric_key].same + ] + + improved = [ + f"- You have achieved {abs(metric_value.diff):.2f}% performance " + f"improvement in {metric_name}" + for metric_name, metric_value in metrics + if metric_value.improved + ] + + degraded = [ + f"- {metric_name} have degraded by {abs(metric_value.diff):.2f}%" + for metric_name, metric_value in metrics + if metric_value.degraded + ] + + opts = ", ".join(str(s) for s in optim_details.opt_type) + messages = [f"With the selected optimization ({opts})", *improved, *degraded] + + if improved: + if next_optimization_target := self.get_next_optimization_targets( + optim_details.opt_type + ): + next_optimization_target_as_str = " and/or ".join( + str(item) for item in next_optimization_target + ) + + messages.append( + "You can try to push the optimization target higher " + f"(e.g. {next_optimization_target_as_str}) " + "to check if those results can be further improved." + ) + messages += self.context.action_resolver.apply_optimizations( + opt_settings=next_optimization_target + ) + + elif degraded: + messages.append( + "The performance seems to have degraded after " + "applying the selected optimizations, " + "try exploring different optimization types/targets." 
+ ) + + self.add_advice(messages) + + self.add_advice( + [ + "The applied tooling techniques have an impact " + "on accuracy. Additional hyperparameter tuning may be required " + "after any optimization." + ] + ) + + @staticmethod + def get_next_optimization_targets( + opt_type: list[OptimizationSettings], + ) -> list[OptimizationSettings]: + """Get next optimization targets.""" + next_targets = (item.next_target() for item in opt_type) + + # filter out targets that have not been changed + valid_targets = [ + next_ + for next_, old in zip(next_targets, opt_type) + if ( + old.optimization_type == "pruning" + and old.optimization_target < next_.optimization_target + ) + or ( + old.optimization_type == "clustering" + and old.optimization_target > next_.optimization_target + ) + ] + return valid_targets + + +class EthosUStaticAdviceProducer(ContextAwareAdviceProducer): + """Advice producer that does not depend on input data.""" + + def produce_advice(self, data_item: DataItem) -> None: + """Do not process passed data items.""" + + def get_advice(self) -> Advice | list[Advice]: + """Return predefined advice based on category.""" + advice_per_category = { + AdviceCategory.PERFORMANCE: [ + Advice( + [ + "You can improve the inference time by using only operators " + "that are supported by the NPU.", + ] + + self.context.action_resolver.check_operator_compatibility() + ), + Advice( + [ + "Check if you can improve the performance by applying " + "tooling techniques to your model." 
+ ] + + self.context.action_resolver.apply_optimizations() + ), + ], + AdviceCategory.OPTIMIZATION: [ + Advice( + [ + "For better performance, make sure that all the operators " + "of your final TensorFlow Lite model are supported by the NPU.", + ] + + self.context.action_resolver.operator_compatibility_details() + ) + ], + } + + return advice_per_category.get(self.context.advice_category, []) diff --git a/src/mlia/target/ethos_u/advisor.py b/src/mlia/target/ethos_u/advisor.py new file mode 100644 index 0000000..b9d64ff --- /dev/null +++ b/src/mlia/target/ethos_u/advisor.py @@ -0,0 +1,194 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Ethos-U MLIA module.""" +from __future__ import annotations + +from pathlib import Path +from typing import Any + +from mlia.core.advice_generation import AdviceProducer +from mlia.core.advisor import DefaultInferenceAdvisor +from mlia.core.advisor import InferenceAdvisor +from mlia.core.common import AdviceCategory +from mlia.core.context import Context +from mlia.core.context import ExecutionContext +from mlia.core.data_analysis import DataAnalyzer +from mlia.core.data_collection import DataCollector +from mlia.core.events import Event +from mlia.core.typing import PathOrFileLike +from mlia.nn.tensorflow.utils import is_tflite_model +from mlia.target.ethos_u.advice_generation import EthosUAdviceProducer +from mlia.target.ethos_u.advice_generation import EthosUStaticAdviceProducer +from mlia.target.ethos_u.config import EthosUConfiguration +from mlia.target.ethos_u.config import get_target +from mlia.target.ethos_u.data_analysis import EthosUDataAnalyzer +from mlia.target.ethos_u.data_collection import EthosUOperatorCompatibility +from mlia.target.ethos_u.data_collection import EthosUOptimizationPerformance +from mlia.target.ethos_u.data_collection import EthosUPerformance +from mlia.target.ethos_u.events import EthosUAdvisorStartedEvent +from 
mlia.target.ethos_u.handlers import EthosUEventHandler +from mlia.utils.types import is_list_of + + +class EthosUInferenceAdvisor(DefaultInferenceAdvisor): + """Ethos-U Inference Advisor.""" + + @classmethod + def name(cls) -> str: + """Return name of the advisor.""" + return "ethos_u_inference_advisor" + + def get_collectors(self, context: Context) -> list[DataCollector]: + """Return list of the data collectors.""" + model = self.get_model(context) + device = self._get_device(context) + backends = self._get_backends(context) + + collectors: list[DataCollector] = [] + + if AdviceCategory.OPERATORS in context.advice_category: + collectors.append(EthosUOperatorCompatibility(model, device)) + + # Performance and optimization are mutually exclusive. + # Decide which one to use (taking into account the model format). + if is_tflite_model(model): + # TensorFlow Lite models do not support optimization (only performance)! + if context.advice_category == AdviceCategory.OPTIMIZATION: + raise Exception( + "Command 'optimization' is not supported for TensorFlow Lite files." 
+ ) + if AdviceCategory.PERFORMANCE in context.advice_category: + collectors.append(EthosUPerformance(model, device, backends)) + else: + # Keras/SavedModel: Prefer optimization + if AdviceCategory.OPTIMIZATION in context.advice_category: + optimization_settings = self._get_optimization_settings(context) + collectors.append( + EthosUOptimizationPerformance( + model, device, optimization_settings, backends + ) + ) + elif AdviceCategory.PERFORMANCE in context.advice_category: + collectors.append(EthosUPerformance(model, device, backends)) + + return collectors + + def get_analyzers(self, context: Context) -> list[DataAnalyzer]: + """Return list of the data analyzers.""" + return [ + EthosUDataAnalyzer(), + ] + + def get_producers(self, context: Context) -> list[AdviceProducer]: + """Return list of the advice producers.""" + return [ + EthosUAdviceProducer(), + EthosUStaticAdviceProducer(), + ] + + def get_events(self, context: Context) -> list[Event]: + """Return list of the startup events.""" + model = self.get_model(context) + device = self._get_device(context) + + return [ + EthosUAdvisorStartedEvent(device=device, model=model), + ] + + def _get_device(self, context: Context) -> EthosUConfiguration: + """Get device.""" + target_profile = self.get_target_profile(context) + + return get_target(target_profile) + + def _get_optimization_settings(self, context: Context) -> list[list[dict]]: + """Get optimization settings.""" + return self.get_parameter( # type: ignore + EthosUOptimizationPerformance.name(), + "optimizations", + expected_type=list, + expected=False, + context=context, + ) + + def _get_backends(self, context: Context) -> list[str] | None: + """Get list of backends.""" + return self.get_parameter( # type: ignore + self.name(), + "backends", + expected_type=list, + expected=False, + context=context, + ) + + +def configure_and_get_ethosu_advisor( + context: ExecutionContext, + target_profile: str, + model: str | Path, + output: PathOrFileLike | None = None, 
+ **extra_args: Any, +) -> InferenceAdvisor: + """Create and configure Ethos-U advisor.""" + if context.event_handlers is None: + context.event_handlers = [EthosUEventHandler(output)] + + if context.config_parameters is None: + context.config_parameters = _get_config_parameters( + model, target_profile, **extra_args + ) + + return EthosUInferenceAdvisor() + + +_DEFAULT_OPTIMIZATION_TARGETS = [ + { + "optimization_type": "pruning", + "optimization_target": 0.5, + "layers_to_optimize": None, + }, + { + "optimization_type": "clustering", + "optimization_target": 32, + "layers_to_optimize": None, + }, +] + + +def _get_config_parameters( + model: str | Path, + target_profile: str, + **extra_args: Any, +) -> dict[str, Any]: + """Get configuration parameters for the advisor.""" + advisor_parameters: dict[str, Any] = { + "ethos_u_inference_advisor": { + "model": model, + "target_profile": target_profile, + }, + } + + # Specifying backends is optional (default is used) + backends = extra_args.get("backends") + if backends is not None: + if not is_list_of(backends, str): + raise Exception("Backends value has wrong format") + + advisor_parameters["ethos_u_inference_advisor"]["backends"] = backends + + optimization_targets = extra_args.get("optimization_targets") + if not optimization_targets: + optimization_targets = _DEFAULT_OPTIMIZATION_TARGETS + + if not is_list_of(optimization_targets, dict): + raise Exception("Optimization targets value has wrong format") + + advisor_parameters.update( + { + "ethos_u_model_optimizations": { + "optimizations": [optimization_targets], + }, + } + ) + + return advisor_parameters diff --git a/src/mlia/target/ethos_u/config.py b/src/mlia/target/ethos_u/config.py new file mode 100644 index 0000000..8d8f481 --- /dev/null +++ b/src/mlia/target/ethos_u/config.py @@ -0,0 +1,90 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +"""Ethos-U configuration.""" +from __future__ import annotations + +import logging +from typing import Any + +from mlia.backend.vela.compiler import resolve_compiler_config +from mlia.backend.vela.compiler import VelaCompilerOptions +from mlia.target.config import IPConfiguration +from mlia.utils.filesystem import get_profile +from mlia.utils.filesystem import get_vela_config + + +logger = logging.getLogger(__name__) + + +class EthosUConfiguration(IPConfiguration): + """Ethos-U configuration.""" + + def __init__(self, target_profile: str) -> None: + """Init Ethos-U target configuration.""" + target_data = get_profile(target_profile) + _check_target_data_complete(target_data) + + target = target_data["target"] + super().__init__(target) + + mac = target_data["mac"] + _check_device_options_valid(target, mac) + + self.mac = mac + self.compiler_options = VelaCompilerOptions( + system_config=target_data["system_config"], + memory_mode=target_data["memory_mode"], + config_files=str(get_vela_config()), + accelerator_config=f"{self.target}-{mac}", # type: ignore + ) + + @property + def resolved_compiler_config(self) -> dict[str, Any]: + """Resolve compiler configuration.""" + return resolve_compiler_config(self.compiler_options) + + def __str__(self) -> str: + """Return string representation.""" + return ( + f"Ethos-U target={self.target} " + f"mac={self.mac} " + f"compiler_options={self.compiler_options}" + ) + + def __repr__(self) -> str: + """Return string representation.""" + return f"" + + +def get_target(target_profile: str) -> EthosUConfiguration: + """Get target instance based on provided params.""" + if not target_profile: + raise Exception("No target profile given") + + return EthosUConfiguration(target_profile) + + +def _check_target_data_complete(target_data: dict[str, Any]) -> None: + """Check if profile contains all needed data.""" + mandatory_keys = {"target", "mac", "system_config", "memory_mode"} + missing_keys = 
sorted(mandatory_keys - target_data.keys()) + + if missing_keys: + raise Exception(f"Mandatory fields missing from target profile: {missing_keys}") + + +def _check_device_options_valid(target: str, mac: int) -> None: + """Check if mac is valid for selected device.""" + target_mac_ranges = { + "ethos-u55": [32, 64, 128, 256], + "ethos-u65": [256, 512], + } + + if target not in target_mac_ranges: + raise Exception(f"Unsupported target: {target}") + + target_mac_range = target_mac_ranges[target] + if mac not in target_mac_range: + raise Exception( + f"Mac value for selected device should be in {target_mac_range}" + ) diff --git a/src/mlia/target/ethos_u/data_analysis.py b/src/mlia/target/ethos_u/data_analysis.py new file mode 100644 index 0000000..6b66734 --- /dev/null +++ b/src/mlia/target/ethos_u/data_analysis.py @@ -0,0 +1,153 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Ethos-U data analysis module.""" +from __future__ import annotations + +from dataclasses import dataclass +from functools import singledispatchmethod + +from mlia.backend.vela.compat import Operators +from mlia.core.common import DataItem +from mlia.core.data_analysis import Fact +from mlia.core.data_analysis import FactExtractor +from mlia.nn.tensorflow.optimizations.select import OptimizationSettings +from mlia.target.ethos_u.performance import OptimizationPerformanceMetrics + + +@dataclass +class HasCPUOnlyOperators(Fact): + """Model has CPU only operators.""" + + cpu_only_ops: list[str] + + +@dataclass +class HasUnsupportedOnNPUOperators(Fact): + """Model has unsupported on NPU operators.""" + + npu_unsupported_ratio: float + + +@dataclass +class AllOperatorsSupportedOnNPU(Fact): + """All model's operators supported on NPU.""" + + +@dataclass +class PerfMetricDiff: + """Performance metric difference.""" + + original_value: int | float + optimized_value: int | float + + @property + def diff(self) -> float: + 
"""Difference between metrics.""" + if self.original_value == 0: + return 0 + + return 100 - ((self.optimized_value / self.original_value) * 100) + + @property + def improved(self) -> bool: + """Return true if metric improved.""" + return self.diff > 0 + + @property + def degraded(self) -> bool: + """Return true if metric degraded.""" + return self.diff < 0 + + @property + def same(self) -> bool: + """Return true if metric stays the same.""" + return self.diff == 0 + + +@dataclass +class OptimizationDiff: + """Optimization performance impact.""" + + opt_type: list[OptimizationSettings] + opt_diffs: dict[str, PerfMetricDiff] + + +@dataclass +class OptimizationResults(Fact): + """Optimization results.""" + + diffs: list[OptimizationDiff] + + +class EthosUDataAnalyzer(FactExtractor): + """Ethos-U data analyzer.""" + + @singledispatchmethod + def analyze_data(self, data_item: DataItem) -> None: # type: ignore + """Analyse the data.""" + + @analyze_data.register + def analyze_operator_compatibility(self, operators: Operators) -> None: + """Analyse operator compatibility information.""" + cpu_only = [op.op_type for op in operators.ops if op.cpu_only] + if cpu_only: + self.add_fact(HasCPUOnlyOperators(cpu_only)) + + if operators.npu_unsupported_ratio != 0: + self.add_fact(HasUnsupportedOnNPUOperators(operators.npu_unsupported_ratio)) + + if operators.npu_unsupported_ratio == 0: + self.add_fact(AllOperatorsSupportedOnNPU()) + + @analyze_data.register + def analyze_optimization_results( + self, optimization_results: OptimizationPerformanceMetrics + ) -> None: + """Analyse optimization performance metrics.""" + optimizations = optimization_results.optimizations_perf_metrics + if not optimizations: + return + + orig = optimization_results.original_perf_metrics.in_kilobytes() + orig_memory = orig.memory_usage + orig_cycles = orig.npu_cycles + + diffs: list[OptimizationDiff] = [] + for opt_type, opt_perf_metrics in optimizations: + opt = opt_perf_metrics.in_kilobytes() + 
opt_memory = opt.memory_usage + opt_cycles = opt.npu_cycles + + opt_diffs: dict[str, PerfMetricDiff] = {} + + if orig_memory and opt_memory: + opt_diffs.update( + { + "sram": PerfMetricDiff( + orig_memory.sram_memory_area_size, + opt_memory.sram_memory_area_size, + ), + "dram": PerfMetricDiff( + orig_memory.dram_memory_area_size, + opt_memory.dram_memory_area_size, + ), + "on_chip_flash": PerfMetricDiff( + orig_memory.on_chip_flash_memory_area_size, + opt_memory.on_chip_flash_memory_area_size, + ), + "off_chip_flash": PerfMetricDiff( + orig_memory.off_chip_flash_memory_area_size, + opt_memory.off_chip_flash_memory_area_size, + ), + } + ) + if orig_cycles and opt_cycles: + opt_diffs["npu_total_cycles"] = PerfMetricDiff( + orig_cycles.npu_total_cycles, + opt_cycles.npu_total_cycles, + ) + + diff = OptimizationDiff(opt_type=opt_type, opt_diffs=opt_diffs) + diffs.append(diff) + + self.add_fact(OptimizationResults(diffs)) diff --git a/src/mlia/target/ethos_u/data_collection.py b/src/mlia/target/ethos_u/data_collection.py new file mode 100644 index 0000000..258876d --- /dev/null +++ b/src/mlia/target/ethos_u/data_collection.py @@ -0,0 +1,187 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +"""Data collection module for Ethos-U.""" +from __future__ import annotations + +import logging +from pathlib import Path + +from mlia.backend.vela.compat import Operators +from mlia.backend.vela.compat import supported_operators +from mlia.core.context import Context +from mlia.core.data_collection import ContextAwareDataCollector +from mlia.core.errors import FunctionalityNotSupportedError +from mlia.core.performance import estimate_performance +from mlia.nn.tensorflow.config import get_keras_model +from mlia.nn.tensorflow.config import get_tflite_model +from mlia.nn.tensorflow.config import KerasModel +from mlia.nn.tensorflow.optimizations.select import get_optimizer +from mlia.nn.tensorflow.optimizations.select import OptimizationSettings +from mlia.nn.tensorflow.utils import save_keras_model +from mlia.target.ethos_u.config import EthosUConfiguration +from mlia.target.ethos_u.performance import EthosUPerformanceEstimator +from mlia.target.ethos_u.performance import OptimizationPerformanceMetrics +from mlia.target.ethos_u.performance import PerformanceMetrics +from mlia.utils.logging import log_action +from mlia.utils.types import is_list_of + +logger = logging.getLogger(__name__) + + +class EthosUOperatorCompatibility(ContextAwareDataCollector): + """Collect operator compatibility information.""" + + def __init__(self, model: Path, device: EthosUConfiguration) -> None: + """Init operator compatibility data collector.""" + self.model = model + self.device = device + + def collect_data(self) -> Operators: + """Collect operator compatibility information.""" + tflite_model = get_tflite_model(self.model, self.context) + + with log_action("Checking operator compatibility ..."): + return supported_operators( + Path(tflite_model.model_path), self.device.compiler_options + ) + + @classmethod + def name(cls) -> str: + """Return name of the collector.""" + return "ethos_u_operator_compatibility" + + +class 
EthosUPerformance(ContextAwareDataCollector): + """Collect performance metrics.""" + + def __init__( + self, + model: Path, + device: EthosUConfiguration, + backends: list[str] | None = None, + ) -> None: + """Init performance data collector.""" + self.model = model + self.device = device + self.backends = backends + + def collect_data(self) -> PerformanceMetrics: + """Collect model performance metrics.""" + tflite_model = get_tflite_model(self.model, self.context) + estimator = EthosUPerformanceEstimator( + self.context, + self.device, + self.backends, + ) + + return estimator.estimate(tflite_model) + + @classmethod + def name(cls) -> str: + """Return name of the collector.""" + return "ethos_u_performance" + + +class OptimizeModel: + """Helper class for model optimization.""" + + def __init__( + self, context: Context, opt_settings: list[OptimizationSettings] + ) -> None: + """Init helper.""" + self.context = context + self.opt_settings = opt_settings + + def __call__(self, keras_model: KerasModel) -> KerasModel: + """Run optimization.""" + optimizer = get_optimizer(keras_model, self.opt_settings) + + opts_as_str = ", ".join(str(opt) for opt in self.opt_settings) + logger.info("Applying model optimizations - [%s]", opts_as_str) + optimizer.apply_optimization() + + model = optimizer.get_model() + model_path = self.context.get_model_path("optimized_model.h5") + save_keras_model(model, model_path) + + return KerasModel(model_path) + + +class EthosUOptimizationPerformance(ContextAwareDataCollector): + """Collect performance metrics for the optimizations.""" + + def __init__( + self, + model: Path, + device: EthosUConfiguration, + optimizations: list[list[dict]], + backends: list[str] | None = None, + ) -> None: + """Init performance optimizations data collector.""" + self.model = model + self.device = device + self.optimizations = optimizations + self.backends = backends + + def collect_data(self) -> OptimizationPerformanceMetrics | None: + """Collect performance 
metrics for the optimizations.""" + logger.info("Estimate performance ...") + + if not self.optimizations: + raise FunctionalityNotSupportedError( + reason="Unable to estimate model optimizations impact", + description="No optimization targets provided", + ) + + opt_settings = self._parse_optimization_params(self.optimizations) + + try: + keras_model = get_keras_model(self.model, self.context) + except NotImplementedError as err: + raise FunctionalityNotSupportedError( + reason="Unable to run model optimizations", + description=f"{self.model} is not a Keras model and " + "could not be converted to a Keras model", + ) from err + + optimizers = [OptimizeModel(self.context, opts) for opts in opt_settings] + + estimator = EthosUPerformanceEstimator( + self.context, + self.device, + self.backends, + ) + original_metrics, *optimized_metrics = estimate_performance( + keras_model, estimator, optimizers # type: ignore + ) + + result = OptimizationPerformanceMetrics( + original_perf_metrics=original_metrics, + optimizations_perf_metrics=list(zip(opt_settings, optimized_metrics)), + ) + return result + + @staticmethod + def _parse_optimization_params( + optimizations: list[list[dict]], + ) -> list[list[OptimizationSettings]]: + """Parse optimization parameters.""" + if not is_list_of(optimizations, list): + raise Exception("Optimization parameters expected to be a list") + + return [ + [ + OptimizationSettings( + item.get("optimization_type"), # type: ignore + item.get("optimization_target"), # type: ignore + item.get("layers_to_optimized"), + ) + for item in opt_configuration + ] + for opt_configuration in optimizations + ] + + @classmethod + def name(cls) -> str: + """Return name of the collector.""" + return "ethos_u_model_optimizations" diff --git a/src/mlia/target/ethos_u/events.py b/src/mlia/target/ethos_u/events.py new file mode 100644 index 0000000..37cc1a9 --- /dev/null +++ b/src/mlia/target/ethos_u/events.py @@ -0,0 +1,24 @@ +# SPDX-FileCopyrightText: Copyright 
@dataclass
class EthosUAdvisorStartedEvent(Event):
    """Event with Ethos-U advisor parameters."""

    # Path to the model under analysis.
    model: Path
    # Target device configuration the advisor runs against.
    device: EthosUConfiguration


class EthosUAdvisorEventHandler(EventDispatcher):
    """Event handler for the Ethos-U inference advisor."""

    def on_ethos_u_advisor_started(self, event: EthosUAdvisorStartedEvent) -> None:
        """Handle EthosUAdvisorStarted event."""


logger = logging.getLogger(__name__)


class EthosUEventHandler(WorkflowEventsHandler, EthosUAdvisorEventHandler):
    """CLI event handler."""

    def __init__(self, output: PathOrFileLike | None = None) -> None:
        """Init event handler."""
        super().__init__(ethos_u_formatters, output)

    def on_collected_data(self, event: CollectedDataEvent) -> None:
        """Handle CollectedDataEvent event."""
        item = event.data_item

        if isinstance(item, Operators):
            self.reporter.submit([item.ops, item], delay_print=True)

        if isinstance(item, PerformanceMetrics):
            self.reporter.submit(item, delay_print=True, space=True)

        if isinstance(item, OptimizationPerformanceMetrics):
            baseline = item.original_perf_metrics
            # Nothing to report unless at least one optimization was measured.
            if not item.optimizations_perf_metrics:
                return

            _opt_settings, optimized = item.optimizations_perf_metrics[0]

            self.reporter.submit(
                [baseline, optimized],
                delay_print=True,
                columns_name="Metrics",
                title="Performance metrics",
                space=True,
            )

    def on_ethos_u_advisor_started(self, event: EthosUAdvisorStartedEvent) -> None:
        """Handle EthosUAdvisorStarted event."""
        self.reporter.submit(event.device)


def report() -> None:
    """Generate supported operators report."""
    generate_supported_operators_report()
logger = logging.getLogger(__name__)


@dataclass
class NPUCycles:
    """NPU cycles metrics."""

    npu_active_cycles: int
    npu_idle_cycles: int
    npu_total_cycles: int
    npu_axi0_rd_data_beat_received: int
    npu_axi0_wr_data_beat_written: int
    npu_axi1_rd_data_beat_received: int


BYTES_PER_KILOBYTE = 1024


class MemorySizeType(Enum):
    """Memory size type enumeration."""

    BYTES = 0
    KILOBYTES = 1


@dataclass
class MemoryUsage:
    """Memory usage metrics."""

    sram_memory_area_size: int | float
    dram_memory_area_size: int | float
    unknown_memory_area_size: int | float
    on_chip_flash_memory_area_size: int | float
    off_chip_flash_memory_area_size: int | float
    memory_size_type: MemorySizeType = MemorySizeType.BYTES

    _default_columns = [
        "SRAM used",
        "DRAM used",
        "Unknown memory used",
        "On chip flash used",
        "Off chip flash used",
    ]

    def in_kilobytes(self) -> MemoryUsage:
        """Return memory usage with values in kilobytes."""
        # Already converted - nothing to do.
        if self.memory_size_type == MemorySizeType.KILOBYTES:
            return self

        scaled = (
            area_size / BYTES_PER_KILOBYTE
            for area_size in (
                self.sram_memory_area_size,
                self.dram_memory_area_size,
                self.unknown_memory_area_size,
                self.on_chip_flash_memory_area_size,
                self.off_chip_flash_memory_area_size,
            )
        )

        return MemoryUsage(
            *scaled,  # type: ignore
            memory_size_type=MemorySizeType.KILOBYTES,
        )


@dataclass
class PerformanceMetrics:
    """Performance metrics."""

    device: EthosUConfiguration
    npu_cycles: NPUCycles | None
    memory_usage: MemoryUsage | None

    def in_kilobytes(self) -> PerformanceMetrics:
        """Return metrics with memory usage in KiB."""
        memory = (
            None if self.memory_usage is None else self.memory_usage.in_kilobytes()
        )
        return PerformanceMetrics(self.device, self.npu_cycles, memory)


@dataclass
class OptimizationPerformanceMetrics:
    """Optimization performance metrics."""

    original_perf_metrics: PerformanceMetrics
    optimizations_perf_metrics: list[
        tuple[list[OptimizationSettings], PerformanceMetrics]
    ]


class VelaPerformanceEstimator(
    PerformanceEstimator[Union[Path, ModelConfiguration], MemoryUsage]
):
    """Vela based performance estimator."""

    def __init__(self, context: Context, device: EthosUConfiguration) -> None:
        """Init Vela based performance estimator."""
        self.context = context
        self.device = device

    def estimate(self, model: Path | ModelConfiguration) -> MemoryUsage:
        """Estimate performance."""
        with log_action("Getting the memory usage metrics ..."):
            if isinstance(model, ModelConfiguration):
                model_path = Path(model.model_path)
            else:
                model_path = model

            metrics = vela_perf.estimate_performance(
                model_path, self.device.compiler_options
            )

            return MemoryUsage(
                metrics.sram_memory_area_size,
                metrics.dram_memory_area_size,
                metrics.unknown_memory_area_size,
                metrics.on_chip_flash_memory_area_size,
                metrics.off_chip_flash_memory_area_size,
            )


class CorstonePerformanceEstimator(
    PerformanceEstimator[Union[Path, ModelConfiguration], NPUCycles]
):
    """Corstone-based performance estimator."""

    def __init__(
        self, context: Context, device: EthosUConfiguration, backend: str
    ) -> None:
        """Init Corstone-based performance estimator."""
        self.context = context
        self.device = device
        self.backend = backend

    def estimate(self, model: Path | ModelConfiguration) -> NPUCycles:
        """Estimate performance."""
        with log_action(f"Getting the performance metrics for '{self.backend}' ..."):
            logger.info(
                "WARNING: This task may require several minutes "
                "(press ctrl-c to interrupt)"
            )

            if isinstance(model, ModelConfiguration):
                model_path = Path(model.model_path)
            else:
                model_path = model

            # The backend runs the Vela-optimized model, not the raw TFLite.
            vela_output_path = self.context.get_model_path(
                f"{model_path.stem}_vela.tflite"
            )
            vela_comp.optimize_model(
                model_path, self.device.compiler_options, vela_output_path
            )

            metrics = estimate_performance(
                ModelInfo(model_path=vela_output_path),
                DeviceInfo(
                    device_type=self.device.target,  # type: ignore
                    mac=self.device.mac,
                ),
                self.backend,
            )

            return NPUCycles(
                metrics.npu_active_cycles,
                metrics.npu_idle_cycles,
                metrics.npu_total_cycles,
                metrics.npu_axi0_rd_data_beat_received,
                metrics.npu_axi0_wr_data_beat_written,
                metrics.npu_axi1_rd_data_beat_received,
            )


class EthosUPerformanceEstimator(
    PerformanceEstimator[Union[Path, ModelConfiguration], PerformanceMetrics]
):
    """Ethos-U performance estimator."""

    def __init__(
        self,
        context: Context,
        device: EthosUConfiguration,
        backends: list[str] | None = None,
    ) -> None:
        """Init performance estimator."""
        self.context = context
        self.device = device

        chosen = ["Vela"] if backends is None else backends  # Vela is always available
        unsupported = [
            name for name in chosen if name != "Vela" and not is_supported(name)
        ]
        if unsupported:
            raise ValueError(
                f"Unsupported backend '{unsupported[0]}'. "
                f"Only 'Vela' and {supported_backends()} "
                "are supported."
            )
        self.backends = set(chosen)

    def estimate(self, model: Path | ModelConfiguration) -> PerformanceMetrics:
        """Estimate performance."""
        if isinstance(model, ModelConfiguration):
            model_path = Path(model.model_path)
        else:
            model_path = model

        tflite_model = get_tflite_model(model_path, self.context)

        memory_usage = None
        npu_cycles = None

        for backend in self.backends:
            if backend == "Vela":
                memory_usage = VelaPerformanceEstimator(
                    self.context, self.device
                ).estimate(tflite_model)
            elif backend in supported_backends():
                npu_cycles = CorstonePerformanceEstimator(
                    self.context, self.device, backend
                ).estimate(tflite_model)
            else:
                logger.warning(
                    "Backend '%s' is not supported for Ethos-U performance "
                    "estimation.",
                    backend,
                )

        return PerformanceMetrics(self.device, npu_cycles, memory_usage)
def report_operators_stat(operators: Operators) -> Report:
    """Return table representation for the ops stats."""
    columns = [
        Column("Number of operators", alias="num_of_operators"),
        Column("Number of NPU supported operators", "num_of_npu_supported_operators"),
        Column("Unsupported ops ratio", "npu_unsupported_ratio"),
    ]
    unsupported_pct = Cell(
        operators.npu_unsupported_ratio * 100,
        fmt=Format(str_fmt="{:.0f}%".format),
    )
    rows = [(operators.total_number, operators.npu_supported_number, unsupported_pct)]

    return SingleRow(
        columns, rows, name="Operators statistics", alias="operators_stats"
    )


def report_operators(ops: list[Operator]) -> Report:
    """Return table representation for the list of operators."""
    columns = [
        Column("#", only_for=["plain_text"]),
        Column(
            "Operator name",
            alias="operator_name",
            fmt=Format(wrap_width=30),
        ),
        Column(
            "Operator type",
            alias="operator_type",
            fmt=Format(wrap_width=25),
        ),
        Column(
            "Placement",
            alias="placement",
            fmt=Format(wrap_width=20),
        ),
        Column(
            "Notes",
            alias="notes",
            fmt=Format(wrap_width=35),
        ),
    ]

    def operator_row(pos: int, op: Operator) -> tuple:
        """Build one table row for a single operator."""
        supported = op.run_on_npu.supported
        placement = Cell(
            "NPU" if supported else "CPU",
            Format(style=style_improvement(supported)),
        )
        notes = Table(
            columns=[
                Column(
                    "Note",
                    alias="note",
                    fmt=Format(wrap_width=35),
                )
            ],
            rows=[
                (Cell(note, Format(str_fmt=lambda x: f"* {x}")),)
                for reason in op.run_on_npu.reasons
                for note in reason
                if note
            ],
            name="Notes",
        )
        return (pos + 1, op.name, op.op_type, placement, notes)

    rows = [operator_row(pos, op) for pos, op in enumerate(ops)]

    return Table(columns, rows, name="Operators", alias="operators")


def report_device_details(device: EthosUConfiguration) -> Report:
    """Return table representation for the device."""
    cfg = device.resolved_compiler_config

    memory_items = [
        ReportItem(
            "Const mem area",
            "const_mem_area",
            cfg["const_mem_area"],
        ),
        ReportItem(
            "Arena mem area",
            "arena_mem_area",
            cfg["arena_mem_area"],
        ),
        ReportItem(
            "Cache mem area",
            "cache_mem_area",
            cfg["cache_mem_area"],
        ),
        ReportItem(
            "Arena cache size",
            "arena_cache_size",
            BytesCell(cfg["arena_cache_size"]),
        ),
    ]

    mem_area_items = [
        ReportItem(
            f"{area_name}",
            area_name,
            None,
            nested_items=[
                ReportItem(
                    "Clock scales",
                    "clock_scales",
                    area_settings["clock_scales"],
                ),
                ReportItem(
                    "Burst length",
                    "burst_length",
                    BytesCell(area_settings["burst_length"]),
                ),
                ReportItem(
                    "Read latency",
                    "read_latency",
                    CyclesCell(area_settings["read_latency"]),
                ),
                ReportItem(
                    "Write latency",
                    "write_latency",
                    CyclesCell(area_settings["write_latency"]),
                ),
            ],
        )
        for area_name, area_settings in cfg["memory_area"].items()
    ]

    system_items = [
        ReportItem(
            "Accelerator clock",
            "accelerator_clock",
            ClockCell(cfg["core_clock"]),
        ),
        ReportItem(
            "AXI0 port",
            "axi0_port",
            cfg["axi0_port"],
        ),
        ReportItem(
            "AXI1 port",
            "axi1_port",
            cfg["axi1_port"],
        ),
        ReportItem(
            "Memory area settings", "memory_area", None, nested_items=mem_area_items
        ),
    ]

    arch_items = [
        ReportItem(
            "Permanent storage mem area",
            "permanent_storage_mem_area",
            cfg["permanent_storage_mem_area"],
        ),
        ReportItem(
            "Feature map storage mem area",
            "feature_map_storage_mem_area",
            cfg["feature_map_storage_mem_area"],
        ),
        ReportItem(
            "Fast storage mem area",
            "fast_storage_mem_area",
            cfg["fast_storage_mem_area"],
        ),
    ]

    return NestedReport(
        "Device information",
        "device",
        [
            ReportItem("Target", alias="target", value=device.target),
            ReportItem("MAC", alias="mac", value=device.mac),
            ReportItem(
                "Memory mode",
                alias="memory_mode",
                value=cfg["memory_mode"],
                nested_items=memory_items,
            ),
            ReportItem(
                "System config",
                alias="system_config",
                value=cfg["system_config"],
                nested_items=system_items,
            ),
            ReportItem(
                "Architecture settings",
                "arch_settings",
                None,
                nested_items=arch_items,
            ),
        ],
    )


def metrics_as_records(perf_metrics: list[PerformanceMetrics]) -> list[tuple]:
    """Convert perf metrics object into list of records."""
    perf_metrics = [item.in_kilobytes() for item in perf_metrics]

    def _cycle_records(metrics_list: list[PerformanceMetrics]) -> list[tuple]:
        """Rows for the NPU cycle counters; empty if any entry lacks them."""
        by_name = defaultdict(list)
        for entry in metrics_list:
            if not entry.npu_cycles:
                return []
            by_name["NPU active cycles"].append(entry.npu_cycles.npu_active_cycles)
            by_name["NPU idle cycles"].append(entry.npu_cycles.npu_idle_cycles)
            by_name["NPU total cycles"].append(entry.npu_cycles.npu_total_cycles)

        return [
            (name, *(Cell(value, Format(str_fmt="12,d")) for value in values), "cycles")
            for name, values in by_name.items()
        ]

    def _memory_records(metrics_list: list[PerformanceMetrics]) -> list[tuple]:
        """Rows for memory usage; areas with any zero value are filtered out."""
        by_name = defaultdict(list)
        for entry in metrics_list:
            if not entry.memory_usage:
                return []
            by_name["SRAM used"].append(entry.memory_usage.sram_memory_area_size)
            by_name["DRAM used"].append(entry.memory_usage.dram_memory_area_size)
            by_name["Unknown memory area used"].append(
                entry.memory_usage.unknown_memory_area_size
            )
            by_name["On-chip flash used"].append(
                entry.memory_usage.on_chip_flash_memory_area_size
            )
            by_name["Off-chip flash used"].append(
                entry.memory_usage.off_chip_flash_memory_area_size
            )

        return [
            (name, *(Cell(value, Format(str_fmt="12.2f")) for value in values), "KiB")
            for name, values in by_name.items()
            if all(val > 0 for val in values)
        ]

    def _beat_records(metrics_list: list[PerformanceMetrics]) -> list[tuple]:
        """Rows for the AXI data beat counters; empty if cycles are missing."""
        by_name = defaultdict(list)
        for entry in metrics_list:
            if not entry.npu_cycles:
                return []
            by_name["NPU AXI0 RD data beat received"].append(
                entry.npu_cycles.npu_axi0_rd_data_beat_received
            )
            by_name["NPU AXI0 WR data beat written"].append(
                entry.npu_cycles.npu_axi0_wr_data_beat_written
            )
            by_name["NPU AXI1 RD data beat received"].append(
                entry.npu_cycles.npu_axi1_rd_data_beat_received
            )

        return [
            (name, *(Cell(value, Format(str_fmt="12,d")) for value in values), "beats")
            for name, values in by_name.items()
        ]

    records: list[tuple] = []
    for builder in (_memory_records, _cycle_records, _beat_records):
        records.extend(builder(perf_metrics))
    return records


def report_perf_metrics(
    perf_metrics: PerformanceMetrics | list[PerformanceMetrics],
) -> Report:
    """Return comparison table for the performance metrics."""
    if isinstance(perf_metrics, PerformanceMetrics):
        perf_metrics = [perf_metrics]

    rows = metrics_as_records(perf_metrics)

    # Two entries means original vs optimized - render a comparison table
    # with an improvement column.
    if len(perf_metrics) == 2:

        def _comparison_row(metric: Any, original: Any, optimized: Any, unit: Any) -> tuple:
            """Build a comparison row with an improvement percentage cell."""
            if original.value == 0:
                improvement = None
            else:
                delta = 100 - (optimized.value / original.value * 100)
                improvement = Cell(
                    delta,
                    Format(str_fmt="15.2f", style=style_improvement(delta > 0)),
                )
            return (metric, original, optimized, unit, improvement)

        return Table(
            columns=[
                Column("Metric", alias="metric", fmt=Format(wrap_width=30)),
                Column("Original", alias="original", fmt=Format(wrap_width=15)),
                Column("Optimized", alias="optimized", fmt=Format(wrap_width=15)),
                Column("Unit", alias="unit", fmt=Format(wrap_width=15)),
                Column("Improvement (%)", alias="improvement"),
            ],
            rows=[
                _comparison_row(metric, original_value, optimized_value, unit)
                for metric, original_value, optimized_value, unit in rows
            ],
            name="Performance metrics",
            alias="performance_metrics",
            notes="IMPORTANT: The performance figures above refer to NPU only",
        )

    return Table(
        columns=[
            Column("Metric", alias="metric", fmt=Format(wrap_width=30)),
            Column("Value", alias="value", fmt=Format(wrap_width=15)),
            Column("Unit", alias="unit", fmt=Format(wrap_width=15)),
        ],
        rows=rows,
        name="Performance metrics",
        alias="performance_metrics",
        notes="IMPORTANT: The performance figures above refer to NPU only",
    )


def ethos_u_formatters(data: Any) -> Callable[[Any], Report]:
    """Find appropriate formatter for the provided data."""
    if isinstance(data, PerformanceMetrics) or is_list_of(data, PerformanceMetrics, 2):
        return report_perf_metrics

    if is_list_of(data, Advice):
        return report_advice

    if is_list_of(data, Operator):
        return report_operators

    if isinstance(data, Operators):
        return report_operators_stat

    if isinstance(data, EthosUConfiguration):
        return report_device_details

    if isinstance(data, (list, tuple)):
        # Composite payload: format each element and combine the results.
        return CompoundFormatter([ethos_u_formatters(item) for item in data])

    raise Exception(f"Unable to find appropriate formatter for {data}")
class TOSAAdviceProducer(FactBasedAdviceProducer):
    """TOSA advice producer."""

    @singledispatchmethod
    def produce_advice(self, _data_item: DataItem) -> None:  # type: ignore
        """Produce advice."""

    @produce_advice.register
    @advice_category(AdviceCategory.ALL, AdviceCategory.OPERATORS)
    def handle_model_is_tosa_compatible(
        self, _data_item: ModelIsTOSACompatible
    ) -> None:
        """Advice for TOSA compatibility."""
        self.add_advice(["Model is fully TOSA compatible."])

    @produce_advice.register
    @advice_category(AdviceCategory.ALL, AdviceCategory.OPERATORS)
    def handle_model_is_not_tosa_compatible(
        self, _data_item: ModelIsNotTOSACompatible
    ) -> None:
        """Advice for TOSA compatibility."""
        self.add_advice(
            [
                "Some operators in the model are not TOSA compatible. "
                "Please, refer to the operators table for more information."
            ]
        )


class TOSAInferenceAdvisor(DefaultInferenceAdvisor):
    """TOSA inference advisor."""

    @classmethod
    def name(cls) -> str:
        """Return name of the advisor."""
        return "tosa_inference_advisor"

    def get_collectors(self, context: Context) -> list[DataCollector]:
        """Return list of the data collectors."""
        model = self.get_model(context)

        # Only the operator-compatibility collector exists for TOSA.
        if AdviceCategory.OPERATORS in context.advice_category:
            return [TOSAOperatorCompatibility(model)]

        return []

    def get_analyzers(self, context: Context) -> list[DataAnalyzer]:
        """Return list of the data analyzers."""
        return [
            TOSADataAnalyzer(),
        ]

    def get_producers(self, context: Context) -> list[AdviceProducer]:
        """Return list of the advice producers."""
        return [
            TOSAAdviceProducer(),
        ]

    def get_events(self, context: Context) -> list[Event]:
        """Return list of the startup events."""
        model = self.get_model(context)
        target_profile = self.get_target_profile(context)

        return [
            TOSAAdvisorStartedEvent(model, TOSAConfiguration(target_profile)),
        ]


def configure_and_get_tosa_advisor(
    context: ExecutionContext,
    target_profile: str,
    model: str | Path,
    output: PathOrFileLike | None = None,
    **_extra_args: Any,
) -> InferenceAdvisor:
    """Create and configure TOSA advisor."""
    if context.event_handlers is None:
        context.event_handlers = [TOSAEventHandler(output)]

    if context.config_parameters is None:
        context.config_parameters = _get_config_parameters(model, target_profile)

    return TOSAInferenceAdvisor()


def _get_config_parameters(model: str | Path, target_profile: str) -> dict[str, Any]:
    """Get configuration parameters for the advisor."""
    return {
        "tosa_inference_advisor": {
            "model": str(model),
            "target_profile": target_profile,
        }
    }
class TOSAConfiguration(IPConfiguration):  # pylint: disable=too-few-public-methods
    """TOSA configuration."""

    def __init__(self, target_profile: str) -> None:
        """Init configuration."""
        profile_data = get_profile(target_profile)
        target = profile_data["target"]

        # Reject profiles that do not describe a TOSA target.
        if target != "tosa":
            raise Exception(f"Wrong target {target} for TOSA configuration")

        super().__init__(target)


@dataclass
class ModelIsTOSACompatible(Fact):
    """Model is completely TOSA compatible."""


@dataclass
class ModelIsNotTOSACompatible(Fact):
    """Model is not TOSA compatible."""


class TOSADataAnalyzer(FactExtractor):
    """TOSA data analyzer."""

    @singledispatchmethod
    def analyze_data(self, data_item: DataItem) -> None:  # type: ignore
        """Analyse the data."""

    @analyze_data.register
    def analyze_tosa_compatibility(self, data_item: TOSACompatibilityInfo) -> None:
        """Analyse TOSA compatibility information."""
        fact = (
            ModelIsTOSACompatible()
            if data_item.tosa_compatible
            else ModelIsNotTOSACompatible()
        )
        self.add_fact(fact)


class TOSAOperatorCompatibility(ContextAwareDataCollector):
    """Collect operator compatibility information."""

    def __init__(self, model: Path) -> None:
        """Init the data collector."""
        self.model = model

    def collect_data(self) -> TOSACompatibilityInfo:
        """Collect TOSA compatibility information."""
        # Compatibility is checked on the TFLite conversion of the model.
        converted = get_tflite_model(self.model, self.context)

        with log_action("Checking operator compatibility ..."):
            return get_tosa_compatibility_info(converted.model_path)

    @classmethod
    def name(cls) -> str:
        """Return name of the collector."""
        return "tosa_operator_compatibility"
@dataclass
class TOSAAdvisorStartedEvent(Event):
    """Event with TOSA advisor parameters."""

    # Path to the model being analysed.
    model: Path
    # TOSA target configuration.
    device: TOSAConfiguration


class TOSAAdvisorEventHandler(EventDispatcher):
    """Event handler for the TOSA inference advisor."""

    def on_tosa_advisor_started(self, event: TOSAAdvisorStartedEvent) -> None:
        """Handle TOSAAdvisorStartedEvent event."""


logger = logging.getLogger(__name__)


class TOSAEventHandler(WorkflowEventsHandler, TOSAAdvisorEventHandler):
    """Event handler for TOSA advisor."""

    def __init__(self, output: PathOrFileLike | None = None) -> None:
        """Init event handler."""
        super().__init__(tosa_formatters, output)

    def on_tosa_advisor_started(self, event: TOSAAdvisorStartedEvent) -> None:
        """Handle TOSAAdvisorStartedEvent event."""
        self.reporter.submit(event.device)

    def on_collected_data(self, event: CollectedDataEvent) -> None:
        """Handle CollectedDataEvent event."""
        item = event.data_item
        if isinstance(item, TOSACompatibilityInfo):
            self.reporter.submit(item.operators, delay_print=True)
"""Handle CollectedDataEvent event.""" + data_item = event.data_item + + if isinstance(data_item, TOSACompatibilityInfo): + self.reporter.submit(data_item.operators, delay_print=True) diff --git a/src/mlia/target/tosa/operators.py b/src/mlia/target/tosa/operators.py new file mode 100644 index 0000000..b75ceb0 --- /dev/null +++ b/src/mlia/target/tosa/operators.py @@ -0,0 +1,11 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Operators module.""" + + +def report() -> None: + """Generate supported operators report.""" + raise Exception( + "Generating a supported operators report is not " + "currently supported with TOSA target profile." + ) diff --git a/src/mlia/target/tosa/reporters.py b/src/mlia/target/tosa/reporters.py new file mode 100644 index 0000000..01fbb97 --- /dev/null +++ b/src/mlia/target/tosa/reporters.py @@ -0,0 +1,83 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Reports module.""" +from __future__ import annotations + +from typing import Any +from typing import Callable + +from mlia.backend.tosa_checker.compat import Operator +from mlia.core.advice_generation import Advice +from mlia.core.reporters import report_advice +from mlia.core.reporting import Cell +from mlia.core.reporting import Column +from mlia.core.reporting import Format +from mlia.core.reporting import NestedReport +from mlia.core.reporting import Report +from mlia.core.reporting import ReportItem +from mlia.core.reporting import Table +from mlia.target.tosa.config import TOSAConfiguration +from mlia.utils.console import style_improvement +from mlia.utils.types import is_list_of + + +def report_device(device: TOSAConfiguration) -> Report: + """Generate report for the device.""" + return NestedReport( + "Device information", + "device", + [ + ReportItem("Target", alias="target", value=device.target), + ], + ) + + +def 
report_tosa_operators(ops: list[Operator]) -> Report: + """Generate report for the operators.""" + return Table( + [ + Column("#", only_for=["plain_text"]), + Column( + "Operator location", + alias="operator_location", + fmt=Format(wrap_width=30), + ), + Column("Operator name", alias="operator_name", fmt=Format(wrap_width=20)), + Column( + "TOSA compatibility", + alias="is_tosa_compatible", + fmt=Format(wrap_width=25), + ), + ], + [ + ( + index + 1, + op.location, + op.name, + Cell( + op.is_tosa_compatible, + Format( + style=style_improvement(op.is_tosa_compatible), + str_fmt=lambda v: "Compatible" if v else "Not compatible", + ), + ), + ) + for index, op in enumerate(ops) + ], + name="Operators", + alias="operators", + ) + + +def tosa_formatters(data: Any) -> Callable[[Any], Report]: + """Find appropriate formatter for the provided data.""" + if is_list_of(data, Advice): + return report_advice + + if isinstance(data, TOSAConfiguration): + return report_device + + if is_list_of(data, Operator): + return report_tosa_operators + + raise Exception(f"Unable to find appropriate formatter for {data}") diff --git a/tests/conftest.py b/tests/conftest.py index feb2aa0..e27acaf 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -12,10 +12,10 @@ import tensorflow as tf from mlia.backend.vela.compiler import optimize_model from mlia.core.context import ExecutionContext -from mlia.devices.ethosu.config import EthosUConfiguration from mlia.nn.tensorflow.utils import convert_to_tflite from mlia.nn.tensorflow.utils import save_keras_model from mlia.nn.tensorflow.utils import save_tflite_model +from mlia.target.ethos_u.config import EthosUConfiguration @pytest.fixture(scope="session", name="test_resources_path") diff --git a/tests/test_api.py b/tests/test_api.py index b9ab8ea..fbc558b 100644 --- a/tests/test_api.py +++ b/tests/test_api.py @@ -15,8 +15,8 @@ from mlia.api import get_advisor from mlia.core.common import AdviceCategory from mlia.core.context import Context from 
mlia.core.context import ExecutionContext -from mlia.devices.ethosu.advisor import EthosUInferenceAdvisor -from mlia.devices.tosa.advisor import TOSAInferenceAdvisor +from mlia.target.ethos_u.advisor import EthosUInferenceAdvisor +from mlia.target.tosa.advisor import TOSAInferenceAdvisor def test_get_advice_no_target_provided(test_keras_model: Path) -> None: @@ -118,12 +118,12 @@ def test_get_advisor( [ [ "ethos-u55-128", - "mlia.devices.ethosu.operators.generate_supported_operators_report", + "mlia.target.ethos_u.operators.generate_supported_operators_report", None, ], [ "ethos-u65-256", - "mlia.devices.ethosu.operators.generate_supported_operators_report", + "mlia.target.ethos_u.operators.generate_supported_operators_report", None, ], [ diff --git a/tests/test_backend_vela_compat.py b/tests/test_backend_vela_compat.py index 6f7a41c..a2e7f90 100644 --- a/tests/test_backend_vela_compat.py +++ b/tests/test_backend_vela_compat.py @@ -10,7 +10,7 @@ from mlia.backend.vela.compat import NpuSupported from mlia.backend.vela.compat import Operator from mlia.backend.vela.compat import Operators from mlia.backend.vela.compat import supported_operators -from mlia.devices.ethosu.config import EthosUConfiguration +from mlia.target.ethos_u.config import EthosUConfiguration from mlia.utils.filesystem import working_directory diff --git a/tests/test_backend_vela_compiler.py b/tests/test_backend_vela_compiler.py index 40268ae..ff07c74 100644 --- a/tests/test_backend_vela_compiler.py +++ b/tests/test_backend_vela_compiler.py @@ -10,7 +10,7 @@ from mlia.backend.vela.compiler import optimize_model from mlia.backend.vela.compiler import OptimizedModel from mlia.backend.vela.compiler import VelaCompiler from mlia.backend.vela.compiler import VelaCompilerOptions -from mlia.devices.ethosu.config import EthosUConfiguration +from mlia.target.ethos_u.config import EthosUConfiguration def test_default_vela_compiler() -> None: diff --git a/tests/test_backend_vela_performance.py 
b/tests/test_backend_vela_performance.py index a1c806c..34c11ab 100644 --- a/tests/test_backend_vela_performance.py +++ b/tests/test_backend_vela_performance.py @@ -9,7 +9,7 @@ import pytest from mlia.backend.vela.compiler import optimize_model from mlia.backend.vela.performance import estimate_performance from mlia.backend.vela.performance import PerformanceMetrics -from mlia.devices.ethosu.config import EthosUConfiguration +from mlia.target.ethos_u.config import EthosUConfiguration def test_estimate_performance(test_tflite_model: Path) -> None: diff --git a/tests/test_cli_commands.py b/tests/test_cli_commands.py index 77e1f88..aed5c42 100644 --- a/tests/test_cli_commands.py +++ b/tests/test_cli_commands.py @@ -18,10 +18,10 @@ from mlia.cli.commands import operators from mlia.cli.commands import optimization from mlia.cli.commands import performance from mlia.core.context import ExecutionContext -from mlia.devices.ethosu.config import EthosUConfiguration -from mlia.devices.ethosu.performance import MemoryUsage -from mlia.devices.ethosu.performance import NPUCycles -from mlia.devices.ethosu.performance import PerformanceMetrics +from mlia.target.ethos_u.config import EthosUConfiguration +from mlia.target.ethos_u.performance import MemoryUsage +from mlia.target.ethos_u.performance import NPUCycles +from mlia.target.ethos_u.performance import PerformanceMetrics def test_operators_expected_parameters(sample_context: ExecutionContext) -> None: @@ -133,7 +133,7 @@ def mock_performance_estimation(monkeypatch: pytest.MonkeyPatch) -> None: MemoryUsage(1, 2, 3, 4, 5), ) monkeypatch.setattr( - "mlia.devices.ethosu.data_collection.EthosUPerformanceEstimator.estimate", + "mlia.target.ethos_u.data_collection.EthosUPerformanceEstimator.estimate", MagicMock(return_value=metrics), ) diff --git a/tests/test_devices_cortexa_advice_generation.py b/tests/test_devices_cortexa_advice_generation.py deleted file mode 100644 index fd669d4..0000000 --- 
a/tests/test_devices_cortexa_advice_generation.py +++ /dev/null @@ -1,196 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Tests for advice generation.""" -from __future__ import annotations - -import pytest - -from mlia.core.advice_generation import Advice -from mlia.core.common import AdviceCategory -from mlia.core.common import DataItem -from mlia.core.context import ExecutionContext -from mlia.devices.cortexa.advice_generation import CortexAAdviceProducer -from mlia.devices.cortexa.data_analysis import ModelHasCustomOperators -from mlia.devices.cortexa.data_analysis import ModelIsCortexACompatible -from mlia.devices.cortexa.data_analysis import ModelIsNotCortexACompatible -from mlia.devices.cortexa.data_analysis import ModelIsNotTFLiteCompatible -from mlia.devices.cortexa.data_analysis import TFLiteCompatibilityCheckFailed -from mlia.devices.cortexa.operator_compatibility import ARMNN_TFLITE_DELEGATE -from mlia.nn.tensorflow.tflite_graph import TFL_ACTIVATION_FUNCTION - -BACKEND_INFO = ( - f"{ARMNN_TFLITE_DELEGATE['metadata']['backend']} " - f"{ARMNN_TFLITE_DELEGATE['metadata']['version']}" -) - - -@pytest.mark.parametrize( - "input_data, advice_category, expected_advice", - [ - [ - ModelIsNotCortexACompatible(BACKEND_INFO, {"UNSUPPORTED_OP"}, {}), - AdviceCategory.OPERATORS, - [ - Advice( - [ - "The following operators are not supported by " - f"{BACKEND_INFO} and will fall back to the TensorFlow " - "Lite runtime:", - " - UNSUPPORTED_OP", - ] - ), - Advice( - [ - "Please, refer to the full table of operators above " - "for more information.", - CortexAAdviceProducer.cortex_a_disclaimer, - ] - ), - ], - ], - [ - ModelIsNotCortexACompatible( - BACKEND_INFO, - {"UNSUPPORTED_OP"}, - { - "CONV_2D": ModelIsNotCortexACompatible.ActivationFunctionSupport( - used_unsupported={TFL_ACTIVATION_FUNCTION.SIGN_BIT.name}, - supported={"RELU"}, - ) - }, - ), - AdviceCategory.OPERATORS, - [ - 
Advice( - [ - "The following operators are not supported by " - f"{BACKEND_INFO} and will fall back to the TensorFlow " - "Lite runtime:", - " - UNSUPPORTED_OP", - ] - ), - Advice( - [ - "The fused activation functions of the following " - f"operators are not supported by {BACKEND_INFO}. " - "Please consider using one of the supported activation " - "functions instead:", - " - CONV_2D\n" - " - Used unsupported: {'SIGN_BIT'}\n" - " - Supported: {'RELU'}", - ] - ), - Advice( - [ - "Please, refer to the full table of operators above " - "for more information.", - CortexAAdviceProducer.cortex_a_disclaimer, - ] - ), - ], - ], - [ - ModelIsCortexACompatible(BACKEND_INFO), - AdviceCategory.OPERATORS, - [ - Advice( - [ - f"Model is fully compatible with {BACKEND_INFO} for Cortex-A.", - CortexAAdviceProducer.cortex_a_disclaimer, - ] - ) - ], - ], - [ - ModelIsNotTFLiteCompatible( - flex_ops=["flex_op1", "flex_op2"], - custom_ops=["custom_op1", "custom_op2"], - ), - AdviceCategory.OPERATORS, - [ - Advice( - [ - "The following operators are not natively " - "supported by TensorFlow Lite: flex_op1, flex_op2.", - "Using select TensorFlow operators in TensorFlow Lite model " - "requires special initialization of TFLiteConverter and " - "TensorFlow Lite run-time.", - "Please refer to the TensorFlow documentation for " - "more details: " - "https://www.tensorflow.org/lite/guide/ops_select", - "Note, such models are not supported by " - "the ML Inference Advisor.", - ] - ), - Advice( - [ - "The following operators appear to be custom and not natively " - "supported by TensorFlow Lite: custom_op1, custom_op2.", - "Using custom operators in TensorFlow Lite model " - "requires special initialization of TFLiteConverter and " - "TensorFlow Lite run-time.", - "Please refer to the TensorFlow documentation for " - "more details: " - "https://www.tensorflow.org/lite/guide/ops_custom", - "Note, such models are not supported by " - "the ML Inference Advisor.", - ] - ), - ], - ], - [ - 
ModelIsNotTFLiteCompatible(), - AdviceCategory.OPERATORS, - [ - Advice( - [ - "Model could not be converted into TensorFlow Lite format.", - "Please refer to the table for more details.", - ] - ), - ], - ], - [ - ModelHasCustomOperators(), - AdviceCategory.OPERATORS, - [ - Advice( - [ - "Models with custom operators require special initialization " - "and currently are not supported by the ML Inference Advisor.", - ] - ), - ], - ], - [ - TFLiteCompatibilityCheckFailed(), - AdviceCategory.OPERATORS, - [ - Advice( - [ - "Model could not be converted into TensorFlow Lite format.", - "Please refer to the table for more details.", - ] - ), - ], - ], - ], -) -def test_cortex_a_advice_producer( - tmpdir: str, - input_data: DataItem, - advice_category: AdviceCategory, - expected_advice: list[Advice], -) -> None: - """Test Cortex-A advice producer.""" - producer = CortexAAdviceProducer() - - context = ExecutionContext( - advice_category=advice_category, - working_dir=tmpdir, - ) - - producer.set_context(context) - producer.produce_advice(input_data) - - assert producer.get_advice() == expected_advice diff --git a/tests/test_devices_cortexa_advisor.py b/tests/test_devices_cortexa_advisor.py deleted file mode 100644 index 8cd60d6..0000000 --- a/tests/test_devices_cortexa_advisor.py +++ /dev/null @@ -1,34 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""Tests for Cortex-A MLIA module.""" -from pathlib import Path - -from mlia.core.context import ExecutionContext -from mlia.core.workflow import DefaultWorkflowExecutor -from mlia.devices.cortexa.advisor import configure_and_get_cortexa_advisor -from mlia.devices.cortexa.advisor import CortexAInferenceAdvisor - - -def test_advisor_metadata() -> None: - """Test advisor metadata.""" - assert CortexAInferenceAdvisor.name() == "cortex_a_inference_advisor" - - -def test_configure_and_get_cortex_a_advisor(test_tflite_model: Path) -> None: - """Test Cortex-A advisor configuration.""" - ctx = ExecutionContext() - - advisor = configure_and_get_cortexa_advisor(ctx, "cortex-a", test_tflite_model) - workflow = advisor.configure(ctx) - - assert isinstance(advisor, CortexAInferenceAdvisor) - - assert ctx.event_handlers is not None - assert ctx.config_parameters == { - "cortex_a_inference_advisor": { - "model": str(test_tflite_model), - "target_profile": "cortex-a", - } - } - - assert isinstance(workflow, DefaultWorkflowExecutor) diff --git a/tests/test_devices_cortexa_data_analysis.py b/tests/test_devices_cortexa_data_analysis.py deleted file mode 100644 index ed30b9a..0000000 --- a/tests/test_devices_cortexa_data_analysis.py +++ /dev/null @@ -1,162 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""Tests for Cortex-A data analysis module.""" -from __future__ import annotations - -import pytest - -from mlia.core.common import DataItem -from mlia.core.data_analysis import Fact -from mlia.devices.cortexa.data_analysis import CortexADataAnalyzer -from mlia.devices.cortexa.data_analysis import ModelHasCustomOperators -from mlia.devices.cortexa.data_analysis import ModelIsCortexACompatible -from mlia.devices.cortexa.data_analysis import ModelIsNotCortexACompatible -from mlia.devices.cortexa.data_analysis import ModelIsNotTFLiteCompatible -from mlia.devices.cortexa.data_analysis import TFLiteCompatibilityCheckFailed -from mlia.devices.cortexa.operator_compatibility import ARMNN_TFLITE_DELEGATE -from mlia.devices.cortexa.operators import CortexACompatibilityInfo -from mlia.devices.cortexa.operators import Operator -from mlia.nn.tensorflow.tflite_compat import TFLiteCompatibilityInfo -from mlia.nn.tensorflow.tflite_compat import TFLiteCompatibilityStatus -from mlia.nn.tensorflow.tflite_compat import TFLiteConversionError -from mlia.nn.tensorflow.tflite_compat import TFLiteConversionErrorCode -from mlia.nn.tensorflow.tflite_graph import TFL_ACTIVATION_FUNCTION - -BACKEND_INFO = ( - f"{ARMNN_TFLITE_DELEGATE['metadata']['backend']} " - f"{ARMNN_TFLITE_DELEGATE['metadata']['version']}" -) - - -@pytest.mark.parametrize( - "input_data, expected_facts", - [ - [ - CortexACompatibilityInfo(True, []), - [ModelIsCortexACompatible(BACKEND_INFO)], - ], - [ - CortexACompatibilityInfo( - True, - [ - Operator( - "CONV_2D", - "somewhere", - support_type=Operator.SupportType.COMPATIBLE, - activation_func=TFL_ACTIVATION_FUNCTION.NONE, - ), - Operator( - "CUSTOM", - "somewhere else", - support_type=Operator.SupportType.COMPATIBLE, - activation_func=TFL_ACTIVATION_FUNCTION.SIGN_BIT, - custom_name="MaxPool3D", - ), - ], - ), - [ModelIsCortexACompatible(BACKEND_INFO)], - ], - [ - # pylint: disable=line-too-long - CortexACompatibilityInfo( - False, 
- [ - Operator( - "UNSUPPORTED_OP", - "somewhere", - support_type=Operator.SupportType.OP_NOT_SUPPORTED, - activation_func=TFL_ACTIVATION_FUNCTION.NONE, - ), - Operator( - "CUSTOM", - "somewhere", - support_type=Operator.SupportType.OP_NOT_SUPPORTED, - activation_func=TFL_ACTIVATION_FUNCTION.NONE, - custom_name="UNSUPPORTED_OP", - ), - Operator( - "CONV_2D", - "somewhere else", - support_type=Operator.SupportType.ACTIVATION_NOT_SUPPORTED, - activation_func=TFL_ACTIVATION_FUNCTION.SIGN_BIT, - ), - ], - ), - [ - ModelIsNotCortexACompatible( - BACKEND_INFO, - { - "UNSUPPORTED_OP", - "CUSTOM - 'UNSUPPORTED_OP'", - }, - { - "CONV_2D": ModelIsNotCortexACompatible.ActivationFunctionSupport( - used_unsupported={TFL_ACTIVATION_FUNCTION.SIGN_BIT.name}, - supported={ - "RELU", - "RELU6", - "RELU_N1_TO_1", - "SIGMOID", - "TANH", - "NONE", - }, - ) - }, - ) - ], - # pylint: enable=line-too-long - ], - [ - TFLiteCompatibilityInfo(status=TFLiteCompatibilityStatus.COMPATIBLE), - [], - ], - [ - TFLiteCompatibilityInfo( - status=TFLiteCompatibilityStatus.MODEL_WITH_CUSTOM_OP_ERROR - ), - [ModelHasCustomOperators()], - ], - [ - TFLiteCompatibilityInfo(status=TFLiteCompatibilityStatus.UNKNOWN_ERROR), - [TFLiteCompatibilityCheckFailed()], - ], - [ - TFLiteCompatibilityInfo( - status=TFLiteCompatibilityStatus.TFLITE_CONVERSION_ERROR - ), - [ModelIsNotTFLiteCompatible(custom_ops=[], flex_ops=[])], - ], - [ - TFLiteCompatibilityInfo( - status=TFLiteCompatibilityStatus.TFLITE_CONVERSION_ERROR, - conversion_errors=[ - TFLiteConversionError( - "error", - TFLiteConversionErrorCode.NEEDS_CUSTOM_OPS, - "custom_op1", - [], - ), - TFLiteConversionError( - "error", - TFLiteConversionErrorCode.NEEDS_FLEX_OPS, - "flex_op1", - [], - ), - ], - ), - [ - ModelIsNotTFLiteCompatible( - custom_ops=["custom_op1"], - flex_ops=["flex_op1"], - ) - ], - ], - ], -) -def test_cortex_a_data_analyzer( - input_data: DataItem, expected_facts: list[Fact] -) -> None: - """Test Cortex-A data analyzer.""" - analyzer = 
CortexADataAnalyzer() - analyzer.analyze_data(input_data) - assert analyzer.get_analyzed_data() == expected_facts diff --git a/tests/test_devices_cortexa_data_collection.py b/tests/test_devices_cortexa_data_collection.py deleted file mode 100644 index 6d3b2ac..0000000 --- a/tests/test_devices_cortexa_data_collection.py +++ /dev/null @@ -1,52 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Tests for Cortex-A data collection module.""" -from pathlib import Path -from unittest.mock import MagicMock - -import pytest - -from mlia.core.context import ExecutionContext -from mlia.devices.cortexa.data_collection import CortexAOperatorCompatibility -from mlia.devices.cortexa.operators import CortexACompatibilityInfo - - -def check_cortex_a_data_collection( - monkeypatch: pytest.MonkeyPatch, model: Path, tmpdir: str -) -> None: - """Test Cortex-A data collection.""" - assert CortexAOperatorCompatibility.name() - - monkeypatch.setattr( - "mlia.devices.cortexa.data_collection.get_cortex_a_compatibility_info", - MagicMock(return_value=CortexACompatibilityInfo(True, [])), - ) - - context = ExecutionContext(working_dir=tmpdir) - collector = CortexAOperatorCompatibility(model) - collector.set_context(context) - - data_item = collector.collect_data() - - assert isinstance(data_item, CortexACompatibilityInfo) - - -def test_cortex_a_data_collection_tflite( - monkeypatch: pytest.MonkeyPatch, test_tflite_model: Path, tmpdir: str -) -> None: - """Test Cortex-A data collection with a TensorFlow Lite model.""" - check_cortex_a_data_collection(monkeypatch, test_tflite_model, tmpdir) - - -def test_cortex_a_data_collection_keras( - monkeypatch: pytest.MonkeyPatch, test_keras_model: Path, tmpdir: str -) -> None: - """Test Cortex-A data collection with a Keras model.""" - check_cortex_a_data_collection(monkeypatch, test_keras_model, tmpdir) - - -def test_cortex_a_data_collection_tf( - monkeypatch: 
pytest.MonkeyPatch, test_tf_model: Path, tmpdir: str -) -> None: - """Test Cortex-A data collection with a SavedModel.""" - check_cortex_a_data_collection(monkeypatch, test_tf_model, tmpdir) diff --git a/tests/test_devices_cortexa_operators.py b/tests/test_devices_cortexa_operators.py deleted file mode 100644 index 23c4b0a..0000000 --- a/tests/test_devices_cortexa_operators.py +++ /dev/null @@ -1,73 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Tests for Cortex-A operator compatibility.""" -from pathlib import Path - -import pytest -import tensorflow as tf - -from mlia.devices.cortexa import operator_compatibility as op_compat -from mlia.devices.cortexa.operators import CortexACompatibilityInfo -from mlia.devices.cortexa.operators import get_cortex_a_compatibility_info -from mlia.devices.cortexa.operators import Operator -from mlia.nn.tensorflow.tflite_graph import TFL_OP -from mlia.nn.tensorflow.utils import convert_to_tflite - - -def test_op_compat_data() -> None: - """Make sure all data contains the necessary items.""" - builtin_tfl_ops = {op.name for op in TFL_OP} - for data in [op_compat.ARMNN_TFLITE_DELEGATE]: - assert "metadata" in data - assert "backend" in data["metadata"] - assert "version" in data["metadata"] - assert "builtin_ops" in data - for comp in data["builtin_ops"]: - assert comp in builtin_tfl_ops - assert "custom_ops" in data - - -def check_get_cortex_a_compatibility_info( - model_path: Path, - expected_success: bool, -) -> None: - """Check the function 'get_cortex_a_compatibility_info'.""" - compat_info = get_cortex_a_compatibility_info(model_path) - assert isinstance(compat_info, CortexACompatibilityInfo) - assert expected_success == compat_info.cortex_a_compatible - assert compat_info.operators - for oper in compat_info.operators: - assert oper.name - assert oper.location - assert oper.support_type in Operator.SupportType - - -def 
test_get_cortex_a_compatibility_info_compatible( - test_tflite_model: Path, -) -> None: - """Test a fully compatible TensorFlow Lite model.""" - check_get_cortex_a_compatibility_info(test_tflite_model, expected_success=True) - - -def test_get_cortex_a_compatibility_info_not_compatible( - monkeypatch: pytest.MonkeyPatch, -) -> None: - """Construct and test a NOT fully compatible TensorFlow Lite model.""" - keras_model = tf.keras.Sequential( - [ - tf.keras.Input(shape=(28, 28, 1), batch_size=1, name="input"), - tf.keras.layers.Conv2D( - filters=12, kernel_size=(3, 3), activation="softmax", name="conv1" - ), - tf.keras.layers.LeakyReLU(), - ] - ) - keras_model.compile(optimizer="sgd", loss="mean_squared_error") - tflite_model = convert_to_tflite(keras_model, quantized=False) - - monkeypatch.setattr( - "mlia.nn.tensorflow.tflite_graph.load_tflite", lambda _p: tflite_model - ) - check_get_cortex_a_compatibility_info( - Path("NOT_USED_BECAUSE_OF_MOCKING"), expected_success=False - ) diff --git a/tests/test_devices_cortexa_reporters.py b/tests/test_devices_cortexa_reporters.py deleted file mode 100644 index 1110653..0000000 --- a/tests/test_devices_cortexa_reporters.py +++ /dev/null @@ -1,53 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""Tests for Cortex-A reporters.""" -from typing import Any - -import pytest - -from mlia.core.advice_generation import Advice -from mlia.core.reporting import Report -from mlia.devices.cortexa.config import CortexAConfiguration -from mlia.devices.cortexa.operators import Operator -from mlia.devices.cortexa.reporters import cortex_a_formatters -from mlia.devices.cortexa.reporters import report_device -from mlia.nn.tensorflow.tflite_compat import TFLiteCompatibilityInfo -from mlia.nn.tensorflow.tflite_compat import TFLiteCompatibilityStatus -from mlia.nn.tensorflow.tflite_graph import TFL_ACTIVATION_FUNCTION - - -def test_report_device() -> None: - """Test function report_device().""" - report = report_device(CortexAConfiguration("cortex-a")) - assert report.to_plain_text() - - -@pytest.mark.parametrize( - "data", - ( - [Advice(["Sample", "Advice"])], - TFLiteCompatibilityInfo(status=TFLiteCompatibilityStatus.COMPATIBLE), - [ - Operator( - name="Test", - location="loc", - support_type=Operator.SupportType.OP_NOT_SUPPORTED, - activation_func=TFL_ACTIVATION_FUNCTION.NONE, - ) - ], - ), -) -def test_cortex_a_formatters(data: Any) -> None: - """Test function cortex_a_formatters() with valid input.""" - formatter = cortex_a_formatters(data) - report = formatter(data) - assert isinstance(report, Report) - - -def test_cortex_a_formatters_invalid_data() -> None: - """Test cortex_a_formatters() with invalid input.""" - with pytest.raises( - Exception, - match=r"^Unable to find appropriate formatter for .*", - ): - cortex_a_formatters(12) diff --git a/tests/test_devices_ethosu_advice_generation.py b/tests/test_devices_ethosu_advice_generation.py deleted file mode 100644 index 21a3667..0000000 --- a/tests/test_devices_ethosu_advice_generation.py +++ /dev/null @@ -1,482 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""Tests for Ethos-U advice generation.""" -from __future__ import annotations - -import pytest - -from mlia.cli.helpers import CLIActionResolver -from mlia.core.advice_generation import Advice -from mlia.core.common import AdviceCategory -from mlia.core.common import DataItem -from mlia.core.context import ExecutionContext -from mlia.core.helpers import ActionResolver -from mlia.core.helpers import APIActionResolver -from mlia.devices.ethosu.advice_generation import EthosUAdviceProducer -from mlia.devices.ethosu.advice_generation import EthosUStaticAdviceProducer -from mlia.devices.ethosu.data_analysis import AllOperatorsSupportedOnNPU -from mlia.devices.ethosu.data_analysis import HasCPUOnlyOperators -from mlia.devices.ethosu.data_analysis import HasUnsupportedOnNPUOperators -from mlia.devices.ethosu.data_analysis import OptimizationDiff -from mlia.devices.ethosu.data_analysis import OptimizationResults -from mlia.devices.ethosu.data_analysis import PerfMetricDiff -from mlia.nn.tensorflow.optimizations.select import OptimizationSettings - - -@pytest.mark.parametrize( - "input_data, advice_category, action_resolver, expected_advice", - [ - [ - AllOperatorsSupportedOnNPU(), - AdviceCategory.OPERATORS, - APIActionResolver(), - [ - Advice( - [ - "You don't have any unsupported operators, your model will " - "run completely on NPU." 
- ] - ) - ], - ], - [ - AllOperatorsSupportedOnNPU(), - AdviceCategory.OPERATORS, - CLIActionResolver( - { - "target_profile": "sample_target", - "model": "sample_model.tflite", - } - ), - [ - Advice( - [ - "You don't have any unsupported operators, your model will " - "run completely on NPU.", - "Check the estimated performance by running the " - "following command: ", - "mlia performance --target-profile sample_target " - "sample_model.tflite", - ] - ) - ], - ], - [ - HasCPUOnlyOperators(cpu_only_ops=["OP1", "OP2", "OP3"]), - AdviceCategory.OPERATORS, - APIActionResolver(), - [ - Advice( - [ - "You have at least 3 operators that is CPU only: " - "OP1,OP2,OP3.", - "Using operators that are supported by the NPU will " - "improve performance.", - ] - ) - ], - ], - [ - HasCPUOnlyOperators(cpu_only_ops=["OP1", "OP2", "OP3"]), - AdviceCategory.OPERATORS, - CLIActionResolver({}), - [ - Advice( - [ - "You have at least 3 operators that is CPU only: " - "OP1,OP2,OP3.", - "Using operators that are supported by the NPU will " - "improve performance.", - "For guidance on supported operators, run: mlia operators " - "--supported-ops-report", - ] - ) - ], - ], - [ - HasUnsupportedOnNPUOperators(npu_unsupported_ratio=0.4), - AdviceCategory.OPERATORS, - APIActionResolver(), - [ - Advice( - [ - "You have 40% of operators that cannot be placed on the NPU.", - "For better performance, please review the reasons reported " - "in the table, and adjust the model accordingly " - "where possible.", - ] - ) - ], - ], - [ - HasUnsupportedOnNPUOperators(npu_unsupported_ratio=0.4), - AdviceCategory.OPERATORS, - CLIActionResolver({}), - [ - Advice( - [ - "You have 40% of operators that cannot be placed on the NPU.", - "For better performance, please review the reasons reported " - "in the table, and adjust the model accordingly " - "where possible.", - ] - ) - ], - ], - [ - OptimizationResults( - [ - OptimizationDiff( - opt_type=[OptimizationSettings("pruning", 0.5, None)], - opt_diffs={ - 
"sram": PerfMetricDiff(100, 150), - "dram": PerfMetricDiff(100, 50), - "on_chip_flash": PerfMetricDiff(100, 100), - "off_chip_flash": PerfMetricDiff(100, 100), - "npu_total_cycles": PerfMetricDiff(10, 5), - }, - ), - ] - ), - AdviceCategory.OPTIMIZATION, - APIActionResolver(), - [ - Advice( - [ - "With the selected optimization (pruning: 0.5)", - "- You have achieved 50.00% performance improvement in " - "DRAM used (KB)", - "- You have achieved 50.00% performance improvement in " - "NPU total cycles", - "- SRAM used (KB) have degraded by 50.00%", - "You can try to push the optimization target higher " - "(e.g. pruning: 0.6) " - "to check if those results can be further improved.", - ] - ), - Advice( - [ - "The applied tooling techniques have an impact " - "on accuracy. Additional hyperparameter tuning may be required " - "after any optimization." - ] - ), - ], - ], - [ - OptimizationResults( - [ - OptimizationDiff( - opt_type=[OptimizationSettings("pruning", 0.5, None)], - opt_diffs={ - "sram": PerfMetricDiff(100, 150), - "dram": PerfMetricDiff(100, 50), - "on_chip_flash": PerfMetricDiff(100, 100), - "off_chip_flash": PerfMetricDiff(100, 100), - "npu_total_cycles": PerfMetricDiff(10, 5), - }, - ), - ] - ), - AdviceCategory.OPTIMIZATION, - CLIActionResolver({"model": "sample_model.h5"}), - [ - Advice( - [ - "With the selected optimization (pruning: 0.5)", - "- You have achieved 50.00% performance improvement in " - "DRAM used (KB)", - "- You have achieved 50.00% performance improvement in " - "NPU total cycles", - "- SRAM used (KB) have degraded by 50.00%", - "You can try to push the optimization target higher " - "(e.g. pruning: 0.6) " - "to check if those results can be further improved.", - "For more info: mlia optimization --help", - "Optimization command: " - "mlia optimization --optimization-type pruning " - "--optimization-target 0.6 sample_model.h5", - ] - ), - Advice( - [ - "The applied tooling techniques have an impact " - "on accuracy. 
Additional hyperparameter tuning may be required " - "after any optimization." - ] - ), - ], - ], - [ - OptimizationResults( - [ - OptimizationDiff( - opt_type=[ - OptimizationSettings("pruning", 0.5, None), - OptimizationSettings("clustering", 32, None), - ], - opt_diffs={ - "sram": PerfMetricDiff(100, 150), - "dram": PerfMetricDiff(100, 50), - "on_chip_flash": PerfMetricDiff(100, 100), - "off_chip_flash": PerfMetricDiff(100, 100), - "npu_total_cycles": PerfMetricDiff(10, 5), - }, - ), - ] - ), - AdviceCategory.OPTIMIZATION, - APIActionResolver(), - [ - Advice( - [ - "With the selected optimization (pruning: 0.5, clustering: 32)", - "- You have achieved 50.00% performance improvement in " - "DRAM used (KB)", - "- You have achieved 50.00% performance improvement in " - "NPU total cycles", - "- SRAM used (KB) have degraded by 50.00%", - "You can try to push the optimization target higher " - "(e.g. pruning: 0.6 and/or clustering: 16) " - "to check if those results can be further improved.", - ] - ), - Advice( - [ - "The applied tooling techniques have an impact " - "on accuracy. Additional hyperparameter tuning may be required " - "after any optimization." - ] - ), - ], - ], - [ - OptimizationResults( - [ - OptimizationDiff( - opt_type=[ - OptimizationSettings("clustering", 2, None), - ], - opt_diffs={ - "sram": PerfMetricDiff(100, 150), - "dram": PerfMetricDiff(100, 50), - "on_chip_flash": PerfMetricDiff(100, 100), - "off_chip_flash": PerfMetricDiff(100, 100), - "npu_total_cycles": PerfMetricDiff(10, 5), - }, - ), - ] - ), - AdviceCategory.OPTIMIZATION, - APIActionResolver(), - [ - Advice( - [ - "With the selected optimization (clustering: 2)", - "- You have achieved 50.00% performance improvement in " - "DRAM used (KB)", - "- You have achieved 50.00% performance improvement in " - "NPU total cycles", - "- SRAM used (KB) have degraded by 50.00%", - ] - ), - Advice( - [ - "The applied tooling techniques have an impact " - "on accuracy. 
Additional hyperparameter tuning may be required " - "after any optimization." - ] - ), - ], - ], - [ - OptimizationResults( - [ - OptimizationDiff( - opt_type=[OptimizationSettings("pruning", 0.5, None)], - opt_diffs={ - "sram": PerfMetricDiff(100, 150), - "dram": PerfMetricDiff(100, 150), - "on_chip_flash": PerfMetricDiff(100, 150), - "off_chip_flash": PerfMetricDiff(100, 150), - "npu_total_cycles": PerfMetricDiff(10, 100), - }, - ), - ] - ), - AdviceCategory.OPTIMIZATION, - APIActionResolver(), - [ - Advice( - [ - "With the selected optimization (pruning: 0.5)", - "- DRAM used (KB) have degraded by 50.00%", - "- SRAM used (KB) have degraded by 50.00%", - "- On chip flash used (KB) have degraded by 50.00%", - "- Off chip flash used (KB) have degraded by 50.00%", - "- NPU total cycles have degraded by 900.00%", - "The performance seems to have degraded after " - "applying the selected optimizations, " - "try exploring different optimization types/targets.", - ] - ), - Advice( - [ - "The applied tooling techniques have an impact " - "on accuracy. Additional hyperparameter tuning may be required " - "after any optimization." 
- ] - ), - ], - ], - [ - OptimizationResults( - [ - OptimizationDiff( - opt_type=[OptimizationSettings("pruning", 0.5, None)], - opt_diffs={ - "sram": PerfMetricDiff(100, 150), - "dram": PerfMetricDiff(100, 150), - "on_chip_flash": PerfMetricDiff(100, 150), - "off_chip_flash": PerfMetricDiff(100, 150), - "npu_total_cycles": PerfMetricDiff(10, 100), - }, - ), - OptimizationDiff( - opt_type=[OptimizationSettings("pruning", 0.6, None)], - opt_diffs={ - "sram": PerfMetricDiff(100, 150), - "dram": PerfMetricDiff(100, 150), - "on_chip_flash": PerfMetricDiff(100, 150), - "off_chip_flash": PerfMetricDiff(100, 150), - "npu_total_cycles": PerfMetricDiff(10, 100), - }, - ), - ] - ), - AdviceCategory.OPTIMIZATION, - APIActionResolver(), - [], # no advice for more than one optimization result - ], - ], -) -def test_ethosu_advice_producer( - tmpdir: str, - input_data: DataItem, - expected_advice: list[Advice], - advice_category: AdviceCategory, - action_resolver: ActionResolver, -) -> None: - """Test Ethos-U Advice producer.""" - producer = EthosUAdviceProducer() - - context = ExecutionContext( - advice_category=advice_category, - working_dir=tmpdir, - action_resolver=action_resolver, - ) - - producer.set_context(context) - producer.produce_advice(input_data) - - assert producer.get_advice() == expected_advice - - -@pytest.mark.parametrize( - "advice_category, action_resolver, expected_advice", - [ - [ - AdviceCategory.ALL, - None, - [], - ], - [ - AdviceCategory.OPERATORS, - None, - [], - ], - [ - AdviceCategory.PERFORMANCE, - APIActionResolver(), - [ - Advice( - [ - "You can improve the inference time by using only operators " - "that are supported by the NPU.", - ] - ), - Advice( - [ - "Check if you can improve the performance by applying " - "tooling techniques to your model." 
- ] - ), - ], - ], - [ - AdviceCategory.PERFORMANCE, - CLIActionResolver({"model": "test_model.h5"}), - [ - Advice( - [ - "You can improve the inference time by using only operators " - "that are supported by the NPU.", - "Try running the following command to verify that:", - "mlia operators test_model.h5", - ] - ), - Advice( - [ - "Check if you can improve the performance by applying " - "tooling techniques to your model.", - "For example: mlia optimization --optimization-type " - "pruning,clustering --optimization-target 0.5,32 " - "test_model.h5", - "For more info: mlia optimization --help", - ] - ), - ], - ], - [ - AdviceCategory.OPTIMIZATION, - APIActionResolver(), - [ - Advice( - [ - "For better performance, make sure that all the operators " - "of your final TensorFlow Lite model are supported by the NPU.", - ] - ) - ], - ], - [ - AdviceCategory.OPTIMIZATION, - CLIActionResolver({"model": "test_model.h5"}), - [ - Advice( - [ - "For better performance, make sure that all the operators " - "of your final TensorFlow Lite model are supported by the NPU.", - "For more details, run: mlia operators --help", - ] - ) - ], - ], - ], -) -def test_ethosu_static_advice_producer( - tmpdir: str, - advice_category: AdviceCategory, - action_resolver: ActionResolver, - expected_advice: list[Advice], -) -> None: - """Test static advice generation.""" - producer = EthosUStaticAdviceProducer() - - context = ExecutionContext( - advice_category=advice_category, - working_dir=tmpdir, - action_resolver=action_resolver, - ) - producer.set_context(context) - assert producer.get_advice() == expected_advice diff --git a/tests/test_devices_ethosu_advisor.py b/tests/test_devices_ethosu_advisor.py deleted file mode 100644 index 74d2408..0000000 --- a/tests/test_devices_ethosu_advisor.py +++ /dev/null @@ -1,9 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""Tests for Ethos-U MLIA module.""" -from mlia.devices.ethosu.advisor import EthosUInferenceAdvisor - - -def test_advisor_metadata() -> None: - """Test advisor metadata.""" - assert EthosUInferenceAdvisor.name() == "ethos_u_inference_advisor" diff --git a/tests/test_devices_ethosu_config.py b/tests/test_devices_ethosu_config.py deleted file mode 100644 index 2fec0d5..0000000 --- a/tests/test_devices_ethosu_config.py +++ /dev/null @@ -1,125 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Tests for config module.""" -from __future__ import annotations - -from contextlib import ExitStack as does_not_raise -from typing import Any -from unittest.mock import MagicMock - -import pytest - -from mlia.backend.vela.compiler import VelaCompilerOptions -from mlia.devices.ethosu.config import EthosUConfiguration -from mlia.devices.ethosu.config import get_target -from mlia.utils.filesystem import get_vela_config - - -def test_compiler_options_default_init() -> None: - """Test compiler options default init.""" - opts = VelaCompilerOptions() - - assert opts.config_files is None - assert opts.system_config == "internal-default" - assert opts.memory_mode == "internal-default" - assert opts.accelerator_config is None - assert opts.max_block_dependency == 3 - assert opts.arena_cache_size is None - assert opts.tensor_allocator == "HillClimb" - assert opts.cpu_tensor_alignment == 16 - assert opts.optimization_strategy == "Performance" - assert opts.output_dir is None - - -def test_ethosu_target() -> None: - """Test Ethos-U target configuration init.""" - default_config = EthosUConfiguration("ethos-u55-256") - - assert default_config.target == "ethos-u55" - assert default_config.mac == 256 - assert default_config.compiler_options is not None - - -def test_get_target() -> None: - """Test function get_target.""" - with pytest.raises(Exception, match="No target profile 
given"): - get_target(None) # type: ignore - - with pytest.raises(Exception, match="Unable to find target profile unknown"): - get_target("unknown") - - u65_device = get_target("ethos-u65-512") - - assert isinstance(u65_device, EthosUConfiguration) - assert u65_device.target == "ethos-u65" - assert u65_device.mac == 512 - assert u65_device.compiler_options.accelerator_config == "ethos-u65-512" - assert u65_device.compiler_options.memory_mode == "Dedicated_Sram" - assert u65_device.compiler_options.config_files == str(get_vela_config()) - - -@pytest.mark.parametrize( - "profile_data, expected_error", - [ - [ - {}, - pytest.raises( - Exception, - match="Mandatory fields missing from target profile: " - r"\['mac', 'memory_mode', 'system_config', 'target'\]", - ), - ], - [ - {"target": "ethos-u65", "mac": 512}, - pytest.raises( - Exception, - match="Mandatory fields missing from target profile: " - r"\['memory_mode', 'system_config'\]", - ), - ], - [ - { - "target": "ethos-u65", - "mac": 2, - "system_config": "Ethos_U65_Embedded", - "memory_mode": "Shared_Sram", - }, - pytest.raises( - Exception, - match=r"Mac value for selected device should be in \[256, 512\]", - ), - ], - [ - { - "target": "ethos-u55", - "mac": 1, - "system_config": "Ethos_U55_High_End_Embedded", - "memory_mode": "Shared_Sram", - }, - pytest.raises( - Exception, - match="Mac value for selected device should be " - r"in \[32, 64, 128, 256\]", - ), - ], - [ - { - "target": "ethos-u65", - "mac": 512, - "system_config": "Ethos_U65_Embedded", - "memory_mode": "Shared_Sram", - }, - does_not_raise(), - ], - ], -) -def test_ethosu_configuration( - monkeypatch: pytest.MonkeyPatch, profile_data: dict[str, Any], expected_error: Any -) -> None: - """Test creating Ethos-U configuration.""" - monkeypatch.setattr( - "mlia.devices.ethosu.config.get_profile", MagicMock(return_value=profile_data) - ) - - with expected_error: - EthosUConfiguration("target") diff --git a/tests/test_devices_ethosu_data_analysis.py 
b/tests/test_devices_ethosu_data_analysis.py deleted file mode 100644 index 8184c70..0000000 --- a/tests/test_devices_ethosu_data_analysis.py +++ /dev/null @@ -1,147 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Tests for Ethos-U data analysis module.""" -from __future__ import annotations - -import pytest - -from mlia.backend.vela.compat import NpuSupported -from mlia.backend.vela.compat import Operator -from mlia.backend.vela.compat import Operators -from mlia.core.common import DataItem -from mlia.core.data_analysis import Fact -from mlia.devices.ethosu.config import EthosUConfiguration -from mlia.devices.ethosu.data_analysis import AllOperatorsSupportedOnNPU -from mlia.devices.ethosu.data_analysis import EthosUDataAnalyzer -from mlia.devices.ethosu.data_analysis import HasCPUOnlyOperators -from mlia.devices.ethosu.data_analysis import HasUnsupportedOnNPUOperators -from mlia.devices.ethosu.data_analysis import OptimizationDiff -from mlia.devices.ethosu.data_analysis import OptimizationResults -from mlia.devices.ethosu.data_analysis import PerfMetricDiff -from mlia.devices.ethosu.performance import MemoryUsage -from mlia.devices.ethosu.performance import NPUCycles -from mlia.devices.ethosu.performance import OptimizationPerformanceMetrics -from mlia.devices.ethosu.performance import PerformanceMetrics -from mlia.nn.tensorflow.optimizations.select import OptimizationSettings - - -def test_perf_metrics_diff() -> None: - """Test PerfMetricsDiff class.""" - diff_same = PerfMetricDiff(1, 1) - assert diff_same.same is True - assert diff_same.improved is False - assert diff_same.degraded is False - assert diff_same.diff == 0 - - diff_improved = PerfMetricDiff(10, 5) - assert diff_improved.same is False - assert diff_improved.improved is True - assert diff_improved.degraded is False - assert diff_improved.diff == 50.0 - - diff_degraded = PerfMetricDiff(5, 10) - assert diff_degraded.same is 
False - assert diff_degraded.improved is False - assert diff_degraded.degraded is True - assert diff_degraded.diff == -100.0 - - diff_original_zero = PerfMetricDiff(0, 1) - assert diff_original_zero.diff == 0 - - -@pytest.mark.parametrize( - "input_data, expected_facts", - [ - [ - Operators( - [ - Operator( - "CPU operator", - "CPU operator type", - NpuSupported(False, [("CPU only operator", "")]), - ) - ] - ), - [ - HasCPUOnlyOperators(["CPU operator type"]), - HasUnsupportedOnNPUOperators(1.0), - ], - ], - [ - Operators( - [ - Operator( - "NPU operator", - "NPU operator type", - NpuSupported(True, []), - ) - ] - ), - [ - AllOperatorsSupportedOnNPU(), - ], - ], - [ - OptimizationPerformanceMetrics( - PerformanceMetrics( - EthosUConfiguration("ethos-u55-256"), - NPUCycles(1, 2, 3, 4, 5, 6), - # memory metrics are in kilobytes - MemoryUsage(*[i * 1024 for i in range(1, 6)]), # type: ignore - ), - [ - [ - [ - OptimizationSettings("pruning", 0.5, None), - ], - PerformanceMetrics( - EthosUConfiguration("ethos-u55-256"), - NPUCycles(1, 2, 3, 4, 5, 6), - # memory metrics are in kilobytes - MemoryUsage( - *[i * 1024 for i in range(1, 6)] # type: ignore - ), - ), - ], - ], - ), - [ - OptimizationResults( - [ - OptimizationDiff( - opt_type=[ - OptimizationSettings("pruning", 0.5, None), - ], - opt_diffs={ - "sram": PerfMetricDiff(1.0, 1.0), - "dram": PerfMetricDiff(2.0, 2.0), - "on_chip_flash": PerfMetricDiff(4.0, 4.0), - "off_chip_flash": PerfMetricDiff(5.0, 5.0), - "npu_total_cycles": PerfMetricDiff(3, 3), - }, - ) - ] - ) - ], - ], - [ - OptimizationPerformanceMetrics( - PerformanceMetrics( - EthosUConfiguration("ethos-u55-256"), - NPUCycles(1, 2, 3, 4, 5, 6), - # memory metrics are in kilobytes - MemoryUsage(*[i * 1024 for i in range(1, 6)]), # type: ignore - ), - [], - ), - [], - ], - ], -) -def test_ethos_u_data_analyzer( - input_data: DataItem, expected_facts: list[Fact] -) -> None: - """Test Ethos-U data analyzer.""" - analyzer = EthosUDataAnalyzer() - 
analyzer.analyze_data(input_data) - assert analyzer.get_analyzed_data() == expected_facts diff --git a/tests/test_devices_ethosu_data_collection.py b/tests/test_devices_ethosu_data_collection.py deleted file mode 100644 index 84b9424..0000000 --- a/tests/test_devices_ethosu_data_collection.py +++ /dev/null @@ -1,151 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Tests for the data collection module for Ethos-U.""" -from pathlib import Path -from unittest.mock import MagicMock - -import pytest - -from mlia.backend.vela.compat import Operators -from mlia.core.context import Context -from mlia.core.data_collection import DataCollector -from mlia.core.errors import FunctionalityNotSupportedError -from mlia.devices.ethosu.config import EthosUConfiguration -from mlia.devices.ethosu.data_collection import EthosUOperatorCompatibility -from mlia.devices.ethosu.data_collection import EthosUOptimizationPerformance -from mlia.devices.ethosu.data_collection import EthosUPerformance -from mlia.devices.ethosu.performance import MemoryUsage -from mlia.devices.ethosu.performance import NPUCycles -from mlia.devices.ethosu.performance import OptimizationPerformanceMetrics -from mlia.devices.ethosu.performance import PerformanceMetrics -from mlia.nn.tensorflow.optimizations.select import OptimizationSettings - - -@pytest.mark.parametrize( - "collector, expected_name", - [ - ( - EthosUOperatorCompatibility, - "ethos_u_operator_compatibility", - ), - ( - EthosUPerformance, - "ethos_u_performance", - ), - ( - EthosUOptimizationPerformance, - "ethos_u_model_optimizations", - ), - ], -) -def test_collectors_metadata( - collector: DataCollector, - expected_name: str, -) -> None: - """Test collectors metadata.""" - assert collector.name() == expected_name - - -def test_operator_compatibility_collector( - sample_context: Context, test_tflite_model: Path -) -> None: - """Test operator compatibility data 
collector.""" - device = EthosUConfiguration("ethos-u55-256") - - collector = EthosUOperatorCompatibility(test_tflite_model, device) - collector.set_context(sample_context) - - result = collector.collect_data() - assert isinstance(result, Operators) - - -def test_performance_collector( - monkeypatch: pytest.MonkeyPatch, sample_context: Context, test_tflite_model: Path -) -> None: - """Test performance data collector.""" - device = EthosUConfiguration("ethos-u55-256") - - mock_performance_estimation(monkeypatch, device) - - collector = EthosUPerformance(test_tflite_model, device) - collector.set_context(sample_context) - - result = collector.collect_data() - assert isinstance(result, PerformanceMetrics) - - -def test_optimization_performance_collector( - monkeypatch: pytest.MonkeyPatch, - sample_context: Context, - test_keras_model: Path, - test_tflite_model: Path, -) -> None: - """Test optimization performance data collector.""" - device = EthosUConfiguration("ethos-u55-256") - - mock_performance_estimation(monkeypatch, device) - collector = EthosUOptimizationPerformance( - test_keras_model, - device, - [ - [ - {"optimization_type": "pruning", "optimization_target": 0.5}, - ] - ], - ) - collector.set_context(sample_context) - result = collector.collect_data() - - assert isinstance(result, OptimizationPerformanceMetrics) - assert isinstance(result.original_perf_metrics, PerformanceMetrics) - assert isinstance(result.optimizations_perf_metrics, list) - assert len(result.optimizations_perf_metrics) == 1 - - opt, metrics = result.optimizations_perf_metrics[0] - assert opt == [OptimizationSettings("pruning", 0.5, None)] - assert isinstance(metrics, PerformanceMetrics) - - collector_no_optimizations = EthosUOptimizationPerformance( - test_keras_model, - device, - [], - ) - with pytest.raises(FunctionalityNotSupportedError): - collector_no_optimizations.collect_data() - - collector_tflite = EthosUOptimizationPerformance( - test_tflite_model, - device, - [ - [ - 
{"optimization_type": "pruning", "optimization_target": 0.5}, - ] - ], - ) - collector_tflite.set_context(sample_context) - with pytest.raises(FunctionalityNotSupportedError): - collector_tflite.collect_data() - - with pytest.raises( - Exception, match="Optimization parameters expected to be a list" - ): - collector_bad_config = EthosUOptimizationPerformance( - test_keras_model, device, {"optimization_type": "pruning"} # type: ignore - ) - collector.set_context(sample_context) - collector_bad_config.collect_data() - - -def mock_performance_estimation( - monkeypatch: pytest.MonkeyPatch, device: EthosUConfiguration -) -> None: - """Mock performance estimation.""" - metrics = PerformanceMetrics( - device, - NPUCycles(1, 2, 3, 4, 5, 6), - MemoryUsage(1, 2, 3, 4, 5), - ) - monkeypatch.setattr( - "mlia.devices.ethosu.data_collection.EthosUPerformanceEstimator.estimate", - MagicMock(return_value=metrics), - ) diff --git a/tests/test_devices_ethosu_performance.py b/tests/test_devices_ethosu_performance.py deleted file mode 100644 index 3ff73d8..0000000 --- a/tests/test_devices_ethosu_performance.py +++ /dev/null @@ -1,28 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""Performance estimation tests.""" -from unittest.mock import MagicMock - -import pytest - -from mlia.devices.ethosu.performance import MemorySizeType -from mlia.devices.ethosu.performance import MemoryUsage - - -def test_memory_usage_conversion() -> None: - """Test MemoryUsage objects conversion.""" - memory_usage_in_kb = MemoryUsage(1, 2, 3, 4, 5, MemorySizeType.KILOBYTES) - assert memory_usage_in_kb.in_kilobytes() == memory_usage_in_kb - - memory_usage_in_bytes = MemoryUsage( - 1 * 1024, 2 * 1024, 3 * 1024, 4 * 1024, 5 * 1024 - ) - assert memory_usage_in_bytes.in_kilobytes() == memory_usage_in_kb - - -def mock_performance_estimation(monkeypatch: pytest.MonkeyPatch) -> None: - """Mock performance estimation.""" - monkeypatch.setattr( - "mlia.backend.corstone.performance.estimate_performance", - MagicMock(return_value=MagicMock()), - ) diff --git a/tests/test_devices_ethosu_reporters.py b/tests/test_devices_ethosu_reporters.py deleted file mode 100644 index 926c4c3..0000000 --- a/tests/test_devices_ethosu_reporters.py +++ /dev/null @@ -1,353 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""Tests for reports module.""" -from __future__ import annotations - -import json -import sys -from contextlib import ExitStack as doesnt_raise -from pathlib import Path -from typing import Any -from typing import Callable -from typing import Literal - -import pytest - -from mlia.backend.vela.compat import NpuSupported -from mlia.backend.vela.compat import Operator -from mlia.backend.vela.compat import Operators -from mlia.core.reporting import get_reporter -from mlia.core.reporting import produce_report -from mlia.core.reporting import Report -from mlia.core.reporting import Reporter -from mlia.core.reporting import Table -from mlia.devices.ethosu.config import EthosUConfiguration -from mlia.devices.ethosu.performance import MemoryUsage -from mlia.devices.ethosu.performance import NPUCycles -from mlia.devices.ethosu.performance import PerformanceMetrics -from mlia.devices.ethosu.reporters import ethos_u_formatters -from mlia.devices.ethosu.reporters import report_device_details -from mlia.devices.ethosu.reporters import report_operators -from mlia.devices.ethosu.reporters import report_perf_metrics -from mlia.utils.console import remove_ascii_codes - - -@pytest.mark.parametrize( - "data, formatters", - [ - ( - [Operator("test_operator", "test_type", NpuSupported(False, []))], - [report_operators], - ), - ( - PerformanceMetrics( - EthosUConfiguration("ethos-u55-256"), - NPUCycles(0, 0, 0, 0, 0, 0), - MemoryUsage(0, 0, 0, 0, 0), - ), - [report_perf_metrics], - ), - ], -) -@pytest.mark.parametrize( - "fmt, output, expected_error", - [ - [ - "unknown_format", - sys.stdout, - pytest.raises(Exception, match="Unknown format unknown_format"), - ], - [ - "plain_text", - sys.stdout, - doesnt_raise(), - ], - [ - "json", - sys.stdout, - doesnt_raise(), - ], - [ - "plain_text", - "report.txt", - doesnt_raise(), - ], - [ - "json", - "report.json", - doesnt_raise(), - ], - ], -) -def test_report( - data: Any, - formatters: list[Callable], 
- fmt: Literal["plain_text", "json"], - output: Any, - expected_error: Any, - tmp_path: Path, -) -> None: - """Test report function.""" - if is_file := isinstance(output, str): - output = tmp_path / output - - for formatter in formatters: - with expected_error: - produce_report(data, formatter, fmt, output) - - if is_file: - assert output.is_file() - assert output.stat().st_size > 0 - - -@pytest.mark.parametrize( - "ops, expected_plain_text, expected_json_dict", - [ - ( - [ - Operator( - "npu_supported", - "test_type", - NpuSupported(True, []), - ), - Operator( - "cpu_only", - "test_type", - NpuSupported( - False, - [ - ( - "CPU only operator", - "", - ), - ], - ), - ), - Operator( - "npu_unsupported", - "test_type", - NpuSupported( - False, - [ - ( - "Not supported operator", - "Reason why operator is not supported", - ) - ], - ), - ), - ], - """ -Operators: -┌───┬─────────────────┬───────────────┬───────────┬───────────────────────────────┐ -│ # │ Operator name │ Operator type │ Placement │ Notes │ -╞═══╪═════════════════╪═══════════════╪═══════════╪═══════════════════════════════╡ -│ 1 │ npu_supported │ test_type │ NPU │ │ -├───┼─────────────────┼───────────────┼───────────┼───────────────────────────────┤ -│ 2 │ cpu_only │ test_type │ CPU │ * CPU only operator │ -├───┼─────────────────┼───────────────┼───────────┼───────────────────────────────┤ -│ 3 │ npu_unsupported │ test_type │ CPU │ * Not supported operator │ -│ │ │ │ │ │ -│ │ │ │ │ * Reason why operator is not │ -│ │ │ │ │ supported │ -└───┴─────────────────┴───────────────┴───────────┴───────────────────────────────┘ -""".strip(), - { - "operators": [ - { - "operator_name": "npu_supported", - "operator_type": "test_type", - "placement": "NPU", - "notes": [], - }, - { - "operator_name": "cpu_only", - "operator_type": "test_type", - "placement": "CPU", - "notes": [{"note": "CPU only operator"}], - }, - { - "operator_name": "npu_unsupported", - "operator_type": "test_type", - "placement": "CPU", - "notes": 
[ - {"note": "Not supported operator"}, - {"note": "Reason why operator is not supported"}, - ], - }, - ] - }, - ), - ], -) -def test_report_operators( - ops: list[Operator], - expected_plain_text: str, - expected_json_dict: dict, - monkeypatch: pytest.MonkeyPatch, -) -> None: - """Test report_operatos formatter.""" - # make terminal wide enough to print whole table - monkeypatch.setenv("COLUMNS", "100") - - report = report_operators(ops) - assert isinstance(report, Table) - - plain_text = remove_ascii_codes(report.to_plain_text()) - assert plain_text == expected_plain_text - - json_dict = report.to_json() - assert json_dict == expected_json_dict - - -@pytest.mark.parametrize( - "device, expected_plain_text, expected_json_dict", - [ - [ - EthosUConfiguration("ethos-u55-256"), - """Device information: - Target ethos-u55 - MAC 256 - - Memory mode Shared_Sram - Const mem area Axi1 - Arena mem area Axi0 - Cache mem area Axi0 - Arena cache size 2,096,768 bytes - - System config Ethos_U55_High_End_Embedded - Accelerator clock 500,000,000 Hz - AXI0 port Sram - AXI1 port OffChipFlash - - Memory area settings: - Sram: - Clock scales 1.0 - Burst length 32 bytes - Read latency 32 cycles - Write latency 32 cycles - - Dram: - Clock scales 1.0 - Burst length 1 byte - Read latency 0 cycles - Write latency 0 cycles - - OnChipFlash: - Clock scales 1.0 - Burst length 1 byte - Read latency 0 cycles - Write latency 0 cycles - - OffChipFlash: - Clock scales 0.125 - Burst length 128 bytes - Read latency 64 cycles - Write latency 64 cycles - - Architecture settings: - Permanent storage mem area OffChipFlash - Feature map storage mem area Sram - Fast storage mem area Sram""", - { - "device": { - "target": "ethos-u55", - "mac": 256, - "memory_mode": { - "const_mem_area": "Axi1", - "arena_mem_area": "Axi0", - "cache_mem_area": "Axi0", - "arena_cache_size": {"value": 2096768, "unit": "bytes"}, - }, - "system_config": { - "accelerator_clock": {"value": 500000000.0, "unit": "Hz"}, - 
"axi0_port": "Sram", - "axi1_port": "OffChipFlash", - "memory_area": { - "Sram": { - "clock_scales": 1.0, - "burst_length": {"value": 32, "unit": "bytes"}, - "read_latency": {"value": 32, "unit": "cycles"}, - "write_latency": {"value": 32, "unit": "cycles"}, - }, - "Dram": { - "clock_scales": 1.0, - "burst_length": {"value": 1, "unit": "byte"}, - "read_latency": {"value": 0, "unit": "cycles"}, - "write_latency": {"value": 0, "unit": "cycles"}, - }, - "OnChipFlash": { - "clock_scales": 1.0, - "burst_length": {"value": 1, "unit": "byte"}, - "read_latency": {"value": 0, "unit": "cycles"}, - "write_latency": {"value": 0, "unit": "cycles"}, - }, - "OffChipFlash": { - "clock_scales": 0.125, - "burst_length": {"value": 128, "unit": "bytes"}, - "read_latency": {"value": 64, "unit": "cycles"}, - "write_latency": {"value": 64, "unit": "cycles"}, - }, - }, - }, - "arch_settings": { - "permanent_storage_mem_area": "OffChipFlash", - "feature_map_storage_mem_area": "Sram", - "fast_storage_mem_area": "Sram", - }, - } - }, - ], - ], -) -def test_report_device_details( - device: EthosUConfiguration, - expected_plain_text: str, - expected_json_dict: dict, -) -> None: - """Test report_operatos formatter.""" - report = report_device_details(device) - assert isinstance(report, Report) - - plain_text = report.to_plain_text() - assert plain_text == expected_plain_text - - json_dict = report.to_json() - assert json_dict == expected_json_dict - - -def test_get_reporter(tmp_path: Path) -> None: - """Test reporter functionality.""" - ops = Operators( - [ - Operator( - "npu_supported", - "op_type", - NpuSupported(True, []), - ), - ] - ) - - output = tmp_path / "output.json" - with get_reporter("json", output, ethos_u_formatters) as reporter: - assert isinstance(reporter, Reporter) - - with pytest.raises( - Exception, match="Unable to find appropriate formatter for some_data" - ): - reporter.submit("some_data") - - reporter.submit(ops) - - with open(output, encoding="utf-8") as file: - 
json_data = json.load(file) - - assert json_data == { - "operators_stats": [ - { - "npu_unsupported_ratio": 0.0, - "num_of_npu_supported_operators": 1, - "num_of_operators": 1, - } - ] - } diff --git a/tests/test_devices_tosa_advice_generation.py b/tests/test_devices_tosa_advice_generation.py deleted file mode 100644 index 1b97c8b..0000000 --- a/tests/test_devices_tosa_advice_generation.py +++ /dev/null @@ -1,56 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Tests for advice generation.""" -from __future__ import annotations - -import pytest - -from mlia.core.advice_generation import Advice -from mlia.core.common import AdviceCategory -from mlia.core.common import DataItem -from mlia.core.context import ExecutionContext -from mlia.devices.tosa.advice_generation import TOSAAdviceProducer -from mlia.devices.tosa.data_analysis import ModelIsNotTOSACompatible -from mlia.devices.tosa.data_analysis import ModelIsTOSACompatible - - -@pytest.mark.parametrize( - "input_data, advice_category, expected_advice", - [ - [ - ModelIsNotTOSACompatible(), - AdviceCategory.OPERATORS, - [ - Advice( - [ - "Some operators in the model are not TOSA compatible. " - "Please, refer to the operators table for more information." 
- ] - ) - ], - ], - [ - ModelIsTOSACompatible(), - AdviceCategory.OPERATORS, - [Advice(["Model is fully TOSA compatible."])], - ], - ], -) -def test_tosa_advice_producer( - tmpdir: str, - input_data: DataItem, - advice_category: AdviceCategory, - expected_advice: list[Advice], -) -> None: - """Test TOSA advice producer.""" - producer = TOSAAdviceProducer() - - context = ExecutionContext( - advice_category=advice_category, - working_dir=tmpdir, - ) - - producer.set_context(context) - producer.produce_advice(input_data) - - assert producer.get_advice() == expected_advice diff --git a/tests/test_devices_tosa_advisor.py b/tests/test_devices_tosa_advisor.py deleted file mode 100644 index 1c7a31a..0000000 --- a/tests/test_devices_tosa_advisor.py +++ /dev/null @@ -1,29 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Tests for TOSA advisor.""" -from pathlib import Path - -from mlia.core.context import ExecutionContext -from mlia.core.workflow import DefaultWorkflowExecutor -from mlia.devices.tosa.advisor import configure_and_get_tosa_advisor -from mlia.devices.tosa.advisor import TOSAInferenceAdvisor - - -def test_configure_and_get_tosa_advisor(test_tflite_model: Path) -> None: - """Test TOSA advisor configuration.""" - ctx = ExecutionContext() - - advisor = configure_and_get_tosa_advisor(ctx, "tosa", test_tflite_model) - workflow = advisor.configure(ctx) - - assert isinstance(advisor, TOSAInferenceAdvisor) - - assert ctx.event_handlers is not None - assert ctx.config_parameters == { - "tosa_inference_advisor": { - "model": str(test_tflite_model), - "target_profile": "tosa", - } - } - - assert isinstance(workflow, DefaultWorkflowExecutor) diff --git a/tests/test_devices_tosa_data_analysis.py b/tests/test_devices_tosa_data_analysis.py deleted file mode 100644 index f2da691..0000000 --- a/tests/test_devices_tosa_data_analysis.py +++ /dev/null @@ -1,33 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 
2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Tests for TOSA data analysis module.""" -from __future__ import annotations - -import pytest - -from mlia.backend.tosa_checker.compat import TOSACompatibilityInfo -from mlia.core.common import DataItem -from mlia.core.data_analysis import Fact -from mlia.devices.tosa.data_analysis import ModelIsNotTOSACompatible -from mlia.devices.tosa.data_analysis import ModelIsTOSACompatible -from mlia.devices.tosa.data_analysis import TOSADataAnalyzer - - -@pytest.mark.parametrize( - "input_data, expected_facts", - [ - [ - TOSACompatibilityInfo(True, []), - [ModelIsTOSACompatible()], - ], - [ - TOSACompatibilityInfo(False, []), - [ModelIsNotTOSACompatible()], - ], - ], -) -def test_tosa_data_analyzer(input_data: DataItem, expected_facts: list[Fact]) -> None: - """Test TOSA data analyzer.""" - analyzer = TOSADataAnalyzer() - analyzer.analyze_data(input_data) - assert analyzer.get_analyzed_data() == expected_facts diff --git a/tests/test_devices_tosa_data_collection.py b/tests/test_devices_tosa_data_collection.py deleted file mode 100644 index 0c1eda1..0000000 --- a/tests/test_devices_tosa_data_collection.py +++ /dev/null @@ -1,28 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""Tests for TOSA data collection module.""" -from pathlib import Path -from unittest.mock import MagicMock - -import pytest - -from mlia.backend.tosa_checker.compat import TOSACompatibilityInfo -from mlia.core.context import ExecutionContext -from mlia.devices.tosa.data_collection import TOSAOperatorCompatibility - - -def test_tosa_data_collection( - monkeypatch: pytest.MonkeyPatch, test_tflite_model: Path, tmpdir: str -) -> None: - """Test TOSA data collection.""" - monkeypatch.setattr( - "mlia.devices.tosa.data_collection.get_tosa_compatibility_info", - MagicMock(return_value=TOSACompatibilityInfo(True, [])), - ) - context = ExecutionContext(working_dir=tmpdir) - collector = TOSAOperatorCompatibility(test_tflite_model) - collector.set_context(context) - - data_item = collector.collect_data() - - assert isinstance(data_item, TOSACompatibilityInfo) diff --git a/tests/test_mlia_utils_py_manager.py b/tests/test_mlia_utils_py_manager.py deleted file mode 100644 index e41680d..0000000 --- a/tests/test_mlia_utils_py_manager.py +++ /dev/null @@ -1,73 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""Tests for python package manager.""" -import sys -from unittest.mock import MagicMock - -import pytest - -from mlia.utils.py_manager import get_package_manager -from mlia.utils.py_manager import PyPackageManager - - -def test_get_package_manager() -> None: - """Test function get_package_manager.""" - manager = get_package_manager() - assert isinstance(manager, PyPackageManager) - - -@pytest.fixture(name="mock_check_call") -def mock_check_call_fixture(monkeypatch: pytest.MonkeyPatch) -> MagicMock: - """Mock check_call function.""" - mock_check_call = MagicMock() - monkeypatch.setattr("mlia.utils.py_manager.check_call", mock_check_call) - - return mock_check_call - - -def test_py_package_manager_metadata() -> None: - """Test getting package status.""" - manager = PyPackageManager() - assert manager.package_installed("pytest") - assert manager.packages_installed(["pytest", "mlia"]) - - -def test_py_package_manager_install(mock_check_call: MagicMock) -> None: - """Test package installation.""" - manager = PyPackageManager() - with pytest.raises(ValueError, match="No package names provided"): - manager.install([]) - - manager.install(["mlia", "pytest"]) - mock_check_call.assert_called_once_with( - [ - sys.executable, - "-m", - "pip", - "--disable-pip-version-check", - "install", - "mlia", - "pytest", - ] - ) - - -def test_py_package_manager_uninstall(mock_check_call: MagicMock) -> None: - """Test package removal.""" - manager = PyPackageManager() - with pytest.raises(ValueError, match="No package names provided"): - manager.uninstall([]) - - manager.uninstall(["mlia", "pytest"]) - mock_check_call.assert_called_once_with( - [ - sys.executable, - "-m", - "pip", - "--disable-pip-version-check", - "uninstall", - "--yes", - "mlia", - "pytest", - ] - ) diff --git a/tests/test_target_cortex_a_advice_generation.py b/tests/test_target_cortex_a_advice_generation.py new file mode 100644 index 0000000..02a2b14 --- /dev/null +++ 
b/tests/test_target_cortex_a_advice_generation.py @@ -0,0 +1,196 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Tests for advice generation.""" +from __future__ import annotations + +import pytest + +from mlia.core.advice_generation import Advice +from mlia.core.common import AdviceCategory +from mlia.core.common import DataItem +from mlia.core.context import ExecutionContext +from mlia.nn.tensorflow.tflite_graph import TFL_ACTIVATION_FUNCTION +from mlia.target.cortex_a.advice_generation import CortexAAdviceProducer +from mlia.target.cortex_a.data_analysis import ModelHasCustomOperators +from mlia.target.cortex_a.data_analysis import ModelIsCortexACompatible +from mlia.target.cortex_a.data_analysis import ModelIsNotCortexACompatible +from mlia.target.cortex_a.data_analysis import ModelIsNotTFLiteCompatible +from mlia.target.cortex_a.data_analysis import TFLiteCompatibilityCheckFailed +from mlia.target.cortex_a.operator_compatibility import ARMNN_TFLITE_DELEGATE + +BACKEND_INFO = ( + f"{ARMNN_TFLITE_DELEGATE['metadata']['backend']} " + f"{ARMNN_TFLITE_DELEGATE['metadata']['version']}" +) + + +@pytest.mark.parametrize( + "input_data, advice_category, expected_advice", + [ + [ + ModelIsNotCortexACompatible(BACKEND_INFO, {"UNSUPPORTED_OP"}, {}), + AdviceCategory.OPERATORS, + [ + Advice( + [ + "The following operators are not supported by " + f"{BACKEND_INFO} and will fall back to the TensorFlow " + "Lite runtime:", + " - UNSUPPORTED_OP", + ] + ), + Advice( + [ + "Please, refer to the full table of operators above " + "for more information.", + CortexAAdviceProducer.cortex_a_disclaimer, + ] + ), + ], + ], + [ + ModelIsNotCortexACompatible( + BACKEND_INFO, + {"UNSUPPORTED_OP"}, + { + "CONV_2D": ModelIsNotCortexACompatible.ActivationFunctionSupport( + used_unsupported={TFL_ACTIVATION_FUNCTION.SIGN_BIT.name}, + supported={"RELU"}, + ) + }, + ), + AdviceCategory.OPERATORS, + [ + Advice( + [ + "The 
following operators are not supported by " + f"{BACKEND_INFO} and will fall back to the TensorFlow " + "Lite runtime:", + " - UNSUPPORTED_OP", + ] + ), + Advice( + [ + "The fused activation functions of the following " + f"operators are not supported by {BACKEND_INFO}. " + "Please consider using one of the supported activation " + "functions instead:", + " - CONV_2D\n" + " - Used unsupported: {'SIGN_BIT'}\n" + " - Supported: {'RELU'}", + ] + ), + Advice( + [ + "Please, refer to the full table of operators above " + "for more information.", + CortexAAdviceProducer.cortex_a_disclaimer, + ] + ), + ], + ], + [ + ModelIsCortexACompatible(BACKEND_INFO), + AdviceCategory.OPERATORS, + [ + Advice( + [ + f"Model is fully compatible with {BACKEND_INFO} for Cortex-A.", + CortexAAdviceProducer.cortex_a_disclaimer, + ] + ) + ], + ], + [ + ModelIsNotTFLiteCompatible( + flex_ops=["flex_op1", "flex_op2"], + custom_ops=["custom_op1", "custom_op2"], + ), + AdviceCategory.OPERATORS, + [ + Advice( + [ + "The following operators are not natively " + "supported by TensorFlow Lite: flex_op1, flex_op2.", + "Using select TensorFlow operators in TensorFlow Lite model " + "requires special initialization of TFLiteConverter and " + "TensorFlow Lite run-time.", + "Please refer to the TensorFlow documentation for " + "more details: " + "https://www.tensorflow.org/lite/guide/ops_select", + "Note, such models are not supported by " + "the ML Inference Advisor.", + ] + ), + Advice( + [ + "The following operators appear to be custom and not natively " + "supported by TensorFlow Lite: custom_op1, custom_op2.", + "Using custom operators in TensorFlow Lite model " + "requires special initialization of TFLiteConverter and " + "TensorFlow Lite run-time.", + "Please refer to the TensorFlow documentation for " + "more details: " + "https://www.tensorflow.org/lite/guide/ops_custom", + "Note, such models are not supported by " + "the ML Inference Advisor.", + ] + ), + ], + ], + [ + 
ModelIsNotTFLiteCompatible(), + AdviceCategory.OPERATORS, + [ + Advice( + [ + "Model could not be converted into TensorFlow Lite format.", + "Please refer to the table for more details.", + ] + ), + ], + ], + [ + ModelHasCustomOperators(), + AdviceCategory.OPERATORS, + [ + Advice( + [ + "Models with custom operators require special initialization " + "and currently are not supported by the ML Inference Advisor.", + ] + ), + ], + ], + [ + TFLiteCompatibilityCheckFailed(), + AdviceCategory.OPERATORS, + [ + Advice( + [ + "Model could not be converted into TensorFlow Lite format.", + "Please refer to the table for more details.", + ] + ), + ], + ], + ], +) +def test_cortex_a_advice_producer( + tmpdir: str, + input_data: DataItem, + advice_category: AdviceCategory, + expected_advice: list[Advice], +) -> None: + """Test Cortex-A advice producer.""" + producer = CortexAAdviceProducer() + + context = ExecutionContext( + advice_category=advice_category, + working_dir=tmpdir, + ) + + producer.set_context(context) + producer.produce_advice(input_data) + + assert producer.get_advice() == expected_advice diff --git a/tests/test_target_cortex_a_advisor.py b/tests/test_target_cortex_a_advisor.py new file mode 100644 index 0000000..1788a6d --- /dev/null +++ b/tests/test_target_cortex_a_advisor.py @@ -0,0 +1,34 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +"""Tests for Cortex-A MLIA module.""" +from pathlib import Path + +from mlia.core.context import ExecutionContext +from mlia.core.workflow import DefaultWorkflowExecutor +from mlia.target.cortex_a.advisor import configure_and_get_cortexa_advisor +from mlia.target.cortex_a.advisor import CortexAInferenceAdvisor + + +def test_advisor_metadata() -> None: + """Test advisor metadata.""" + assert CortexAInferenceAdvisor.name() == "cortex_a_inference_advisor" + + +def test_configure_and_get_cortex_a_advisor(test_tflite_model: Path) -> None: + """Test Cortex-A advisor configuration.""" + ctx = ExecutionContext() + + advisor = configure_and_get_cortexa_advisor(ctx, "cortex-a", test_tflite_model) + workflow = advisor.configure(ctx) + + assert isinstance(advisor, CortexAInferenceAdvisor) + + assert ctx.event_handlers is not None + assert ctx.config_parameters == { + "cortex_a_inference_advisor": { + "model": str(test_tflite_model), + "target_profile": "cortex-a", + } + } + + assert isinstance(workflow, DefaultWorkflowExecutor) diff --git a/tests/test_target_cortex_a_data_analysis.py b/tests/test_target_cortex_a_data_analysis.py new file mode 100644 index 0000000..b223b01 --- /dev/null +++ b/tests/test_target_cortex_a_data_analysis.py @@ -0,0 +1,162 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +"""Tests for Cortex-A data analysis module.""" +from __future__ import annotations + +import pytest + +from mlia.core.common import DataItem +from mlia.core.data_analysis import Fact +from mlia.nn.tensorflow.tflite_compat import TFLiteCompatibilityInfo +from mlia.nn.tensorflow.tflite_compat import TFLiteCompatibilityStatus +from mlia.nn.tensorflow.tflite_compat import TFLiteConversionError +from mlia.nn.tensorflow.tflite_compat import TFLiteConversionErrorCode +from mlia.nn.tensorflow.tflite_graph import TFL_ACTIVATION_FUNCTION +from mlia.target.cortex_a.data_analysis import CortexADataAnalyzer +from mlia.target.cortex_a.data_analysis import ModelHasCustomOperators +from mlia.target.cortex_a.data_analysis import ModelIsCortexACompatible +from mlia.target.cortex_a.data_analysis import ModelIsNotCortexACompatible +from mlia.target.cortex_a.data_analysis import ModelIsNotTFLiteCompatible +from mlia.target.cortex_a.data_analysis import TFLiteCompatibilityCheckFailed +from mlia.target.cortex_a.operator_compatibility import ARMNN_TFLITE_DELEGATE +from mlia.target.cortex_a.operators import CortexACompatibilityInfo +from mlia.target.cortex_a.operators import Operator + +BACKEND_INFO = ( + f"{ARMNN_TFLITE_DELEGATE['metadata']['backend']} " + f"{ARMNN_TFLITE_DELEGATE['metadata']['version']}" +) + + +@pytest.mark.parametrize( + "input_data, expected_facts", + [ + [ + CortexACompatibilityInfo(True, []), + [ModelIsCortexACompatible(BACKEND_INFO)], + ], + [ + CortexACompatibilityInfo( + True, + [ + Operator( + "CONV_2D", + "somewhere", + support_type=Operator.SupportType.COMPATIBLE, + activation_func=TFL_ACTIVATION_FUNCTION.NONE, + ), + Operator( + "CUSTOM", + "somewhere else", + support_type=Operator.SupportType.COMPATIBLE, + activation_func=TFL_ACTIVATION_FUNCTION.SIGN_BIT, + custom_name="MaxPool3D", + ), + ], + ), + [ModelIsCortexACompatible(BACKEND_INFO)], + ], + [ + # pylint: disable=line-too-long + CortexACompatibilityInfo( + False, 
+ [ + Operator( + "UNSUPPORTED_OP", + "somewhere", + support_type=Operator.SupportType.OP_NOT_SUPPORTED, + activation_func=TFL_ACTIVATION_FUNCTION.NONE, + ), + Operator( + "CUSTOM", + "somewhere", + support_type=Operator.SupportType.OP_NOT_SUPPORTED, + activation_func=TFL_ACTIVATION_FUNCTION.NONE, + custom_name="UNSUPPORTED_OP", + ), + Operator( + "CONV_2D", + "somewhere else", + support_type=Operator.SupportType.ACTIVATION_NOT_SUPPORTED, + activation_func=TFL_ACTIVATION_FUNCTION.SIGN_BIT, + ), + ], + ), + [ + ModelIsNotCortexACompatible( + BACKEND_INFO, + { + "UNSUPPORTED_OP", + "CUSTOM - 'UNSUPPORTED_OP'", + }, + { + "CONV_2D": ModelIsNotCortexACompatible.ActivationFunctionSupport( + used_unsupported={TFL_ACTIVATION_FUNCTION.SIGN_BIT.name}, + supported={ + "RELU", + "RELU6", + "RELU_N1_TO_1", + "SIGMOID", + "TANH", + "NONE", + }, + ) + }, + ) + ], + # pylint: enable=line-too-long + ], + [ + TFLiteCompatibilityInfo(status=TFLiteCompatibilityStatus.COMPATIBLE), + [], + ], + [ + TFLiteCompatibilityInfo( + status=TFLiteCompatibilityStatus.MODEL_WITH_CUSTOM_OP_ERROR + ), + [ModelHasCustomOperators()], + ], + [ + TFLiteCompatibilityInfo(status=TFLiteCompatibilityStatus.UNKNOWN_ERROR), + [TFLiteCompatibilityCheckFailed()], + ], + [ + TFLiteCompatibilityInfo( + status=TFLiteCompatibilityStatus.TFLITE_CONVERSION_ERROR + ), + [ModelIsNotTFLiteCompatible(custom_ops=[], flex_ops=[])], + ], + [ + TFLiteCompatibilityInfo( + status=TFLiteCompatibilityStatus.TFLITE_CONVERSION_ERROR, + conversion_errors=[ + TFLiteConversionError( + "error", + TFLiteConversionErrorCode.NEEDS_CUSTOM_OPS, + "custom_op1", + [], + ), + TFLiteConversionError( + "error", + TFLiteConversionErrorCode.NEEDS_FLEX_OPS, + "flex_op1", + [], + ), + ], + ), + [ + ModelIsNotTFLiteCompatible( + custom_ops=["custom_op1"], + flex_ops=["flex_op1"], + ) + ], + ], + ], +) +def test_cortex_a_data_analyzer( + input_data: DataItem, expected_facts: list[Fact] +) -> None: + """Test Cortex-A data analyzer.""" + analyzer = 
CortexADataAnalyzer() + analyzer.analyze_data(input_data) + assert analyzer.get_analyzed_data() == expected_facts diff --git a/tests/test_target_cortex_a_data_collection.py b/tests/test_target_cortex_a_data_collection.py new file mode 100644 index 0000000..7504166 --- /dev/null +++ b/tests/test_target_cortex_a_data_collection.py @@ -0,0 +1,52 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Tests for Cortex-A data collection module.""" +from pathlib import Path +from unittest.mock import MagicMock + +import pytest + +from mlia.core.context import ExecutionContext +from mlia.target.cortex_a.data_collection import CortexAOperatorCompatibility +from mlia.target.cortex_a.operators import CortexACompatibilityInfo + + +def check_cortex_a_data_collection( + monkeypatch: pytest.MonkeyPatch, model: Path, tmpdir: str +) -> None: + """Test Cortex-A data collection.""" + assert CortexAOperatorCompatibility.name() + + monkeypatch.setattr( + "mlia.target.cortex_a.data_collection.get_cortex_a_compatibility_info", + MagicMock(return_value=CortexACompatibilityInfo(True, [])), + ) + + context = ExecutionContext(working_dir=tmpdir) + collector = CortexAOperatorCompatibility(model) + collector.set_context(context) + + data_item = collector.collect_data() + + assert isinstance(data_item, CortexACompatibilityInfo) + + +def test_cortex_a_data_collection_tflite( + monkeypatch: pytest.MonkeyPatch, test_tflite_model: Path, tmpdir: str +) -> None: + """Test Cortex-A data collection with a TensorFlow Lite model.""" + check_cortex_a_data_collection(monkeypatch, test_tflite_model, tmpdir) + + +def test_cortex_a_data_collection_keras( + monkeypatch: pytest.MonkeyPatch, test_keras_model: Path, tmpdir: str +) -> None: + """Test Cortex-A data collection with a Keras model.""" + check_cortex_a_data_collection(monkeypatch, test_keras_model, tmpdir) + + +def test_cortex_a_data_collection_tf( + monkeypatch: pytest.MonkeyPatch, 
test_tf_model: Path, tmpdir: str +) -> None: + """Test Cortex-A data collection with a SavedModel.""" + check_cortex_a_data_collection(monkeypatch, test_tf_model, tmpdir) diff --git a/tests/test_target_cortex_a_operators.py b/tests/test_target_cortex_a_operators.py new file mode 100644 index 0000000..94eb890 --- /dev/null +++ b/tests/test_target_cortex_a_operators.py @@ -0,0 +1,73 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Tests for Cortex-A operator compatibility.""" +from pathlib import Path + +import pytest +import tensorflow as tf + +from mlia.nn.tensorflow.tflite_graph import TFL_OP +from mlia.nn.tensorflow.utils import convert_to_tflite +from mlia.target.cortex_a import operator_compatibility as op_compat +from mlia.target.cortex_a.operators import CortexACompatibilityInfo +from mlia.target.cortex_a.operators import get_cortex_a_compatibility_info +from mlia.target.cortex_a.operators import Operator + + +def test_op_compat_data() -> None: + """Make sure all data contains the necessary items.""" + builtin_tfl_ops = {op.name for op in TFL_OP} + for data in [op_compat.ARMNN_TFLITE_DELEGATE]: + assert "metadata" in data + assert "backend" in data["metadata"] + assert "version" in data["metadata"] + assert "builtin_ops" in data + for comp in data["builtin_ops"]: + assert comp in builtin_tfl_ops + assert "custom_ops" in data + + +def check_get_cortex_a_compatibility_info( + model_path: Path, + expected_success: bool, +) -> None: + """Check the function 'get_cortex_a_compatibility_info'.""" + compat_info = get_cortex_a_compatibility_info(model_path) + assert isinstance(compat_info, CortexACompatibilityInfo) + assert expected_success == compat_info.cortex_a_compatible + assert compat_info.operators + for oper in compat_info.operators: + assert oper.name + assert oper.location + assert oper.support_type in Operator.SupportType + + +def test_get_cortex_a_compatibility_info_compatible( + 
test_tflite_model: Path, +) -> None: + """Test a fully compatible TensorFlow Lite model.""" + check_get_cortex_a_compatibility_info(test_tflite_model, expected_success=True) + + +def test_get_cortex_a_compatibility_info_not_compatible( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Construct and test a NOT fully compatible TensorFlow Lite model.""" + keras_model = tf.keras.Sequential( + [ + tf.keras.Input(shape=(28, 28, 1), batch_size=1, name="input"), + tf.keras.layers.Conv2D( + filters=12, kernel_size=(3, 3), activation="softmax", name="conv1" + ), + tf.keras.layers.LeakyReLU(), + ] + ) + keras_model.compile(optimizer="sgd", loss="mean_squared_error") + tflite_model = convert_to_tflite(keras_model, quantized=False) + + monkeypatch.setattr( + "mlia.nn.tensorflow.tflite_graph.load_tflite", lambda _p: tflite_model + ) + check_get_cortex_a_compatibility_info( + Path("NOT_USED_BECAUSE_OF_MOCKING"), expected_success=False + ) diff --git a/tests/test_target_cortex_a_reporters.py b/tests/test_target_cortex_a_reporters.py new file mode 100644 index 0000000..4b39aa1 --- /dev/null +++ b/tests/test_target_cortex_a_reporters.py @@ -0,0 +1,53 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +"""Tests for Cortex-A reporters.""" +from typing import Any + +import pytest + +from mlia.core.advice_generation import Advice +from mlia.core.reporting import Report +from mlia.nn.tensorflow.tflite_compat import TFLiteCompatibilityInfo +from mlia.nn.tensorflow.tflite_compat import TFLiteCompatibilityStatus +from mlia.nn.tensorflow.tflite_graph import TFL_ACTIVATION_FUNCTION +from mlia.target.cortex_a.config import CortexAConfiguration +from mlia.target.cortex_a.operators import Operator +from mlia.target.cortex_a.reporters import cortex_a_formatters +from mlia.target.cortex_a.reporters import report_device + + +def test_report_device() -> None: + """Test function report_device().""" + report = report_device(CortexAConfiguration("cortex-a")) + assert report.to_plain_text() + + +@pytest.mark.parametrize( + "data", + ( + [Advice(["Sample", "Advice"])], + TFLiteCompatibilityInfo(status=TFLiteCompatibilityStatus.COMPATIBLE), + [ + Operator( + name="Test", + location="loc", + support_type=Operator.SupportType.OP_NOT_SUPPORTED, + activation_func=TFL_ACTIVATION_FUNCTION.NONE, + ) + ], + ), +) +def test_cortex_a_formatters(data: Any) -> None: + """Test function cortex_a_formatters() with valid input.""" + formatter = cortex_a_formatters(data) + report = formatter(data) + assert isinstance(report, Report) + + +def test_cortex_a_formatters_invalid_data() -> None: + """Test cortex_a_formatters() with invalid input.""" + with pytest.raises( + Exception, + match=r"^Unable to find appropriate formatter for .*", + ): + cortex_a_formatters(12) diff --git a/tests/test_target_ethos_u_advice_generation.py b/tests/test_target_ethos_u_advice_generation.py new file mode 100644 index 0000000..1569592 --- /dev/null +++ b/tests/test_target_ethos_u_advice_generation.py @@ -0,0 +1,482 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +"""Tests for Ethos-U advice generation.""" +from __future__ import annotations + +import pytest + +from mlia.cli.helpers import CLIActionResolver +from mlia.core.advice_generation import Advice +from mlia.core.common import AdviceCategory +from mlia.core.common import DataItem +from mlia.core.context import ExecutionContext +from mlia.core.helpers import ActionResolver +from mlia.core.helpers import APIActionResolver +from mlia.nn.tensorflow.optimizations.select import OptimizationSettings +from mlia.target.ethos_u.advice_generation import EthosUAdviceProducer +from mlia.target.ethos_u.advice_generation import EthosUStaticAdviceProducer +from mlia.target.ethos_u.data_analysis import AllOperatorsSupportedOnNPU +from mlia.target.ethos_u.data_analysis import HasCPUOnlyOperators +from mlia.target.ethos_u.data_analysis import HasUnsupportedOnNPUOperators +from mlia.target.ethos_u.data_analysis import OptimizationDiff +from mlia.target.ethos_u.data_analysis import OptimizationResults +from mlia.target.ethos_u.data_analysis import PerfMetricDiff + + +@pytest.mark.parametrize( + "input_data, advice_category, action_resolver, expected_advice", + [ + [ + AllOperatorsSupportedOnNPU(), + AdviceCategory.OPERATORS, + APIActionResolver(), + [ + Advice( + [ + "You don't have any unsupported operators, your model will " + "run completely on NPU." 
+ ] + ) + ], + ], + [ + AllOperatorsSupportedOnNPU(), + AdviceCategory.OPERATORS, + CLIActionResolver( + { + "target_profile": "sample_target", + "model": "sample_model.tflite", + } + ), + [ + Advice( + [ + "You don't have any unsupported operators, your model will " + "run completely on NPU.", + "Check the estimated performance by running the " + "following command: ", + "mlia performance --target-profile sample_target " + "sample_model.tflite", + ] + ) + ], + ], + [ + HasCPUOnlyOperators(cpu_only_ops=["OP1", "OP2", "OP3"]), + AdviceCategory.OPERATORS, + APIActionResolver(), + [ + Advice( + [ + "You have at least 3 operators that is CPU only: " + "OP1,OP2,OP3.", + "Using operators that are supported by the NPU will " + "improve performance.", + ] + ) + ], + ], + [ + HasCPUOnlyOperators(cpu_only_ops=["OP1", "OP2", "OP3"]), + AdviceCategory.OPERATORS, + CLIActionResolver({}), + [ + Advice( + [ + "You have at least 3 operators that is CPU only: " + "OP1,OP2,OP3.", + "Using operators that are supported by the NPU will " + "improve performance.", + "For guidance on supported operators, run: mlia operators " + "--supported-ops-report", + ] + ) + ], + ], + [ + HasUnsupportedOnNPUOperators(npu_unsupported_ratio=0.4), + AdviceCategory.OPERATORS, + APIActionResolver(), + [ + Advice( + [ + "You have 40% of operators that cannot be placed on the NPU.", + "For better performance, please review the reasons reported " + "in the table, and adjust the model accordingly " + "where possible.", + ] + ) + ], + ], + [ + HasUnsupportedOnNPUOperators(npu_unsupported_ratio=0.4), + AdviceCategory.OPERATORS, + CLIActionResolver({}), + [ + Advice( + [ + "You have 40% of operators that cannot be placed on the NPU.", + "For better performance, please review the reasons reported " + "in the table, and adjust the model accordingly " + "where possible.", + ] + ) + ], + ], + [ + OptimizationResults( + [ + OptimizationDiff( + opt_type=[OptimizationSettings("pruning", 0.5, None)], + opt_diffs={ + 
"sram": PerfMetricDiff(100, 150), + "dram": PerfMetricDiff(100, 50), + "on_chip_flash": PerfMetricDiff(100, 100), + "off_chip_flash": PerfMetricDiff(100, 100), + "npu_total_cycles": PerfMetricDiff(10, 5), + }, + ), + ] + ), + AdviceCategory.OPTIMIZATION, + APIActionResolver(), + [ + Advice( + [ + "With the selected optimization (pruning: 0.5)", + "- You have achieved 50.00% performance improvement in " + "DRAM used (KB)", + "- You have achieved 50.00% performance improvement in " + "NPU total cycles", + "- SRAM used (KB) have degraded by 50.00%", + "You can try to push the optimization target higher " + "(e.g. pruning: 0.6) " + "to check if those results can be further improved.", + ] + ), + Advice( + [ + "The applied tooling techniques have an impact " + "on accuracy. Additional hyperparameter tuning may be required " + "after any optimization." + ] + ), + ], + ], + [ + OptimizationResults( + [ + OptimizationDiff( + opt_type=[OptimizationSettings("pruning", 0.5, None)], + opt_diffs={ + "sram": PerfMetricDiff(100, 150), + "dram": PerfMetricDiff(100, 50), + "on_chip_flash": PerfMetricDiff(100, 100), + "off_chip_flash": PerfMetricDiff(100, 100), + "npu_total_cycles": PerfMetricDiff(10, 5), + }, + ), + ] + ), + AdviceCategory.OPTIMIZATION, + CLIActionResolver({"model": "sample_model.h5"}), + [ + Advice( + [ + "With the selected optimization (pruning: 0.5)", + "- You have achieved 50.00% performance improvement in " + "DRAM used (KB)", + "- You have achieved 50.00% performance improvement in " + "NPU total cycles", + "- SRAM used (KB) have degraded by 50.00%", + "You can try to push the optimization target higher " + "(e.g. pruning: 0.6) " + "to check if those results can be further improved.", + "For more info: mlia optimization --help", + "Optimization command: " + "mlia optimization --optimization-type pruning " + "--optimization-target 0.6 sample_model.h5", + ] + ), + Advice( + [ + "The applied tooling techniques have an impact " + "on accuracy. 
Additional hyperparameter tuning may be required " + "after any optimization." + ] + ), + ], + ], + [ + OptimizationResults( + [ + OptimizationDiff( + opt_type=[ + OptimizationSettings("pruning", 0.5, None), + OptimizationSettings("clustering", 32, None), + ], + opt_diffs={ + "sram": PerfMetricDiff(100, 150), + "dram": PerfMetricDiff(100, 50), + "on_chip_flash": PerfMetricDiff(100, 100), + "off_chip_flash": PerfMetricDiff(100, 100), + "npu_total_cycles": PerfMetricDiff(10, 5), + }, + ), + ] + ), + AdviceCategory.OPTIMIZATION, + APIActionResolver(), + [ + Advice( + [ + "With the selected optimization (pruning: 0.5, clustering: 32)", + "- You have achieved 50.00% performance improvement in " + "DRAM used (KB)", + "- You have achieved 50.00% performance improvement in " + "NPU total cycles", + "- SRAM used (KB) have degraded by 50.00%", + "You can try to push the optimization target higher " + "(e.g. pruning: 0.6 and/or clustering: 16) " + "to check if those results can be further improved.", + ] + ), + Advice( + [ + "The applied tooling techniques have an impact " + "on accuracy. Additional hyperparameter tuning may be required " + "after any optimization." + ] + ), + ], + ], + [ + OptimizationResults( + [ + OptimizationDiff( + opt_type=[ + OptimizationSettings("clustering", 2, None), + ], + opt_diffs={ + "sram": PerfMetricDiff(100, 150), + "dram": PerfMetricDiff(100, 50), + "on_chip_flash": PerfMetricDiff(100, 100), + "off_chip_flash": PerfMetricDiff(100, 100), + "npu_total_cycles": PerfMetricDiff(10, 5), + }, + ), + ] + ), + AdviceCategory.OPTIMIZATION, + APIActionResolver(), + [ + Advice( + [ + "With the selected optimization (clustering: 2)", + "- You have achieved 50.00% performance improvement in " + "DRAM used (KB)", + "- You have achieved 50.00% performance improvement in " + "NPU total cycles", + "- SRAM used (KB) have degraded by 50.00%", + ] + ), + Advice( + [ + "The applied tooling techniques have an impact " + "on accuracy. 
Additional hyperparameter tuning may be required " + "after any optimization." + ] + ), + ], + ], + [ + OptimizationResults( + [ + OptimizationDiff( + opt_type=[OptimizationSettings("pruning", 0.5, None)], + opt_diffs={ + "sram": PerfMetricDiff(100, 150), + "dram": PerfMetricDiff(100, 150), + "on_chip_flash": PerfMetricDiff(100, 150), + "off_chip_flash": PerfMetricDiff(100, 150), + "npu_total_cycles": PerfMetricDiff(10, 100), + }, + ), + ] + ), + AdviceCategory.OPTIMIZATION, + APIActionResolver(), + [ + Advice( + [ + "With the selected optimization (pruning: 0.5)", + "- DRAM used (KB) have degraded by 50.00%", + "- SRAM used (KB) have degraded by 50.00%", + "- On chip flash used (KB) have degraded by 50.00%", + "- Off chip flash used (KB) have degraded by 50.00%", + "- NPU total cycles have degraded by 900.00%", + "The performance seems to have degraded after " + "applying the selected optimizations, " + "try exploring different optimization types/targets.", + ] + ), + Advice( + [ + "The applied tooling techniques have an impact " + "on accuracy. Additional hyperparameter tuning may be required " + "after any optimization." 
+ ] + ), + ], + ], + [ + OptimizationResults( + [ + OptimizationDiff( + opt_type=[OptimizationSettings("pruning", 0.5, None)], + opt_diffs={ + "sram": PerfMetricDiff(100, 150), + "dram": PerfMetricDiff(100, 150), + "on_chip_flash": PerfMetricDiff(100, 150), + "off_chip_flash": PerfMetricDiff(100, 150), + "npu_total_cycles": PerfMetricDiff(10, 100), + }, + ), + OptimizationDiff( + opt_type=[OptimizationSettings("pruning", 0.6, None)], + opt_diffs={ + "sram": PerfMetricDiff(100, 150), + "dram": PerfMetricDiff(100, 150), + "on_chip_flash": PerfMetricDiff(100, 150), + "off_chip_flash": PerfMetricDiff(100, 150), + "npu_total_cycles": PerfMetricDiff(10, 100), + }, + ), + ] + ), + AdviceCategory.OPTIMIZATION, + APIActionResolver(), + [], # no advice for more than one optimization result + ], + ], +) +def test_ethosu_advice_producer( + tmpdir: str, + input_data: DataItem, + expected_advice: list[Advice], + advice_category: AdviceCategory, + action_resolver: ActionResolver, +) -> None: + """Test Ethos-U Advice producer.""" + producer = EthosUAdviceProducer() + + context = ExecutionContext( + advice_category=advice_category, + working_dir=tmpdir, + action_resolver=action_resolver, + ) + + producer.set_context(context) + producer.produce_advice(input_data) + + assert producer.get_advice() == expected_advice + + +@pytest.mark.parametrize( + "advice_category, action_resolver, expected_advice", + [ + [ + AdviceCategory.ALL, + None, + [], + ], + [ + AdviceCategory.OPERATORS, + None, + [], + ], + [ + AdviceCategory.PERFORMANCE, + APIActionResolver(), + [ + Advice( + [ + "You can improve the inference time by using only operators " + "that are supported by the NPU.", + ] + ), + Advice( + [ + "Check if you can improve the performance by applying " + "tooling techniques to your model." 
+ ] + ), + ], + ], + [ + AdviceCategory.PERFORMANCE, + CLIActionResolver({"model": "test_model.h5"}), + [ + Advice( + [ + "You can improve the inference time by using only operators " + "that are supported by the NPU.", + "Try running the following command to verify that:", + "mlia operators test_model.h5", + ] + ), + Advice( + [ + "Check if you can improve the performance by applying " + "tooling techniques to your model.", + "For example: mlia optimization --optimization-type " + "pruning,clustering --optimization-target 0.5,32 " + "test_model.h5", + "For more info: mlia optimization --help", + ] + ), + ], + ], + [ + AdviceCategory.OPTIMIZATION, + APIActionResolver(), + [ + Advice( + [ + "For better performance, make sure that all the operators " + "of your final TensorFlow Lite model are supported by the NPU.", + ] + ) + ], + ], + [ + AdviceCategory.OPTIMIZATION, + CLIActionResolver({"model": "test_model.h5"}), + [ + Advice( + [ + "For better performance, make sure that all the operators " + "of your final TensorFlow Lite model are supported by the NPU.", + "For more details, run: mlia operators --help", + ] + ) + ], + ], + ], +) +def test_ethosu_static_advice_producer( + tmpdir: str, + advice_category: AdviceCategory, + action_resolver: ActionResolver, + expected_advice: list[Advice], +) -> None: + """Test static advice generation.""" + producer = EthosUStaticAdviceProducer() + + context = ExecutionContext( + advice_category=advice_category, + working_dir=tmpdir, + action_resolver=action_resolver, + ) + producer.set_context(context) + assert producer.get_advice() == expected_advice diff --git a/tests/test_target_ethos_u_advisor.py b/tests/test_target_ethos_u_advisor.py new file mode 100644 index 0000000..fb68800 --- /dev/null +++ b/tests/test_target_ethos_u_advisor.py @@ -0,0 +1,9 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +"""Tests for Ethos-U MLIA module.""" +from mlia.target.ethos_u.advisor import EthosUInferenceAdvisor + + +def test_advisor_metadata() -> None: + """Test advisor metadata.""" + assert EthosUInferenceAdvisor.name() == "ethos_u_inference_advisor" diff --git a/tests/test_target_ethos_u_config.py b/tests/test_target_ethos_u_config.py new file mode 100644 index 0000000..6ccd5ce --- /dev/null +++ b/tests/test_target_ethos_u_config.py @@ -0,0 +1,125 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Tests for config module.""" +from __future__ import annotations + +from contextlib import ExitStack as does_not_raise +from typing import Any +from unittest.mock import MagicMock + +import pytest + +from mlia.backend.vela.compiler import VelaCompilerOptions +from mlia.target.ethos_u.config import EthosUConfiguration +from mlia.target.ethos_u.config import get_target +from mlia.utils.filesystem import get_vela_config + + +def test_compiler_options_default_init() -> None: + """Test compiler options default init.""" + opts = VelaCompilerOptions() + + assert opts.config_files is None + assert opts.system_config == "internal-default" + assert opts.memory_mode == "internal-default" + assert opts.accelerator_config is None + assert opts.max_block_dependency == 3 + assert opts.arena_cache_size is None + assert opts.tensor_allocator == "HillClimb" + assert opts.cpu_tensor_alignment == 16 + assert opts.optimization_strategy == "Performance" + assert opts.output_dir is None + + +def test_ethosu_target() -> None: + """Test Ethos-U target configuration init.""" + default_config = EthosUConfiguration("ethos-u55-256") + + assert default_config.target == "ethos-u55" + assert default_config.mac == 256 + assert default_config.compiler_options is not None + + +def test_get_target() -> None: + """Test function get_target.""" + with pytest.raises(Exception, match="No target profile 
given"): + get_target(None) # type: ignore + + with pytest.raises(Exception, match="Unable to find target profile unknown"): + get_target("unknown") + + u65_device = get_target("ethos-u65-512") + + assert isinstance(u65_device, EthosUConfiguration) + assert u65_device.target == "ethos-u65" + assert u65_device.mac == 512 + assert u65_device.compiler_options.accelerator_config == "ethos-u65-512" + assert u65_device.compiler_options.memory_mode == "Dedicated_Sram" + assert u65_device.compiler_options.config_files == str(get_vela_config()) + + +@pytest.mark.parametrize( + "profile_data, expected_error", + [ + [ + {}, + pytest.raises( + Exception, + match="Mandatory fields missing from target profile: " + r"\['mac', 'memory_mode', 'system_config', 'target'\]", + ), + ], + [ + {"target": "ethos-u65", "mac": 512}, + pytest.raises( + Exception, + match="Mandatory fields missing from target profile: " + r"\['memory_mode', 'system_config'\]", + ), + ], + [ + { + "target": "ethos-u65", + "mac": 2, + "system_config": "Ethos_U65_Embedded", + "memory_mode": "Shared_Sram", + }, + pytest.raises( + Exception, + match=r"Mac value for selected device should be in \[256, 512\]", + ), + ], + [ + { + "target": "ethos-u55", + "mac": 1, + "system_config": "Ethos_U55_High_End_Embedded", + "memory_mode": "Shared_Sram", + }, + pytest.raises( + Exception, + match="Mac value for selected device should be " + r"in \[32, 64, 128, 256\]", + ), + ], + [ + { + "target": "ethos-u65", + "mac": 512, + "system_config": "Ethos_U65_Embedded", + "memory_mode": "Shared_Sram", + }, + does_not_raise(), + ], + ], +) +def test_ethosu_configuration( + monkeypatch: pytest.MonkeyPatch, profile_data: dict[str, Any], expected_error: Any +) -> None: + """Test creating Ethos-U configuration.""" + monkeypatch.setattr( + "mlia.target.ethos_u.config.get_profile", MagicMock(return_value=profile_data) + ) + + with expected_error: + EthosUConfiguration("target") diff --git a/tests/test_target_ethos_u_data_analysis.py 
b/tests/test_target_ethos_u_data_analysis.py new file mode 100644 index 0000000..bac27ad --- /dev/null +++ b/tests/test_target_ethos_u_data_analysis.py @@ -0,0 +1,147 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Tests for Ethos-U data analysis module.""" +from __future__ import annotations + +import pytest + +from mlia.backend.vela.compat import NpuSupported +from mlia.backend.vela.compat import Operator +from mlia.backend.vela.compat import Operators +from mlia.core.common import DataItem +from mlia.core.data_analysis import Fact +from mlia.nn.tensorflow.optimizations.select import OptimizationSettings +from mlia.target.ethos_u.config import EthosUConfiguration +from mlia.target.ethos_u.data_analysis import AllOperatorsSupportedOnNPU +from mlia.target.ethos_u.data_analysis import EthosUDataAnalyzer +from mlia.target.ethos_u.data_analysis import HasCPUOnlyOperators +from mlia.target.ethos_u.data_analysis import HasUnsupportedOnNPUOperators +from mlia.target.ethos_u.data_analysis import OptimizationDiff +from mlia.target.ethos_u.data_analysis import OptimizationResults +from mlia.target.ethos_u.data_analysis import PerfMetricDiff +from mlia.target.ethos_u.performance import MemoryUsage +from mlia.target.ethos_u.performance import NPUCycles +from mlia.target.ethos_u.performance import OptimizationPerformanceMetrics +from mlia.target.ethos_u.performance import PerformanceMetrics + + +def test_perf_metrics_diff() -> None: + """Test PerfMetricsDiff class.""" + diff_same = PerfMetricDiff(1, 1) + assert diff_same.same is True + assert diff_same.improved is False + assert diff_same.degraded is False + assert diff_same.diff == 0 + + diff_improved = PerfMetricDiff(10, 5) + assert diff_improved.same is False + assert diff_improved.improved is True + assert diff_improved.degraded is False + assert diff_improved.diff == 50.0 + + diff_degraded = PerfMetricDiff(5, 10) + assert diff_degraded.same is 
False + assert diff_degraded.improved is False + assert diff_degraded.degraded is True + assert diff_degraded.diff == -100.0 + + diff_original_zero = PerfMetricDiff(0, 1) + assert diff_original_zero.diff == 0 + + +@pytest.mark.parametrize( + "input_data, expected_facts", + [ + [ + Operators( + [ + Operator( + "CPU operator", + "CPU operator type", + NpuSupported(False, [("CPU only operator", "")]), + ) + ] + ), + [ + HasCPUOnlyOperators(["CPU operator type"]), + HasUnsupportedOnNPUOperators(1.0), + ], + ], + [ + Operators( + [ + Operator( + "NPU operator", + "NPU operator type", + NpuSupported(True, []), + ) + ] + ), + [ + AllOperatorsSupportedOnNPU(), + ], + ], + [ + OptimizationPerformanceMetrics( + PerformanceMetrics( + EthosUConfiguration("ethos-u55-256"), + NPUCycles(1, 2, 3, 4, 5, 6), + # memory metrics are in kilobytes + MemoryUsage(*[i * 1024 for i in range(1, 6)]), # type: ignore + ), + [ + [ + [ + OptimizationSettings("pruning", 0.5, None), + ], + PerformanceMetrics( + EthosUConfiguration("ethos-u55-256"), + NPUCycles(1, 2, 3, 4, 5, 6), + # memory metrics are in kilobytes + MemoryUsage( + *[i * 1024 for i in range(1, 6)] # type: ignore + ), + ), + ], + ], + ), + [ + OptimizationResults( + [ + OptimizationDiff( + opt_type=[ + OptimizationSettings("pruning", 0.5, None), + ], + opt_diffs={ + "sram": PerfMetricDiff(1.0, 1.0), + "dram": PerfMetricDiff(2.0, 2.0), + "on_chip_flash": PerfMetricDiff(4.0, 4.0), + "off_chip_flash": PerfMetricDiff(5.0, 5.0), + "npu_total_cycles": PerfMetricDiff(3, 3), + }, + ) + ] + ) + ], + ], + [ + OptimizationPerformanceMetrics( + PerformanceMetrics( + EthosUConfiguration("ethos-u55-256"), + NPUCycles(1, 2, 3, 4, 5, 6), + # memory metrics are in kilobytes + MemoryUsage(*[i * 1024 for i in range(1, 6)]), # type: ignore + ), + [], + ), + [], + ], + ], +) +def test_ethos_u_data_analyzer( + input_data: DataItem, expected_facts: list[Fact] +) -> None: + """Test Ethos-U data analyzer.""" + analyzer = EthosUDataAnalyzer() + 
analyzer.analyze_data(input_data) + assert analyzer.get_analyzed_data() == expected_facts diff --git a/tests/test_target_ethos_u_data_collection.py b/tests/test_target_ethos_u_data_collection.py new file mode 100644 index 0000000..2cf7482 --- /dev/null +++ b/tests/test_target_ethos_u_data_collection.py @@ -0,0 +1,151 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Tests for the data collection module for Ethos-U.""" +from pathlib import Path +from unittest.mock import MagicMock + +import pytest + +from mlia.backend.vela.compat import Operators +from mlia.core.context import Context +from mlia.core.data_collection import DataCollector +from mlia.core.errors import FunctionalityNotSupportedError +from mlia.nn.tensorflow.optimizations.select import OptimizationSettings +from mlia.target.ethos_u.config import EthosUConfiguration +from mlia.target.ethos_u.data_collection import EthosUOperatorCompatibility +from mlia.target.ethos_u.data_collection import EthosUOptimizationPerformance +from mlia.target.ethos_u.data_collection import EthosUPerformance +from mlia.target.ethos_u.performance import MemoryUsage +from mlia.target.ethos_u.performance import NPUCycles +from mlia.target.ethos_u.performance import OptimizationPerformanceMetrics +from mlia.target.ethos_u.performance import PerformanceMetrics + + +@pytest.mark.parametrize( + "collector, expected_name", + [ + ( + EthosUOperatorCompatibility, + "ethos_u_operator_compatibility", + ), + ( + EthosUPerformance, + "ethos_u_performance", + ), + ( + EthosUOptimizationPerformance, + "ethos_u_model_optimizations", + ), + ], +) +def test_collectors_metadata( + collector: DataCollector, + expected_name: str, +) -> None: + """Test collectors metadata.""" + assert collector.name() == expected_name + + +def test_operator_compatibility_collector( + sample_context: Context, test_tflite_model: Path +) -> None: + """Test operator compatibility data collector.""" 
+ device = EthosUConfiguration("ethos-u55-256") + + collector = EthosUOperatorCompatibility(test_tflite_model, device) + collector.set_context(sample_context) + + result = collector.collect_data() + assert isinstance(result, Operators) + + +def test_performance_collector( + monkeypatch: pytest.MonkeyPatch, sample_context: Context, test_tflite_model: Path +) -> None: + """Test performance data collector.""" + device = EthosUConfiguration("ethos-u55-256") + + mock_performance_estimation(monkeypatch, device) + + collector = EthosUPerformance(test_tflite_model, device) + collector.set_context(sample_context) + + result = collector.collect_data() + assert isinstance(result, PerformanceMetrics) + + +def test_optimization_performance_collector( + monkeypatch: pytest.MonkeyPatch, + sample_context: Context, + test_keras_model: Path, + test_tflite_model: Path, +) -> None: + """Test optimization performance data collector.""" + device = EthosUConfiguration("ethos-u55-256") + + mock_performance_estimation(monkeypatch, device) + collector = EthosUOptimizationPerformance( + test_keras_model, + device, + [ + [ + {"optimization_type": "pruning", "optimization_target": 0.5}, + ] + ], + ) + collector.set_context(sample_context) + result = collector.collect_data() + + assert isinstance(result, OptimizationPerformanceMetrics) + assert isinstance(result.original_perf_metrics, PerformanceMetrics) + assert isinstance(result.optimizations_perf_metrics, list) + assert len(result.optimizations_perf_metrics) == 1 + + opt, metrics = result.optimizations_perf_metrics[0] + assert opt == [OptimizationSettings("pruning", 0.5, None)] + assert isinstance(metrics, PerformanceMetrics) + + collector_no_optimizations = EthosUOptimizationPerformance( + test_keras_model, + device, + [], + ) + with pytest.raises(FunctionalityNotSupportedError): + collector_no_optimizations.collect_data() + + collector_tflite = EthosUOptimizationPerformance( + test_tflite_model, + device, + [ + [ + {"optimization_type": 
"pruning", "optimization_target": 0.5}, + ] + ], + ) + collector_tflite.set_context(sample_context) + with pytest.raises(FunctionalityNotSupportedError): + collector_tflite.collect_data() + + with pytest.raises( + Exception, match="Optimization parameters expected to be a list" + ): + collector_bad_config = EthosUOptimizationPerformance( + test_keras_model, device, {"optimization_type": "pruning"} # type: ignore + ) + collector.set_context(sample_context) + collector_bad_config.collect_data() + + +def mock_performance_estimation( + monkeypatch: pytest.MonkeyPatch, device: EthosUConfiguration +) -> None: + """Mock performance estimation.""" + metrics = PerformanceMetrics( + device, + NPUCycles(1, 2, 3, 4, 5, 6), + MemoryUsage(1, 2, 3, 4, 5), + ) + monkeypatch.setattr( + "mlia.target.ethos_u.data_collection.EthosUPerformanceEstimator.estimate", + MagicMock(return_value=metrics), + ) diff --git a/tests/test_target_ethos_u_performance.py b/tests/test_target_ethos_u_performance.py new file mode 100644 index 0000000..76860b5 --- /dev/null +++ b/tests/test_target_ethos_u_performance.py @@ -0,0 +1,28 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +"""Performance estimation tests.""" +from unittest.mock import MagicMock + +import pytest + +from mlia.target.ethos_u.performance import MemorySizeType +from mlia.target.ethos_u.performance import MemoryUsage + + +def test_memory_usage_conversion() -> None: + """Test MemoryUsage objects conversion.""" + memory_usage_in_kb = MemoryUsage(1, 2, 3, 4, 5, MemorySizeType.KILOBYTES) + assert memory_usage_in_kb.in_kilobytes() == memory_usage_in_kb + + memory_usage_in_bytes = MemoryUsage( + 1 * 1024, 2 * 1024, 3 * 1024, 4 * 1024, 5 * 1024 + ) + assert memory_usage_in_bytes.in_kilobytes() == memory_usage_in_kb + + +def mock_performance_estimation(monkeypatch: pytest.MonkeyPatch) -> None: + """Mock performance estimation.""" + monkeypatch.setattr( + "mlia.backend.corstone.performance.estimate_performance", + MagicMock(return_value=MagicMock()), + ) diff --git a/tests/test_target_ethos_u_reporters.py b/tests/test_target_ethos_u_reporters.py new file mode 100644 index 0000000..7f372bf --- /dev/null +++ b/tests/test_target_ethos_u_reporters.py @@ -0,0 +1,353 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +"""Tests for reports module.""" +from __future__ import annotations + +import json +import sys +from contextlib import ExitStack as doesnt_raise +from pathlib import Path +from typing import Any +from typing import Callable +from typing import Literal + +import pytest + +from mlia.backend.vela.compat import NpuSupported +from mlia.backend.vela.compat import Operator +from mlia.backend.vela.compat import Operators +from mlia.core.reporting import get_reporter +from mlia.core.reporting import produce_report +from mlia.core.reporting import Report +from mlia.core.reporting import Reporter +from mlia.core.reporting import Table +from mlia.target.ethos_u.config import EthosUConfiguration +from mlia.target.ethos_u.performance import MemoryUsage +from mlia.target.ethos_u.performance import NPUCycles +from mlia.target.ethos_u.performance import PerformanceMetrics +from mlia.target.ethos_u.reporters import ethos_u_formatters +from mlia.target.ethos_u.reporters import report_device_details +from mlia.target.ethos_u.reporters import report_operators +from mlia.target.ethos_u.reporters import report_perf_metrics +from mlia.utils.console import remove_ascii_codes + + +@pytest.mark.parametrize( + "data, formatters", + [ + ( + [Operator("test_operator", "test_type", NpuSupported(False, []))], + [report_operators], + ), + ( + PerformanceMetrics( + EthosUConfiguration("ethos-u55-256"), + NPUCycles(0, 0, 0, 0, 0, 0), + MemoryUsage(0, 0, 0, 0, 0), + ), + [report_perf_metrics], + ), + ], +) +@pytest.mark.parametrize( + "fmt, output, expected_error", + [ + [ + "unknown_format", + sys.stdout, + pytest.raises(Exception, match="Unknown format unknown_format"), + ], + [ + "plain_text", + sys.stdout, + doesnt_raise(), + ], + [ + "json", + sys.stdout, + doesnt_raise(), + ], + [ + "plain_text", + "report.txt", + doesnt_raise(), + ], + [ + "json", + "report.json", + doesnt_raise(), + ], + ], +) +def test_report( + data: Any, + formatters: list[Callable], 
+ fmt: Literal["plain_text", "json"], + output: Any, + expected_error: Any, + tmp_path: Path, +) -> None: + """Test report function.""" + if is_file := isinstance(output, str): + output = tmp_path / output + + for formatter in formatters: + with expected_error: + produce_report(data, formatter, fmt, output) + + if is_file: + assert output.is_file() + assert output.stat().st_size > 0 + + +@pytest.mark.parametrize( + "ops, expected_plain_text, expected_json_dict", + [ + ( + [ + Operator( + "npu_supported", + "test_type", + NpuSupported(True, []), + ), + Operator( + "cpu_only", + "test_type", + NpuSupported( + False, + [ + ( + "CPU only operator", + "", + ), + ], + ), + ), + Operator( + "npu_unsupported", + "test_type", + NpuSupported( + False, + [ + ( + "Not supported operator", + "Reason why operator is not supported", + ) + ], + ), + ), + ], + """ +Operators: +┌───┬─────────────────┬───────────────┬───────────┬───────────────────────────────┐ +│ # │ Operator name │ Operator type │ Placement │ Notes │ +╞═══╪═════════════════╪═══════════════╪═══════════╪═══════════════════════════════╡ +│ 1 │ npu_supported │ test_type │ NPU │ │ +├───┼─────────────────┼───────────────┼───────────┼───────────────────────────────┤ +│ 2 │ cpu_only │ test_type │ CPU │ * CPU only operator │ +├───┼─────────────────┼───────────────┼───────────┼───────────────────────────────┤ +│ 3 │ npu_unsupported │ test_type │ CPU │ * Not supported operator │ +│ │ │ │ │ │ +│ │ │ │ │ * Reason why operator is not │ +│ │ │ │ │ supported │ +└───┴─────────────────┴───────────────┴───────────┴───────────────────────────────┘ +""".strip(), + { + "operators": [ + { + "operator_name": "npu_supported", + "operator_type": "test_type", + "placement": "NPU", + "notes": [], + }, + { + "operator_name": "cpu_only", + "operator_type": "test_type", + "placement": "CPU", + "notes": [{"note": "CPU only operator"}], + }, + { + "operator_name": "npu_unsupported", + "operator_type": "test_type", + "placement": "CPU", + "notes": 
[ + {"note": "Not supported operator"}, + {"note": "Reason why operator is not supported"}, + ], + }, + ] + }, + ), + ], +) +def test_report_operators( + ops: list[Operator], + expected_plain_text: str, + expected_json_dict: dict, + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Test report_operatos formatter.""" + # make terminal wide enough to print whole table + monkeypatch.setenv("COLUMNS", "100") + + report = report_operators(ops) + assert isinstance(report, Table) + + plain_text = remove_ascii_codes(report.to_plain_text()) + assert plain_text == expected_plain_text + + json_dict = report.to_json() + assert json_dict == expected_json_dict + + +@pytest.mark.parametrize( + "device, expected_plain_text, expected_json_dict", + [ + [ + EthosUConfiguration("ethos-u55-256"), + """Device information: + Target ethos-u55 + MAC 256 + + Memory mode Shared_Sram + Const mem area Axi1 + Arena mem area Axi0 + Cache mem area Axi0 + Arena cache size 2,096,768 bytes + + System config Ethos_U55_High_End_Embedded + Accelerator clock 500,000,000 Hz + AXI0 port Sram + AXI1 port OffChipFlash + + Memory area settings: + Sram: + Clock scales 1.0 + Burst length 32 bytes + Read latency 32 cycles + Write latency 32 cycles + + Dram: + Clock scales 1.0 + Burst length 1 byte + Read latency 0 cycles + Write latency 0 cycles + + OnChipFlash: + Clock scales 1.0 + Burst length 1 byte + Read latency 0 cycles + Write latency 0 cycles + + OffChipFlash: + Clock scales 0.125 + Burst length 128 bytes + Read latency 64 cycles + Write latency 64 cycles + + Architecture settings: + Permanent storage mem area OffChipFlash + Feature map storage mem area Sram + Fast storage mem area Sram""", + { + "device": { + "target": "ethos-u55", + "mac": 256, + "memory_mode": { + "const_mem_area": "Axi1", + "arena_mem_area": "Axi0", + "cache_mem_area": "Axi0", + "arena_cache_size": {"value": 2096768, "unit": "bytes"}, + }, + "system_config": { + "accelerator_clock": {"value": 500000000.0, "unit": "Hz"}, + 
"axi0_port": "Sram", + "axi1_port": "OffChipFlash", + "memory_area": { + "Sram": { + "clock_scales": 1.0, + "burst_length": {"value": 32, "unit": "bytes"}, + "read_latency": {"value": 32, "unit": "cycles"}, + "write_latency": {"value": 32, "unit": "cycles"}, + }, + "Dram": { + "clock_scales": 1.0, + "burst_length": {"value": 1, "unit": "byte"}, + "read_latency": {"value": 0, "unit": "cycles"}, + "write_latency": {"value": 0, "unit": "cycles"}, + }, + "OnChipFlash": { + "clock_scales": 1.0, + "burst_length": {"value": 1, "unit": "byte"}, + "read_latency": {"value": 0, "unit": "cycles"}, + "write_latency": {"value": 0, "unit": "cycles"}, + }, + "OffChipFlash": { + "clock_scales": 0.125, + "burst_length": {"value": 128, "unit": "bytes"}, + "read_latency": {"value": 64, "unit": "cycles"}, + "write_latency": {"value": 64, "unit": "cycles"}, + }, + }, + }, + "arch_settings": { + "permanent_storage_mem_area": "OffChipFlash", + "feature_map_storage_mem_area": "Sram", + "fast_storage_mem_area": "Sram", + }, + } + }, + ], + ], +) +def test_report_device_details( + device: EthosUConfiguration, + expected_plain_text: str, + expected_json_dict: dict, +) -> None: + """Test report_operatos formatter.""" + report = report_device_details(device) + assert isinstance(report, Report) + + plain_text = report.to_plain_text() + assert plain_text == expected_plain_text + + json_dict = report.to_json() + assert json_dict == expected_json_dict + + +def test_get_reporter(tmp_path: Path) -> None: + """Test reporter functionality.""" + ops = Operators( + [ + Operator( + "npu_supported", + "op_type", + NpuSupported(True, []), + ), + ] + ) + + output = tmp_path / "output.json" + with get_reporter("json", output, ethos_u_formatters) as reporter: + assert isinstance(reporter, Reporter) + + with pytest.raises( + Exception, match="Unable to find appropriate formatter for some_data" + ): + reporter.submit("some_data") + + reporter.submit(ops) + + with open(output, encoding="utf-8") as file: + 
json_data = json.load(file) + + assert json_data == { + "operators_stats": [ + { + "npu_unsupported_ratio": 0.0, + "num_of_npu_supported_operators": 1, + "num_of_operators": 1, + } + ] + } diff --git a/tests/test_target_tosa_advice_generation.py b/tests/test_target_tosa_advice_generation.py new file mode 100644 index 0000000..e8e06f8 --- /dev/null +++ b/tests/test_target_tosa_advice_generation.py @@ -0,0 +1,56 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Tests for advice generation.""" +from __future__ import annotations + +import pytest + +from mlia.core.advice_generation import Advice +from mlia.core.common import AdviceCategory +from mlia.core.common import DataItem +from mlia.core.context import ExecutionContext +from mlia.target.tosa.advice_generation import TOSAAdviceProducer +from mlia.target.tosa.data_analysis import ModelIsNotTOSACompatible +from mlia.target.tosa.data_analysis import ModelIsTOSACompatible + + +@pytest.mark.parametrize( + "input_data, advice_category, expected_advice", + [ + [ + ModelIsNotTOSACompatible(), + AdviceCategory.OPERATORS, + [ + Advice( + [ + "Some operators in the model are not TOSA compatible. " + "Please, refer to the operators table for more information." 
+ ] + ) + ], + ], + [ + ModelIsTOSACompatible(), + AdviceCategory.OPERATORS, + [Advice(["Model is fully TOSA compatible."])], + ], + ], +) +def test_tosa_advice_producer( + tmpdir: str, + input_data: DataItem, + advice_category: AdviceCategory, + expected_advice: list[Advice], +) -> None: + """Test TOSA advice producer.""" + producer = TOSAAdviceProducer() + + context = ExecutionContext( + advice_category=advice_category, + working_dir=tmpdir, + ) + + producer.set_context(context) + producer.produce_advice(input_data) + + assert producer.get_advice() == expected_advice diff --git a/tests/test_target_tosa_advisor.py b/tests/test_target_tosa_advisor.py new file mode 100644 index 0000000..32a6b77 --- /dev/null +++ b/tests/test_target_tosa_advisor.py @@ -0,0 +1,29 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Tests for TOSA advisor.""" +from pathlib import Path + +from mlia.core.context import ExecutionContext +from mlia.core.workflow import DefaultWorkflowExecutor +from mlia.target.tosa.advisor import configure_and_get_tosa_advisor +from mlia.target.tosa.advisor import TOSAInferenceAdvisor + + +def test_configure_and_get_tosa_advisor(test_tflite_model: Path) -> None: + """Test TOSA advisor configuration.""" + ctx = ExecutionContext() + + advisor = configure_and_get_tosa_advisor(ctx, "tosa", test_tflite_model) + workflow = advisor.configure(ctx) + + assert isinstance(advisor, TOSAInferenceAdvisor) + + assert ctx.event_handlers is not None + assert ctx.config_parameters == { + "tosa_inference_advisor": { + "model": str(test_tflite_model), + "target_profile": "tosa", + } + } + + assert isinstance(workflow, DefaultWorkflowExecutor) diff --git a/tests/test_target_tosa_data_analysis.py b/tests/test_target_tosa_data_analysis.py new file mode 100644 index 0000000..41e977f --- /dev/null +++ b/tests/test_target_tosa_data_analysis.py @@ -0,0 +1,33 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm 
Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Tests for TOSA data analysis module.""" +from __future__ import annotations + +import pytest + +from mlia.backend.tosa_checker.compat import TOSACompatibilityInfo +from mlia.core.common import DataItem +from mlia.core.data_analysis import Fact +from mlia.target.tosa.data_analysis import ModelIsNotTOSACompatible +from mlia.target.tosa.data_analysis import ModelIsTOSACompatible +from mlia.target.tosa.data_analysis import TOSADataAnalyzer + + +@pytest.mark.parametrize( + "input_data, expected_facts", + [ + [ + TOSACompatibilityInfo(True, []), + [ModelIsTOSACompatible()], + ], + [ + TOSACompatibilityInfo(False, []), + [ModelIsNotTOSACompatible()], + ], + ], +) +def test_tosa_data_analyzer(input_data: DataItem, expected_facts: list[Fact]) -> None: + """Test TOSA data analyzer.""" + analyzer = TOSADataAnalyzer() + analyzer.analyze_data(input_data) + assert analyzer.get_analyzed_data() == expected_facts diff --git a/tests/test_target_tosa_data_collection.py b/tests/test_target_tosa_data_collection.py new file mode 100644 index 0000000..9d590ca --- /dev/null +++ b/tests/test_target_tosa_data_collection.py @@ -0,0 +1,28 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +"""Tests for TOSA data collection module.""" +from pathlib import Path +from unittest.mock import MagicMock + +import pytest + +from mlia.backend.tosa_checker.compat import TOSACompatibilityInfo +from mlia.core.context import ExecutionContext +from mlia.target.tosa.data_collection import TOSAOperatorCompatibility + + +def test_tosa_data_collection( + monkeypatch: pytest.MonkeyPatch, test_tflite_model: Path, tmpdir: str +) -> None: + """Test TOSA data collection.""" + monkeypatch.setattr( + "mlia.target.tosa.data_collection.get_tosa_compatibility_info", + MagicMock(return_value=TOSACompatibilityInfo(True, [])), + ) + context = ExecutionContext(working_dir=tmpdir) + collector = TOSAOperatorCompatibility(test_tflite_model) + collector.set_context(context) + + data_item = collector.collect_data() + + assert isinstance(data_item, TOSACompatibilityInfo) diff --git a/tests/test_utils_py_manager.py b/tests/test_utils_py_manager.py new file mode 100644 index 0000000..e41680d --- /dev/null +++ b/tests/test_utils_py_manager.py @@ -0,0 +1,73 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +"""Tests for python package manager.""" +import sys +from unittest.mock import MagicMock + +import pytest + +from mlia.utils.py_manager import get_package_manager +from mlia.utils.py_manager import PyPackageManager + + +def test_get_package_manager() -> None: + """Test function get_package_manager.""" + manager = get_package_manager() + assert isinstance(manager, PyPackageManager) + + +@pytest.fixture(name="mock_check_call") +def mock_check_call_fixture(monkeypatch: pytest.MonkeyPatch) -> MagicMock: + """Mock check_call function.""" + mock_check_call = MagicMock() + monkeypatch.setattr("mlia.utils.py_manager.check_call", mock_check_call) + + return mock_check_call + + +def test_py_package_manager_metadata() -> None: + """Test getting package status.""" + manager = PyPackageManager() + assert manager.package_installed("pytest") + assert manager.packages_installed(["pytest", "mlia"]) + + +def test_py_package_manager_install(mock_check_call: MagicMock) -> None: + """Test package installation.""" + manager = PyPackageManager() + with pytest.raises(ValueError, match="No package names provided"): + manager.install([]) + + manager.install(["mlia", "pytest"]) + mock_check_call.assert_called_once_with( + [ + sys.executable, + "-m", + "pip", + "--disable-pip-version-check", + "install", + "mlia", + "pytest", + ] + ) + + +def test_py_package_manager_uninstall(mock_check_call: MagicMock) -> None: + """Test package removal.""" + manager = PyPackageManager() + with pytest.raises(ValueError, match="No package names provided"): + manager.uninstall([]) + + manager.uninstall(["mlia", "pytest"]) + mock_check_call.assert_called_once_with( + [ + sys.executable, + "-m", + "pip", + "--disable-pip-version-check", + "uninstall", + "--yes", + "mlia", + "pytest", + ] + ) -- cgit v1.2.1