author     Dmitrii Agibov <dmitrii.agibov@arm.com>    2022-11-18 16:34:03 +0000
committer  Dmitrii Agibov <dmitrii.agibov@arm.com>    2022-11-29 14:44:13 +0000
commit     37959522a805a5e23c930ed79aac84920c3cb208 (patch)
tree       484af1240a93c955a72ce2e452432383b6704b56
parent     5568f9f000d673ac53e710dcc8991fec6e8a5488 (diff)
download   mlia-37959522a805a5e23c930ed79aac84920c3cb208.tar.gz
Move backends functionality into separate modules
- Move backend management/executor code into module backend_core
- Create separate module for each backend in "backend" module
- Move each backend into corresponding module
- Split Vela wrapper into several submodules

Change-Id: If01b6774aab6501951212541cc5d7f5aa7c97e95
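For code that consumed these modules, the change is mostly a matter of updated import paths. A representative before/after sketch based on the hunks below (not exhaustive):

    # Before this change
    from mlia.backend.common import Backend
    from mlia.backend.system import get_system
    from mlia.tools.metadata.corstone import get_corstone_installations

    # After this change
    from mlia.backend.executor.common import Backend
    from mlia.backend.executor.system import get_system
    from mlia.backend.corstone.install import get_corstone_installations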
-rw-r--r--  src/mlia/backend/__init__.py | 2
-rw-r--r--  src/mlia/backend/corstone/__init__.py (renamed from src/mlia/tools/metadata/__init__.py) | 2
-rw-r--r--  src/mlia/backend/corstone/install.py | 155
-rw-r--r--  src/mlia/backend/corstone/performance.py | 233
-rw-r--r--  src/mlia/backend/executor/__init__.py (renamed from src/mlia/tools/__init__.py) | 2
-rw-r--r--  src/mlia/backend/executor/application.py (renamed from src/mlia/backend/application.py) | 24
-rw-r--r--  src/mlia/backend/executor/common.py (renamed from src/mlia/backend/common.py) | 16
-rw-r--r--  src/mlia/backend/executor/config.py (renamed from src/mlia/backend/config.py) | 0
-rw-r--r--  src/mlia/backend/executor/execution.py (renamed from src/mlia/backend/execution.py) | 14
-rw-r--r--  src/mlia/backend/executor/fs.py (renamed from src/mlia/backend/fs.py) | 0
-rw-r--r--  src/mlia/backend/executor/output_consumer.py (renamed from src/mlia/backend/output_consumer.py) | 0
-rw-r--r--  src/mlia/backend/executor/proc.py (renamed from src/mlia/backend/proc.py) | 2
-rw-r--r--  src/mlia/backend/executor/runner.py | 98
-rw-r--r--  src/mlia/backend/executor/source.py (renamed from src/mlia/backend/source.py) | 14
-rw-r--r--  src/mlia/backend/executor/system.py (renamed from src/mlia/backend/system.py) | 22
-rw-r--r--  src/mlia/backend/install.py (renamed from src/mlia/tools/metadata/corstone.py) | 317
-rw-r--r--  src/mlia/backend/manager.py | 505
-rw-r--r--  src/mlia/backend/tosa_checker/__init__.py | 3
-rw-r--r--  src/mlia/backend/tosa_checker/install.py | 19
-rw-r--r--  src/mlia/backend/vela/__init__.py | 3
-rw-r--r--  src/mlia/backend/vela/compat.py | 158
-rw-r--r--  src/mlia/backend/vela/compiler.py (renamed from src/mlia/tools/vela_wrapper.py) | 225
-rw-r--r--  src/mlia/backend/vela/performance.py | 97
-rw-r--r--  src/mlia/cli/config.py | 15
-rw-r--r--  src/mlia/devices/ethosu/config.py | 4
-rw-r--r--  src/mlia/devices/ethosu/data_analysis.py | 2
-rw-r--r--  src/mlia/devices/ethosu/data_collection.py | 4
-rw-r--r--  src/mlia/devices/ethosu/handlers.py | 2
-rw-r--r--  src/mlia/devices/ethosu/operators.py | 4
-rw-r--r--  src/mlia/devices/ethosu/performance.py | 25
-rw-r--r--  src/mlia/devices/ethosu/reporters.py | 4
-rw-r--r--  src/mlia/tools/metadata/common.py | 322
-rw-r--r--  src/mlia/tools/metadata/py_package.py | 84
-rw-r--r--  tests/conftest.py | 6
-rw-r--r--  tests/test_api.py | 4
-rw-r--r--  tests/test_backend_corstone_install.py (renamed from tests/test_tools_metadata_corstone.py) | 50
-rw-r--r--  tests/test_backend_corstone_performance.py | 519
-rw-r--r--  tests/test_backend_executor_application.py (renamed from tests/test_backend_application.py) | 36
-rw-r--r--  tests/test_backend_executor_common.py (renamed from tests/test_backend_common.py) | 34
-rw-r--r--  tests/test_backend_executor_execution.py (renamed from tests/test_backend_execution.py) | 24
-rw-r--r--  tests/test_backend_executor_fs.py (renamed from tests/test_backend_fs.py) | 24
-rw-r--r--  tests/test_backend_executor_output_consumer.py (renamed from tests/test_backend_output_consumer.py) | 4
-rw-r--r--  tests/test_backend_executor_proc.py (renamed from tests/test_backend_proc.py) | 21
-rw-r--r--  tests/test_backend_executor_runner.py | 254
-rw-r--r--  tests/test_backend_executor_source.py (renamed from tests/test_backend_source.py) | 15
-rw-r--r--  tests/test_backend_executor_system.py (renamed from tests/test_backend_system.py) | 28
-rw-r--r--  tests/test_backend_install.py | 124
-rw-r--r--  tests/test_backend_manager.py | 930
-rw-r--r--  tests/test_backend_tosa_checker_install.py (renamed from tests/test_tools_metadata_py_package.py) | 22
-rw-r--r--  tests/test_backend_vela_compat.py | 74
-rw-r--r--  tests/test_backend_vela_compiler.py (renamed from tests/test_tools_vela_wrapper.py) | 132
-rw-r--r--  tests/test_backend_vela_performance.py | 64
-rw-r--r--  tests/test_cli_commands.py | 2
-rw-r--r--  tests/test_devices_ethosu_config.py | 2
-rw-r--r--  tests/test_devices_ethosu_data_analysis.py | 6
-rw-r--r--  tests/test_devices_ethosu_data_collection.py | 2
-rw-r--r--  tests/test_devices_ethosu_performance.py | 2
-rw-r--r--  tests/test_devices_ethosu_reporters.py | 6
-rw-r--r--  tests/test_tools_metadata_common.py | 282
59 files changed, 2641 insertions, 2403 deletions
diff --git a/src/mlia/backend/__init__.py b/src/mlia/backend/__init__.py
index 3d60372..745aa1b 100644
--- a/src/mlia/backend/__init__.py
+++ b/src/mlia/backend/__init__.py
@@ -1,3 +1,3 @@
# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
-"""Backend module."""
+"""Backends module."""
diff --git a/src/mlia/tools/metadata/__init__.py b/src/mlia/backend/corstone/__init__.py
index f877e4f..a1eac14 100644
--- a/src/mlia/tools/metadata/__init__.py
+++ b/src/mlia/backend/corstone/__init__.py
@@ -1,3 +1,3 @@
# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
-"""Module for the tools metadata."""
+"""Corstone backend module."""
diff --git a/src/mlia/backend/corstone/install.py b/src/mlia/backend/corstone/install.py
new file mode 100644
index 0000000..2a0e5c9
--- /dev/null
+++ b/src/mlia/backend/corstone/install.py
@@ -0,0 +1,155 @@
+# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+"""Module for Corstone based FVPs.
+
+The import of subprocess module raises a B404 bandit error. MLIA usage of
+subprocess is needed and can be considered safe hence disabling the security
+check.
+"""
+from __future__ import annotations
+
+import logging
+import subprocess # nosec
+from pathlib import Path
+
+from mlia.backend.executor.runner import BackendRunner
+from mlia.backend.install import BackendInstallation
+from mlia.backend.install import BackendMetadata
+from mlia.backend.install import CompoundPathChecker
+from mlia.backend.install import Installation
+from mlia.backend.install import PackagePathChecker
+from mlia.backend.install import StaticPathChecker
+from mlia.utils.download import DownloadArtifact
+from mlia.utils.filesystem import working_directory
+
+
+logger = logging.getLogger(__name__)
+
+
+class Corstone300Installer:
+ """Helper class that wraps Corstone 300 installation logic."""
+
+ def __call__(self, eula_agreement: bool, dist_dir: Path) -> Path:
+ """Install Corstone-300 and return path to the models."""
+ with working_directory(dist_dir):
+ install_dir = "corstone-300"
+ try:
+ fvp_install_cmd = [
+ "./FVP_Corstone_SSE-300.sh",
+ "-q",
+ "-d",
+ install_dir,
+ ]
+ if not eula_agreement:
+ fvp_install_cmd += [
+ "--nointeractive",
+ "--i-agree-to-the-contained-eula",
+ ]
+
+ # The following line raises a B603 error for bandit. In this
+ # specific case, the input is pretty much static and cannot be
+ # changed by the user, hence disabling the security check for
+ # this instance
+ subprocess.check_call(fvp_install_cmd) # nosec
+ except subprocess.CalledProcessError as err:
+ raise Exception(
+ "Error occurred during Corstone-300 installation"
+ ) from err
+
+ return dist_dir / install_dir
+
+
+def get_corstone_300_installation() -> Installation:
+ """Get Corstone-300 installation."""
+ corstone_300 = BackendInstallation(
+ backend_runner=BackendRunner(),
+ # pylint: disable=line-too-long
+ metadata=BackendMetadata(
+ name="Corstone-300",
+ description="Corstone-300 FVP",
+ system_config="backend_configs/systems/corstone-300/backend-config.json",
+ apps_resources=[],
+ fvp_dir_name="corstone_300",
+ download_artifact=DownloadArtifact(
+ name="Corstone-300 FVP",
+ url="https://developer.arm.com/-/media/Arm%20Developer%20Community/Downloads/OSS/FVP/Corstone-300/FVP_Corstone_SSE-300_11.16_26.tgz",
+ filename="FVP_Corstone_SSE-300_11.16_26.tgz",
+ version="11.16_26",
+ sha256_hash="e26139be756b5003a30d978c629de638aed1934d597dc24a17043d4708e934d7",
+ ),
+ supported_platforms=["Linux"],
+ ),
+ # pylint: enable=line-too-long
+ path_checker=CompoundPathChecker(
+ PackagePathChecker(
+ expected_files=[
+ "models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55",
+ "models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U65",
+ ],
+ backend_subfolder="models/Linux64_GCC-6.4",
+ ),
+ StaticPathChecker(
+ static_backend_path=Path("/opt/VHT"),
+ expected_files=[
+ "VHT_Corstone_SSE-300_Ethos-U55",
+ "VHT_Corstone_SSE-300_Ethos-U65",
+ ],
+ copy_source=False,
+ system_config=(
+ "backend_configs/systems/corstone-300-vht/backend-config.json"
+ ),
+ ),
+ ),
+ backend_installer=Corstone300Installer(),
+ )
+
+ return corstone_300
+
+
+def get_corstone_310_installation() -> Installation:
+ """Get Corstone-310 installation."""
+ corstone_310 = BackendInstallation(
+ backend_runner=BackendRunner(),
+ # pylint: disable=line-too-long
+ metadata=BackendMetadata(
+ name="Corstone-310",
+ description="Corstone-310 FVP",
+ system_config="backend_configs/systems/corstone-310/backend-config.json",
+ apps_resources=[],
+ fvp_dir_name="corstone_310",
+ download_artifact=None,
+ supported_platforms=["Linux"],
+ ),
+ # pylint: enable=line-too-long
+ path_checker=CompoundPathChecker(
+ PackagePathChecker(
+ expected_files=[
+ "models/Linux64_GCC-9.3/FVP_Corstone_SSE-310",
+ "models/Linux64_GCC-9.3/FVP_Corstone_SSE-310_Ethos-U65",
+ ],
+ backend_subfolder="models/Linux64_GCC-9.3",
+ ),
+ StaticPathChecker(
+ static_backend_path=Path("/opt/VHT"),
+ expected_files=[
+ "VHT_Corstone_SSE-310",
+ "VHT_Corstone_SSE-310_Ethos-U65",
+ ],
+ copy_source=False,
+ system_config=(
+ "backend_configs/systems/corstone-310-vht/backend-config.json"
+ ),
+ ),
+ ),
+ backend_installer=None,
+ )
+
+ return corstone_310
+
+
+def get_corstone_installations() -> list[Installation]:
+ """Get Corstone installations."""
+ return [
+ get_corstone_300_installation(),
+ get_corstone_310_installation(),
+ ]
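A minimal sketch of driving one of these installations directly through the Installation interface from mlia.backend.install (downloading the FVP needs network access and EULA acceptance; error handling omitted):

    from mlia.backend.corstone.install import get_corstone_300_installation
    from mlia.backend.install import DownloadAndInstall

    installation = get_corstone_300_installation()

    # Only attempt the download if the backend can be installed here and is not present yet.
    if installation.could_be_installed and not installation.already_installed:
        install_type = DownloadAndInstall(eula_agreement=True)
        if installation.supports(install_type):
            installation.install(install_type)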
diff --git a/src/mlia/backend/corstone/performance.py b/src/mlia/backend/corstone/performance.py
new file mode 100644
index 0000000..5aabfa5
--- /dev/null
+++ b/src/mlia/backend/corstone/performance.py
@@ -0,0 +1,233 @@
+# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+"""Module for backend integration."""
+from __future__ import annotations
+
+import logging
+from abc import ABC
+from abc import abstractmethod
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Literal
+
+from mlia.backend.executor.output_consumer import Base64OutputConsumer
+from mlia.backend.executor.output_consumer import OutputConsumer
+from mlia.backend.executor.runner import BackendRunner
+from mlia.backend.executor.runner import ExecutionParams
+from mlia.backend.install import get_application_name
+from mlia.backend.install import get_system_name
+
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class DeviceInfo:
+ """Device information."""
+
+ device_type: Literal["ethos-u55", "ethos-u65"]
+ mac: int
+
+
+@dataclass
+class ModelInfo:
+ """Model info."""
+
+ model_path: Path
+
+
+@dataclass
+class PerformanceMetrics:
+ """Performance metrics parsed from generic inference output."""
+
+ npu_active_cycles: int
+ npu_idle_cycles: int
+ npu_total_cycles: int
+ npu_axi0_rd_data_beat_received: int
+ npu_axi0_wr_data_beat_written: int
+ npu_axi1_rd_data_beat_received: int
+
+
+class LogWriter(OutputConsumer):
+ """Redirect output to the logger."""
+
+ def feed(self, line: str) -> bool:
+ """Process line from the output."""
+ logger.debug(line.strip())
+ return False
+
+
+class GenericInferenceOutputParser(Base64OutputConsumer):
+ """Generic inference app output parser."""
+
+ def __init__(self) -> None:
+ """Init generic inference output parser instance."""
+ super().__init__()
+ self._map = {
+ "NPU ACTIVE": "npu_active_cycles",
+ "NPU IDLE": "npu_idle_cycles",
+ "NPU TOTAL": "npu_total_cycles",
+ "NPU AXI0_RD_DATA_BEAT_RECEIVED": "npu_axi0_rd_data_beat_received",
+ "NPU AXI0_WR_DATA_BEAT_WRITTEN": "npu_axi0_wr_data_beat_written",
+ "NPU AXI1_RD_DATA_BEAT_RECEIVED": "npu_axi1_rd_data_beat_received",
+ }
+
+ @property
+ def result(self) -> dict:
+ """Merge the raw results and map the names to the right output names."""
+ merged_result = {}
+ for raw_result in self.parsed_output:
+ for profiling_result in raw_result:
+ for sample in profiling_result["samples"]:
+ name, values = (sample["name"], sample["value"])
+ if name in merged_result:
+ raise KeyError(
+ f"Duplicate key '{name}' in base64 output.",
+ )
+ new_name = self._map[name]
+ merged_result[new_name] = values[0]
+ return merged_result
+
+ def is_ready(self) -> bool:
+ """Return true if all expected data has been parsed."""
+ return set(self.result.keys()) == set(self._map.values())
+
+ def missed_keys(self) -> set[str]:
+ """Return a set of the keys that have not been found in the output."""
+ return set(self._map.values()) - set(self.result.keys())
+
+
+class GenericInferenceRunner(ABC):
+ """Abstract class for generic inference runner."""
+
+ def __init__(self, backend_runner: BackendRunner):
+ """Init generic inference runner instance."""
+ self.backend_runner = backend_runner
+
+ def run(
+ self, model_info: ModelInfo, output_consumers: list[OutputConsumer]
+ ) -> None:
+ """Run generic inference for the provided device/model."""
+ execution_params = self.get_execution_params(model_info)
+
+ ctx = self.backend_runner.run_application(execution_params)
+ if ctx.stdout is not None:
+ ctx.stdout = self.consume_output(ctx.stdout, output_consumers)
+
+ @abstractmethod
+ def get_execution_params(self, model_info: ModelInfo) -> ExecutionParams:
+ """Get execution params for the provided model."""
+
+ def check_system_and_application(self, system_name: str, app_name: str) -> None:
+ """Check if requested system and application installed."""
+ if not self.backend_runner.is_system_installed(system_name):
+ raise Exception(f"System {system_name} is not installed")
+
+ if not self.backend_runner.is_application_installed(app_name, system_name):
+ raise Exception(
+ f"Application {app_name} for the system {system_name} "
+ "is not installed"
+ )
+
+ @staticmethod
+ def consume_output(output: bytearray, consumers: list[OutputConsumer]) -> bytearray:
+ """
+ Pass program's output to the consumers and filter it.
+
+ Returns the filtered output.
+ """
+ filtered_output = bytearray()
+ for line_bytes in output.splitlines():
+ line = line_bytes.decode("utf-8")
+ remove_line = False
+ for consumer in consumers:
+ if consumer.feed(line):
+ remove_line = True
+ if not remove_line:
+ filtered_output.extend(line_bytes)
+
+ return filtered_output
+
+
+class GenericInferenceRunnerEthosU(GenericInferenceRunner):
+ """Generic inference runner on U55/65."""
+
+ def __init__(
+ self, backend_runner: BackendRunner, device_info: DeviceInfo, backend: str
+ ) -> None:
+ """Init generic inference runner instance."""
+ super().__init__(backend_runner)
+
+ system_name, app_name = self.resolve_system_and_app(device_info, backend)
+ self.system_name = system_name
+ self.app_name = app_name
+ self.device_info = device_info
+
+ @staticmethod
+ def resolve_system_and_app(
+ device_info: DeviceInfo, backend: str
+ ) -> tuple[str, str]:
+ """Find appropriate system and application for the provided device/backend."""
+ try:
+ system_name = get_system_name(backend, device_info.device_type)
+ except KeyError as ex:
+ raise RuntimeError(
+ f"Unsupported device {device_info.device_type} "
+ f"for backend {backend}"
+ ) from ex
+
+ try:
+ app_name = get_application_name(system_name)
+ except KeyError as err:
+ raise RuntimeError(f"System {system_name} is not installed") from err
+
+ return system_name, app_name
+
+ def get_execution_params(self, model_info: ModelInfo) -> ExecutionParams:
+ """Get execution params for Ethos-U55/65."""
+ self.check_system_and_application(self.system_name, self.app_name)
+
+ system_params = [
+ f"mac={self.device_info.mac}",
+ f"input_file={model_info.model_path.absolute()}",
+ ]
+
+ return ExecutionParams(
+ self.app_name,
+ self.system_name,
+ [],
+ system_params,
+ )
+
+
+def get_generic_runner(device_info: DeviceInfo, backend: str) -> GenericInferenceRunner:
+ """Get generic runner for provided device and backend."""
+ backend_runner = get_backend_runner()
+ return GenericInferenceRunnerEthosU(backend_runner, device_info, backend)
+
+
+def estimate_performance(
+ model_info: ModelInfo, device_info: DeviceInfo, backend: str
+) -> PerformanceMetrics:
+ """Get performance estimations."""
+ output_parser = GenericInferenceOutputParser()
+ output_consumers = [output_parser, LogWriter()]
+
+ generic_runner = get_generic_runner(device_info, backend)
+ generic_runner.run(model_info, output_consumers)
+
+ if not output_parser.is_ready():
+ missed_data = ",".join(output_parser.missed_keys())
+ logger.debug("Unable to get performance metrics, missed data %s", missed_data)
+ raise Exception("Unable to get performance metrics, insufficient data")
+
+ return PerformanceMetrics(**output_parser.result)
+
+
+def get_backend_runner() -> BackendRunner:
+ """
+ Return BackendRunner instance.
+
+ Note: This is needed for the unit tests.
+ """
+ return BackendRunner()
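A minimal usage sketch for this module; the model path and MAC value are illustrative placeholders, and the matching Corstone system and generic inference runner application must already be installed:

    from pathlib import Path

    from mlia.backend.corstone.performance import DeviceInfo
    from mlia.backend.corstone.performance import ModelInfo
    from mlia.backend.corstone.performance import estimate_performance

    metrics = estimate_performance(
        ModelInfo(model_path=Path("model.tflite")),    # placeholder model path
        DeviceInfo(device_type="ethos-u55", mac=256),  # example MAC configuration
        backend="Corstone-300",
    )
    print(metrics.npu_total_cycles)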
diff --git a/src/mlia/tools/__init__.py b/src/mlia/backend/executor/__init__.py
index 184e966..3d60372 100644
--- a/src/mlia/tools/__init__.py
+++ b/src/mlia/backend/executor/__init__.py
@@ -1,3 +1,3 @@
# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
-"""Tools module."""
+"""Backend module."""
diff --git a/src/mlia/backend/application.py b/src/mlia/backend/executor/application.py
index a5d99f7..738ac4e 100644
--- a/src/mlia/backend/application.py
+++ b/src/mlia/backend/executor/application.py
@@ -9,18 +9,18 @@ from typing import Any
from typing import cast
from typing import List
-from mlia.backend.common import Backend
-from mlia.backend.common import ConfigurationException
-from mlia.backend.common import get_backend_configs
-from mlia.backend.common import get_backend_directories
-from mlia.backend.common import load_application_configs
-from mlia.backend.common import load_config
-from mlia.backend.common import remove_backend
-from mlia.backend.config import ApplicationConfig
-from mlia.backend.config import ExtendedApplicationConfig
-from mlia.backend.fs import get_backends_path
-from mlia.backend.source import create_destination_and_install
-from mlia.backend.source import get_source
+from mlia.backend.executor.common import Backend
+from mlia.backend.executor.common import ConfigurationException
+from mlia.backend.executor.common import get_backend_configs
+from mlia.backend.executor.common import get_backend_directories
+from mlia.backend.executor.common import load_application_configs
+from mlia.backend.executor.common import load_config
+from mlia.backend.executor.common import remove_backend
+from mlia.backend.executor.config import ApplicationConfig
+from mlia.backend.executor.config import ExtendedApplicationConfig
+from mlia.backend.executor.fs import get_backends_path
+from mlia.backend.executor.source import create_destination_and_install
+from mlia.backend.executor.source import get_source
def get_available_application_directory_names() -> list[str]:
diff --git a/src/mlia/backend/common.py b/src/mlia/backend/executor/common.py
index 0f04553..48dbd4a 100644
--- a/src/mlia/backend/common.py
+++ b/src/mlia/backend/executor/common.py
@@ -19,14 +19,14 @@ from typing import Match
from typing import NamedTuple
from typing import Pattern
-from mlia.backend.config import BackendConfig
-from mlia.backend.config import BaseBackendConfig
-from mlia.backend.config import NamedExecutionConfig
-from mlia.backend.config import UserParamConfig
-from mlia.backend.config import UserParamsConfig
-from mlia.backend.fs import get_backends_path
-from mlia.backend.fs import remove_resource
-from mlia.backend.fs import ResourceType
+from mlia.backend.executor.config import BackendConfig
+from mlia.backend.executor.config import BaseBackendConfig
+from mlia.backend.executor.config import NamedExecutionConfig
+from mlia.backend.executor.config import UserParamConfig
+from mlia.backend.executor.config import UserParamsConfig
+from mlia.backend.executor.fs import get_backends_path
+from mlia.backend.executor.fs import remove_resource
+from mlia.backend.executor.fs import ResourceType
BACKEND_CONFIG_FILE: Final[str] = "backend-config.json"
diff --git a/src/mlia/backend/config.py b/src/mlia/backend/executor/config.py
index dca53da..dca53da 100644
--- a/src/mlia/backend/config.py
+++ b/src/mlia/backend/executor/config.py
diff --git a/src/mlia/backend/execution.py b/src/mlia/backend/executor/execution.py
index 5c8e53f..e253b16 100644
--- a/src/mlia/backend/execution.py
+++ b/src/mlia/backend/executor/execution.py
@@ -7,13 +7,13 @@ import logging
import re
from typing import cast
-from mlia.backend.application import Application
-from mlia.backend.application import get_application
-from mlia.backend.common import Backend
-from mlia.backend.common import ConfigurationException
-from mlia.backend.common import Param
-from mlia.backend.system import get_system
-from mlia.backend.system import System
+from mlia.backend.executor.application import Application
+from mlia.backend.executor.application import get_application
+from mlia.backend.executor.common import Backend
+from mlia.backend.executor.common import ConfigurationException
+from mlia.backend.executor.common import Param
+from mlia.backend.executor.system import get_system
+from mlia.backend.executor.system import System
logger = logging.getLogger(__name__)
diff --git a/src/mlia/backend/fs.py b/src/mlia/backend/executor/fs.py
index 3fce19c..3fce19c 100644
--- a/src/mlia/backend/fs.py
+++ b/src/mlia/backend/executor/fs.py
diff --git a/src/mlia/backend/output_consumer.py b/src/mlia/backend/executor/output_consumer.py
index 3c3b132..3c3b132 100644
--- a/src/mlia/backend/output_consumer.py
+++ b/src/mlia/backend/executor/output_consumer.py
diff --git a/src/mlia/backend/proc.py b/src/mlia/backend/executor/proc.py
index 4838e47..39a0689 100644
--- a/src/mlia/backend/proc.py
+++ b/src/mlia/backend/executor/proc.py
@@ -21,7 +21,7 @@ from sh import CommandNotFound
from sh import ErrorReturnCode
from sh import RunningCommand
-from mlia.backend.fs import valid_for_filename
+from mlia.backend.executor.fs import valid_for_filename
logger = logging.getLogger(__name__)
diff --git a/src/mlia/backend/executor/runner.py b/src/mlia/backend/executor/runner.py
new file mode 100644
index 0000000..2330fd9
--- /dev/null
+++ b/src/mlia/backend/executor/runner.py
@@ -0,0 +1,98 @@
+# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+"""Module for backend runner."""
+from __future__ import annotations
+
+from dataclasses import dataclass
+from pathlib import Path
+
+from mlia.backend.executor.application import get_available_applications
+from mlia.backend.executor.application import install_application
+from mlia.backend.executor.execution import ExecutionContext
+from mlia.backend.executor.execution import run_application
+from mlia.backend.executor.system import get_available_systems
+from mlia.backend.executor.system import install_system
+
+
+@dataclass
+class ExecutionParams:
+ """Application execution params."""
+
+ application: str
+ system: str
+ application_params: list[str]
+ system_params: list[str]
+
+
+class BackendRunner:
+ """Backend runner."""
+
+ def __init__(self) -> None:
+ """Init BackendRunner instance."""
+
+ @staticmethod
+ def get_installed_systems() -> list[str]:
+ """Get list of the installed systems."""
+ return [system.name for system in get_available_systems()]
+
+ @staticmethod
+ def get_installed_applications(system: str | None = None) -> list[str]:
+ """Get list of the installed application."""
+ return [
+ app.name
+ for app in get_available_applications()
+ if system is None or app.can_run_on(system)
+ ]
+
+ def is_application_installed(self, application: str, system: str) -> bool:
+ """Return true if requested application installed."""
+ return application in self.get_installed_applications(system)
+
+ def is_system_installed(self, system: str) -> bool:
+ """Return true if requested system installed."""
+ return system in self.get_installed_systems()
+
+ def systems_installed(self, systems: list[str]) -> bool:
+ """Check if all provided systems are installed."""
+ if not systems:
+ return False
+
+ installed_systems = self.get_installed_systems()
+ return all(system in installed_systems for system in systems)
+
+ def applications_installed(self, applications: list[str]) -> bool:
+ """Check if all provided applications are installed."""
+ if not applications:
+ return False
+
+ installed_apps = self.get_installed_applications()
+ return all(app in installed_apps for app in applications)
+
+ def all_installed(self, systems: list[str], apps: list[str]) -> bool:
+ """Check if all provided artifacts are installed."""
+ return self.systems_installed(systems) and self.applications_installed(apps)
+
+ @staticmethod
+ def install_system(system_path: Path) -> None:
+ """Install system."""
+ install_system(system_path)
+
+ @staticmethod
+ def install_application(app_path: Path) -> None:
+ """Install application."""
+ install_application(app_path)
+
+ @staticmethod
+ def run_application(execution_params: ExecutionParams) -> ExecutionContext:
+ """Run requested application."""
+ ctx = run_application(
+ execution_params.application,
+ execution_params.application_params,
+ execution_params.system,
+ execution_params.system_params,
+ )
+ return ctx
+
+ @staticmethod
+ def _params(name: str, params: list[str]) -> list[str]:
+ return [p for item in [(name, param) for param in params] for p in item]
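The Corstone performance module above builds on this runner; a rough sketch of the call sequence, using the system/application names defined elsewhere in this change and placeholder parameter values:

    from mlia.backend.executor.runner import BackendRunner
    from mlia.backend.executor.runner import ExecutionParams

    runner = BackendRunner()
    system = "Corstone-300: Cortex-M55+Ethos-U55"
    app = "Generic Inference Runner: Ethos-U55"

    if runner.all_installed([system], [app]):
        ctx = runner.run_application(
            ExecutionParams(
                application=app,
                system=system,
                application_params=[],
                system_params=["mac=128", "input_file=model.tflite"],  # placeholder values
            )
        )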
diff --git a/src/mlia/backend/source.py b/src/mlia/backend/executor/source.py
index c951eae..6abc49f 100644
--- a/src/mlia/backend/source.py
+++ b/src/mlia/backend/executor/source.py
@@ -11,13 +11,13 @@ from abc import abstractmethod
from pathlib import Path
from tarfile import TarFile
-from mlia.backend.common import BACKEND_CONFIG_FILE
-from mlia.backend.common import ConfigurationException
-from mlia.backend.common import get_backend_config
-from mlia.backend.common import is_backend_directory
-from mlia.backend.common import load_config
-from mlia.backend.config import BackendConfig
-from mlia.backend.fs import copy_directory_content
+from mlia.backend.executor.common import BACKEND_CONFIG_FILE
+from mlia.backend.executor.common import ConfigurationException
+from mlia.backend.executor.common import get_backend_config
+from mlia.backend.executor.common import is_backend_directory
+from mlia.backend.executor.common import load_config
+from mlia.backend.executor.config import BackendConfig
+from mlia.backend.executor.fs import copy_directory_content
class Source(ABC):
diff --git a/src/mlia/backend/system.py b/src/mlia/backend/executor/system.py
index 0e51ab2..a5ecf19 100644
--- a/src/mlia/backend/system.py
+++ b/src/mlia/backend/executor/system.py
@@ -8,17 +8,17 @@ from typing import Any
from typing import cast
from typing import List
-from mlia.backend.common import Backend
-from mlia.backend.common import ConfigurationException
-from mlia.backend.common import get_backend_configs
-from mlia.backend.common import get_backend_directories
-from mlia.backend.common import load_config
-from mlia.backend.common import remove_backend
-from mlia.backend.config import SystemConfig
-from mlia.backend.fs import get_backends_path
-from mlia.backend.proc import run_and_wait
-from mlia.backend.source import create_destination_and_install
-from mlia.backend.source import get_source
+from mlia.backend.executor.common import Backend
+from mlia.backend.executor.common import ConfigurationException
+from mlia.backend.executor.common import get_backend_configs
+from mlia.backend.executor.common import get_backend_directories
+from mlia.backend.executor.common import load_config
+from mlia.backend.executor.common import remove_backend
+from mlia.backend.executor.config import SystemConfig
+from mlia.backend.executor.fs import get_backends_path
+from mlia.backend.executor.proc import run_and_wait
+from mlia.backend.executor.source import create_destination_and_install
+from mlia.backend.executor.source import get_source
class System(Backend):
diff --git a/src/mlia/tools/metadata/corstone.py b/src/mlia/backend/install.py
index df2dcdb..eea3403 100644
--- a/src/mlia/tools/metadata/corstone.py
+++ b/src/mlia/backend/install.py
@@ -1,29 +1,22 @@
# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
-"""Module for Corstone based FVPs.
-
-The import of subprocess module raises a B404 bandit error. MLIA usage of
-subprocess is needed and can be considered safe hence disabling the security
-check.
-"""
+"""Module for installation process."""
from __future__ import annotations
import logging
import platform
-import subprocess # nosec
import tarfile
+from abc import ABC
+from abc import abstractmethod
from dataclasses import dataclass
from pathlib import Path
from typing import Callable
from typing import Iterable
from typing import Optional
+from typing import Union
-import mlia.backend.manager as backend_manager
-from mlia.backend.system import remove_system
-from mlia.tools.metadata.common import DownloadAndInstall
-from mlia.tools.metadata.common import Installation
-from mlia.tools.metadata.common import InstallationType
-from mlia.tools.metadata.common import InstallFromPath
+from mlia.backend.executor.runner import BackendRunner
+from mlia.backend.executor.system import remove_system
from mlia.utils.download import DownloadArtifact
from mlia.utils.filesystem import all_files_exist
from mlia.utils.filesystem import all_paths_valid
@@ -31,11 +24,124 @@ from mlia.utils.filesystem import copy_all
from mlia.utils.filesystem import get_mlia_resources
from mlia.utils.filesystem import temp_directory
from mlia.utils.filesystem import working_directory
+from mlia.utils.py_manager import get_package_manager
logger = logging.getLogger(__name__)
+# Mapping backend -> device_type -> system_name
+_SUPPORTED_SYSTEMS = {
+ "Corstone-300": {
+ "ethos-u55": "Corstone-300: Cortex-M55+Ethos-U55",
+ "ethos-u65": "Corstone-300: Cortex-M55+Ethos-U65",
+ },
+ "Corstone-310": {
+ "ethos-u55": "Corstone-310: Cortex-M85+Ethos-U55",
+ "ethos-u65": "Corstone-310: Cortex-M85+Ethos-U65",
+ },
+}
+
+# Mapping system_name -> application
+_SYSTEM_TO_APP_MAP = {
+ "Corstone-300: Cortex-M55+Ethos-U55": "Generic Inference Runner: Ethos-U55",
+ "Corstone-300: Cortex-M55+Ethos-U65": "Generic Inference Runner: Ethos-U65",
+ "Corstone-310: Cortex-M85+Ethos-U55": "Generic Inference Runner: Ethos-U55",
+ "Corstone-310: Cortex-M85+Ethos-U65": "Generic Inference Runner: Ethos-U65",
+}
+
+
+def get_system_name(backend: str, device_type: str) -> str:
+ """Get the system name for the given backend and device type."""
+ return _SUPPORTED_SYSTEMS[backend][device_type]
+
+
+def get_application_name(system_name: str) -> str:
+ """Get application name for the provided system name."""
+ return _SYSTEM_TO_APP_MAP[system_name]
+
+
+def is_supported(backend: str, device_type: str | None = None) -> bool:
+ """Check if the backend (and optionally device type) is supported."""
+ if device_type is None:
+ return backend in _SUPPORTED_SYSTEMS
+
+ try:
+ get_system_name(backend, device_type)
+ return True
+ except KeyError:
+ return False
+
+
+def supported_backends() -> list[str]:
+ """Get a list of all backends supported by the backend manager."""
+ return list(_SUPPORTED_SYSTEMS.keys())
+
+
+def get_all_system_names(backend: str) -> list[str]:
+ """Get all systems supported by the backend."""
+ return list(_SUPPORTED_SYSTEMS.get(backend, {}).values())
+
+
+def get_all_application_names(backend: str) -> list[str]:
+ """Get all applications supported by the backend."""
+ app_set = {_SYSTEM_TO_APP_MAP[sys] for sys in get_all_system_names(backend)}
+ return list(app_set)
+
+
+@dataclass
+class InstallFromPath:
+ """Installation from the local path."""
+
+ backend_path: Path
+
+
+@dataclass
+class DownloadAndInstall:
+ """Download and install."""
+
+ eula_agreement: bool = True
+
+
+InstallationType = Union[InstallFromPath, DownloadAndInstall]
+
+
+class Installation(ABC):
+ """Base class for the installation process of the backends."""
+
+ @property
+ @abstractmethod
+ def name(self) -> str:
+ """Return name of the backend."""
+
+ @property
+ @abstractmethod
+ def description(self) -> str:
+ """Return description of the backend."""
+
+ @property
+ @abstractmethod
+ def could_be_installed(self) -> bool:
+ """Return true if backend could be installed in current environment."""
+
+ @property
+ @abstractmethod
+ def already_installed(self) -> bool:
+ """Return true if backend is already installed."""
+
+ @abstractmethod
+ def supports(self, install_type: InstallationType) -> bool:
+ """Return true if installation supports requested installation type."""
+
+ @abstractmethod
+ def install(self, install_type: InstallationType) -> None:
+ """Install the backend."""
+
+ @abstractmethod
+ def uninstall(self) -> None:
+ """Uninstall the backend."""
+
+
@dataclass
class BackendInfo:
"""Backend information."""
@@ -75,8 +181,8 @@ class BackendMetadata:
self.download_artifact = download_artifact
self.supported_platforms = supported_platforms
- self.expected_systems = backend_manager.get_all_system_names(name)
- self.expected_apps = backend_manager.get_all_application_names(name)
+ self.expected_systems = get_all_system_names(name)
+ self.expected_apps = get_all_application_names(name)
@property
def expected_resources(self) -> Iterable[Path]:
@@ -99,7 +205,7 @@ class BackendInstallation(Installation):
def __init__(
self,
- backend_runner: backend_manager.BackendRunner,
+ backend_runner: BackendRunner,
metadata: BackendMetadata,
path_checker: PathChecker,
backend_installer: BackendInstaller | None,
@@ -288,130 +394,57 @@ class CompoundPathChecker:
return next(first_resolved_backend_info, None)
-class Corstone300Installer:
- """Helper class that wraps Corstone 300 installation logic."""
+class PyPackageBackendInstallation(Installation):
+ """Backend based on the python package."""
- def __call__(self, eula_agreement: bool, dist_dir: Path) -> Path:
- """Install Corstone-300 and return path to the models."""
- with working_directory(dist_dir):
- install_dir = "corstone-300"
- try:
- fvp_install_cmd = [
- "./FVP_Corstone_SSE-300.sh",
- "-q",
- "-d",
- install_dir,
- ]
- if not eula_agreement:
- fvp_install_cmd += [
- "--nointeractive",
- "--i-agree-to-the-contained-eula",
- ]
-
- # The following line raises a B603 error for bandit. In this
- # specific case, the input is pretty much static and cannot be
- # changed byt the user hence disabling the security check for
- # this instance
- subprocess.check_call(fvp_install_cmd) # nosec
- except subprocess.CalledProcessError as err:
- raise Exception(
- "Error occurred during Corstone-300 installation"
- ) from err
-
- return dist_dir / install_dir
-
-
-def get_corstone_300_installation() -> Installation:
- """Get Corstone-300 installation."""
- corstone_300 = BackendInstallation(
- backend_runner=backend_manager.BackendRunner(),
- # pylint: disable=line-too-long
- metadata=BackendMetadata(
- name="Corstone-300",
- description="Corstone-300 FVP",
- system_config="backend_configs/systems/corstone-300/backend-config.json",
- apps_resources=[],
- fvp_dir_name="corstone_300",
- download_artifact=DownloadArtifact(
- name="Corstone-300 FVP",
- url="https://developer.arm.com/-/media/Arm%20Developer%20Community/Downloads/OSS/FVP/Corstone-300/FVP_Corstone_SSE-300_11.16_26.tgz",
- filename="FVP_Corstone_SSE-300_11.16_26.tgz",
- version="11.16_26",
- sha256_hash="e26139be756b5003a30d978c629de638aed1934d597dc24a17043d4708e934d7",
- ),
- supported_platforms=["Linux"],
- ),
- # pylint: enable=line-too-long
- path_checker=CompoundPathChecker(
- PackagePathChecker(
- expected_files=[
- "models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55",
- "models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U65",
- ],
- backend_subfolder="models/Linux64_GCC-6.4",
- ),
- StaticPathChecker(
- static_backend_path=Path("/opt/VHT"),
- expected_files=[
- "VHT_Corstone_SSE-300_Ethos-U55",
- "VHT_Corstone_SSE-300_Ethos-U65",
- ],
- copy_source=False,
- system_config=(
- "backend_configs/systems/corstone-300-vht/backend-config.json"
- ),
- ),
- ),
- backend_installer=Corstone300Installer(),
- )
-
- return corstone_300
-
-
-def get_corstone_310_installation() -> Installation:
- """Get Corstone-310 installation."""
- corstone_310 = BackendInstallation(
- backend_runner=backend_manager.BackendRunner(),
- # pylint: disable=line-too-long
- metadata=BackendMetadata(
- name="Corstone-310",
- description="Corstone-310 FVP",
- system_config="backend_configs/systems/corstone-310/backend-config.json",
- apps_resources=[],
- fvp_dir_name="corstone_310",
- download_artifact=None,
- supported_platforms=["Linux"],
- ),
- # pylint: enable=line-too-long
- path_checker=CompoundPathChecker(
- PackagePathChecker(
- expected_files=[
- "models/Linux64_GCC-9.3/FVP_Corstone_SSE-310",
- "models/Linux64_GCC-9.3/FVP_Corstone_SSE-310_Ethos-U65",
- ],
- backend_subfolder="models/Linux64_GCC-9.3",
- ),
- StaticPathChecker(
- static_backend_path=Path("/opt/VHT"),
- expected_files=[
- "VHT_Corstone_SSE-310",
- "VHT_Corstone_SSE-310_Ethos-U65",
- ],
- copy_source=False,
- system_config=(
- "backend_configs/systems/corstone-310-vht/backend-config.json"
- ),
- ),
- ),
- backend_installer=None,
- )
-
- return corstone_310
-
-
-def get_corstone_installations() -> list[Installation]:
- """Get Corstone installations."""
- return [
- get_corstone_300_installation(),
- get_corstone_310_installation(),
- ]
+ def __init__(
+ self,
+ name: str,
+ description: str,
+ packages_to_install: list[str],
+ packages_to_uninstall: list[str],
+ expected_packages: list[str],
+ ) -> None:
+ """Init the backend installation."""
+ self._name = name
+ self._description = description
+ self._packages_to_install = packages_to_install
+ self._packages_to_uninstall = packages_to_uninstall
+ self._expected_packages = expected_packages
+
+ self.package_manager = get_package_manager()
+
+ @property
+ def name(self) -> str:
+ """Return name of the backend."""
+ return self._name
+
+ @property
+ def description(self) -> str:
+ """Return description of the backend."""
+ return self._description
+
+ @property
+ def could_be_installed(self) -> bool:
+ """Check if backend could be installed."""
+ return True
+
+ @property
+ def already_installed(self) -> bool:
+ """Check if backend already installed."""
+ return self.package_manager.packages_installed(self._expected_packages)
+
+ def supports(self, install_type: InstallationType) -> bool:
+ """Return true if installation supports requested installation type."""
+ return isinstance(install_type, DownloadAndInstall)
+
+ def install(self, install_type: InstallationType) -> None:
+ """Install the backend."""
+ if not self.supports(install_type):
+ raise Exception(f"Unsupported installation type {install_type}")
+
+ self.package_manager.install(self._packages_to_install)
+
+ def uninstall(self) -> None:
+ """Uninstall the backend."""
+ self.package_manager.uninstall(self._packages_to_uninstall)
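A short sketch of the backend/system/application lookup helpers that now live in this module:

    from mlia.backend.install import get_application_name
    from mlia.backend.install import get_system_name
    from mlia.backend.install import is_supported
    from mlia.backend.install import supported_backends

    assert "Corstone-310" in supported_backends()
    assert is_supported("Corstone-310", "ethos-u65")

    system = get_system_name("Corstone-310", "ethos-u65")
    # -> "Corstone-310: Cortex-M85+Ethos-U65"
    app = get_application_name(system)
    # -> "Generic Inference Runner: Ethos-U65"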
diff --git a/src/mlia/backend/manager.py b/src/mlia/backend/manager.py
index 6a61ab0..c02dc6e 100644
--- a/src/mlia/backend/manager.py
+++ b/src/mlia/backend/manager.py
@@ -1,372 +1,271 @@
# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
-"""Module for backend integration."""
+"""Module for installation process."""
from __future__ import annotations
import logging
from abc import ABC
from abc import abstractmethod
-from dataclasses import dataclass
from pathlib import Path
-from typing import Literal
+from typing import Callable
-from mlia.backend.application import get_available_applications
-from mlia.backend.application import install_application
-from mlia.backend.execution import ExecutionContext
-from mlia.backend.execution import run_application
-from mlia.backend.output_consumer import Base64OutputConsumer
-from mlia.backend.output_consumer import OutputConsumer
-from mlia.backend.system import get_available_systems
-from mlia.backend.system import install_system
+from mlia.backend.install import DownloadAndInstall
+from mlia.backend.install import Installation
+from mlia.backend.install import InstallationType
+from mlia.backend.install import InstallFromPath
+from mlia.core.errors import ConfigurationError
+from mlia.core.errors import InternalError
+from mlia.utils.misc import yes
logger = logging.getLogger(__name__)
-# Mapping backend -> device_type -> system_name
-_SUPPORTED_SYSTEMS = {
- "Corstone-300": {
- "ethos-u55": "Corstone-300: Cortex-M55+Ethos-U55",
- "ethos-u65": "Corstone-300: Cortex-M55+Ethos-U65",
- },
- "Corstone-310": {
- "ethos-u55": "Corstone-310: Cortex-M85+Ethos-U55",
- "ethos-u65": "Corstone-310: Cortex-M85+Ethos-U65",
- },
-}
+InstallationFilter = Callable[[Installation], bool]
-# Mapping system_name -> application
-_SYSTEM_TO_APP_MAP = {
- "Corstone-300: Cortex-M55+Ethos-U55": "Generic Inference Runner: Ethos-U55",
- "Corstone-300: Cortex-M55+Ethos-U65": "Generic Inference Runner: Ethos-U65",
- "Corstone-310: Cortex-M85+Ethos-U55": "Generic Inference Runner: Ethos-U55",
- "Corstone-310: Cortex-M85+Ethos-U65": "Generic Inference Runner: Ethos-U65",
-}
+class AlreadyInstalledFilter:
+ """Filter for already installed backends."""
-def get_system_name(backend: str, device_type: str) -> str:
- """Get the system name for the given backend and device type."""
- return _SUPPORTED_SYSTEMS[backend][device_type]
+ def __call__(self, installation: Installation) -> bool:
+ """Installation filter."""
+ return installation.already_installed
-def is_supported(backend: str, device_type: str | None = None) -> bool:
- """Check if the backend (and optionally device type) is supported."""
- if device_type is None:
- return backend in _SUPPORTED_SYSTEMS
+class ReadyForInstallationFilter:
+ """Filter for ready to be installed backends."""
- try:
- get_system_name(backend, device_type)
- return True
- except KeyError:
- return False
+ def __call__(self, installation: Installation) -> bool:
+ """Installation filter."""
+ return installation.could_be_installed and not installation.already_installed
-def supported_backends() -> list[str]:
- """Get a list of all backends supported by the backend manager."""
- return list(_SUPPORTED_SYSTEMS.keys())
+class SupportsInstallTypeFilter:
+ """Filter backends that support certain type of the installation."""
+ def __init__(self, installation_type: InstallationType) -> None:
+ """Init filter."""
+ self.installation_type = installation_type
-def get_all_system_names(backend: str) -> list[str]:
- """Get all systems supported by the backend."""
- return list(_SUPPORTED_SYSTEMS.get(backend, {}).values())
+ def __call__(self, installation: Installation) -> bool:
+ """Installation filter."""
+ return installation.supports(self.installation_type)
-def get_all_application_names(backend: str) -> list[str]:
- """Get all applications supported by the backend."""
- app_set = {_SYSTEM_TO_APP_MAP[sys] for sys in get_all_system_names(backend)}
- return list(app_set)
+class SearchByNameFilter:
+ """Filter installation by name."""
+ def __init__(self, backend_name: str | None) -> None:
+ """Init filter."""
+ self.backend_name = backend_name
-@dataclass
-class DeviceInfo:
- """Device information."""
-
- device_type: Literal["ethos-u55", "ethos-u65"]
- mac: int
-
-
-@dataclass
-class ModelInfo:
- """Model info."""
-
- model_path: Path
-
-
-@dataclass
-class PerformanceMetrics:
- """Performance metrics parsed from generic inference output."""
-
- npu_active_cycles: int
- npu_idle_cycles: int
- npu_total_cycles: int
- npu_axi0_rd_data_beat_received: int
- npu_axi0_wr_data_beat_written: int
- npu_axi1_rd_data_beat_received: int
-
-
-@dataclass
-class ExecutionParams:
- """Application execution params."""
-
- application: str
- system: str
- application_params: list[str]
- system_params: list[str]
-
-
-class LogWriter(OutputConsumer):
- """Redirect output to the logger."""
-
- def feed(self, line: str) -> bool:
- """Process line from the output."""
- logger.debug(line.strip())
- return False
+ def __call__(self, installation: Installation) -> bool:
+ """Installation filter."""
+ return (
+ not self.backend_name
+ or installation.name.casefold() == self.backend_name.casefold()
+ )
-class GenericInferenceOutputParser(Base64OutputConsumer):
- """Generic inference app output parser."""
+class InstallationManager(ABC):
+ """Helper class for managing installations."""
- def __init__(self) -> None:
- """Init generic inference output parser instance."""
- super().__init__()
- self._map = {
- "NPU ACTIVE": "npu_active_cycles",
- "NPU IDLE": "npu_idle_cycles",
- "NPU TOTAL": "npu_total_cycles",
- "NPU AXI0_RD_DATA_BEAT_RECEIVED": "npu_axi0_rd_data_beat_received",
- "NPU AXI0_WR_DATA_BEAT_WRITTEN": "npu_axi0_wr_data_beat_written",
- "NPU AXI1_RD_DATA_BEAT_RECEIVED": "npu_axi1_rd_data_beat_received",
- }
+ @abstractmethod
+ def install_from(self, backend_path: Path, backend_name: str, force: bool) -> None:
+ """Install backend from the local directory."""
- @property
- def result(self) -> dict:
- """Merge the raw results and map the names to the right output names."""
- merged_result = {}
- for raw_result in self.parsed_output:
- for profiling_result in raw_result:
- for sample in profiling_result["samples"]:
- name, values = (sample["name"], sample["value"])
- if name in merged_result:
- raise KeyError(
- f"Duplicate key '{name}' in base64 output.",
- )
- new_name = self._map[name]
- merged_result[new_name] = values[0]
- return merged_result
+ @abstractmethod
+ def download_and_install(
+ self, backend_name: str, eula_agreement: bool, force: bool
+ ) -> None:
+ """Download and install backends."""
- def is_ready(self) -> bool:
- """Return true if all expected data has been parsed."""
- return set(self.result.keys()) == set(self._map.values())
+ @abstractmethod
+ def show_env_details(self) -> None:
+ """Show environment details."""
- def missed_keys(self) -> set[str]:
- """Return a set of the keys that have not been found in the output."""
- return set(self._map.values()) - set(self.result.keys())
+ @abstractmethod
+ def backend_installed(self, backend_name: str) -> bool:
+ """Return true if requested backend installed."""
+ @abstractmethod
+ def uninstall(self, backend_name: str) -> None:
+ """Delete the existing installation."""
-class BackendRunner:
- """Backend runner."""
- def __init__(self) -> None:
- """Init BackendRunner instance."""
+class InstallationFiltersMixin:
+ """Mixin for filtering installation based on different conditions."""
- @staticmethod
- def get_installed_systems() -> list[str]:
- """Get list of the installed systems."""
- return [system.name for system in get_available_systems()]
+ installations: list[Installation]
- @staticmethod
- def get_installed_applications(system: str | None = None) -> list[str]:
- """Get list of the installed application."""
+ def filter_by(self, *filters: InstallationFilter) -> list[Installation]:
+ """Filter installations."""
return [
- app.name
- for app in get_available_applications()
- if system is None or app.can_run_on(system)
+ installation
+ for installation in self.installations
+ if all(filter_(installation) for filter_ in filters)
]
- def is_application_installed(self, application: str, system: str) -> bool:
- """Return true if requested application installed."""
- return application in self.get_installed_applications(system)
-
- def is_system_installed(self, system: str) -> bool:
- """Return true if requested system installed."""
- return system in self.get_installed_systems()
-
- def systems_installed(self, systems: list[str]) -> bool:
- """Check if all provided systems are installed."""
- if not systems:
- return False
-
- installed_systems = self.get_installed_systems()
- return all(system in installed_systems for system in systems)
-
- def applications_installed(self, applications: list[str]) -> bool:
- """Check if all provided applications are installed."""
- if not applications:
- return False
-
- installed_apps = self.get_installed_applications()
- return all(app in installed_apps for app in applications)
+ def find_by_name(self, backend_name: str) -> list[Installation]:
+ """Return list of the backends filtered by name."""
+ return self.filter_by(SearchByNameFilter(backend_name))
- def all_installed(self, systems: list[str], apps: list[str]) -> bool:
- """Check if all provided artifacts are installed."""
- return self.systems_installed(systems) and self.applications_installed(apps)
-
- @staticmethod
- def install_system(system_path: Path) -> None:
- """Install system."""
- install_system(system_path)
-
- @staticmethod
- def install_application(app_path: Path) -> None:
- """Install application."""
- install_application(app_path)
-
- @staticmethod
- def run_application(execution_params: ExecutionParams) -> ExecutionContext:
- """Run requested application."""
- ctx = run_application(
- execution_params.application,
- execution_params.application_params,
- execution_params.system,
- execution_params.system_params,
+ def already_installed(self, backend_name: str = None) -> list[Installation]:
+ """Return list of backends that are already installed."""
+ return self.filter_by(
+ AlreadyInstalledFilter(),
+ SearchByNameFilter(backend_name),
)
- return ctx
- @staticmethod
- def _params(name: str, params: list[str]) -> list[str]:
- return [p for item in [(name, param) for param in params] for p in item]
+ def ready_for_installation(self) -> list[Installation]:
+ """Return list of the backends that could be installed."""
+ return self.filter_by(ReadyForInstallationFilter())
-class GenericInferenceRunner(ABC):
- """Abstract class for generic inference runner."""
+class DefaultInstallationManager(InstallationManager, InstallationFiltersMixin):
+ """Interactive installation manager."""
- def __init__(self, backend_runner: BackendRunner):
- """Init generic inference runner instance."""
- self.backend_runner = backend_runner
-
- def run(
- self, model_info: ModelInfo, output_consumers: list[OutputConsumer]
+ def __init__(
+ self, installations: list[Installation], noninteractive: bool = False
) -> None:
- """Run generic inference for the provided device/model."""
- execution_params = self.get_execution_params(model_info)
-
- ctx = self.backend_runner.run_application(execution_params)
- if ctx.stdout is not None:
- ctx.stdout = self.consume_output(ctx.stdout, output_consumers)
-
- @abstractmethod
- def get_execution_params(self, model_info: ModelInfo) -> ExecutionParams:
- """Get execution params for the provided model."""
-
- def check_system_and_application(self, system_name: str, app_name: str) -> None:
- """Check if requested system and application installed."""
- if not self.backend_runner.is_system_installed(system_name):
- raise Exception(f"System {system_name} is not installed")
-
- if not self.backend_runner.is_application_installed(app_name, system_name):
- raise Exception(
- f"Application {app_name} for the system {system_name} "
- "is not installed"
+ """Init the manager."""
+ self.installations = installations
+ self.noninteractive = noninteractive
+
+ def _install(
+ self,
+ backend_name: str,
+ install_type: InstallationType,
+ prompt: Callable[[Installation], str],
+ force: bool,
+ ) -> None:
+ """Check metadata and install backend."""
+ installs = self.find_by_name(backend_name)
+
+ if not installs:
+ logger.info("Unknown backend '%s'.", backend_name)
+ logger.info(
+ "Please run command 'mlia-backend list' to get list of "
+ "supported backend names."
)
- @staticmethod
- def consume_output(output: bytearray, consumers: list[OutputConsumer]) -> bytearray:
- """
- Pass program's output to the consumers and filter it.
+ return
+
+ if len(installs) > 1:
+ raise InternalError(f"More than one backend with name {backend_name} found")
+
+ installation = installs[0]
+ if not installation.supports(install_type):
+ if isinstance(install_type, InstallFromPath):
+ logger.info(
+ "Backend '%s' could not be installed using path '%s'.",
+ installation.name,
+ install_type.backend_path,
+ )
+ logger.info(
+ "Please check that '%s' is a valid path to the installed backend.",
+ install_type.backend_path,
+ )
+ else:
+ logger.info(
+ "Backend '%s' could not be downloaded and installed",
+ installation.name,
+ )
+ logger.info(
+ "Please refer to the project's documentation for more details."
+ )
+
+ return
+
+ if installation.already_installed and not force:
+ logger.info("Backend '%s' is already installed.", installation.name)
+ logger.info("Please, consider using --force option.")
+ return
+
+ proceed = self.noninteractive or yes(prompt(installation))
+ if not proceed:
+ logger.info("%s installation canceled.", installation.name)
+ return
+
+ if installation.already_installed and force:
+ logger.info(
+ "Force installing %s, so delete the existing "
+ "installed backend first.",
+ installation.name,
+ )
+ installation.uninstall()
- Returns the filtered output.
- """
- filtered_output = bytearray()
- for line_bytes in output.splitlines():
- line = line_bytes.decode("utf-8")
- remove_line = False
- for consumer in consumers:
- if consumer.feed(line):
- remove_line = True
- if not remove_line:
- filtered_output.extend(line_bytes)
+ installation.install(install_type)
+ logger.info("%s successfully installed.", installation.name)
- return filtered_output
+ def install_from(
+ self, backend_path: Path, backend_name: str, force: bool = False
+ ) -> None:
+ """Install from the provided directory."""
+ def prompt(install: Installation) -> str:
+ return (
+ f"{install.name} was found in {backend_path}. "
+ "Would you like to install it?"
+ )
-class GenericInferenceRunnerEthosU(GenericInferenceRunner):
- """Generic inference runner on U55/65."""
+ install_type = InstallFromPath(backend_path)
+ self._install(backend_name, install_type, prompt, force)
- def __init__(
- self, backend_runner: BackendRunner, device_info: DeviceInfo, backend: str
+ def download_and_install(
+ self, backend_name: str, eula_agreement: bool = True, force: bool = False
) -> None:
- """Init generic inference runner instance."""
- super().__init__(backend_runner)
+ """Download and install available backends."""
- system_name, app_name = self.resolve_system_and_app(device_info, backend)
- self.system_name = system_name
- self.app_name = app_name
- self.device_info = device_info
+ def prompt(install: Installation) -> str:
+ return f"Would you like to download and install {install.name}?"
- @staticmethod
- def resolve_system_and_app(
- device_info: DeviceInfo, backend: str
- ) -> tuple[str, str]:
- """Find appropriate system and application for the provided device/backend."""
- try:
- system_name = get_system_name(backend, device_info.device_type)
- except KeyError as ex:
- raise RuntimeError(
- f"Unsupported device {device_info.device_type} "
- f"for backend {backend}"
- ) from ex
-
- try:
- app_name = _SYSTEM_TO_APP_MAP[system_name]
- except KeyError as err:
- raise RuntimeError(f"System {system_name} is not installed") from err
-
- return system_name, app_name
-
- def get_execution_params(self, model_info: ModelInfo) -> ExecutionParams:
- """Get execution params for Ethos-U55/65."""
- self.check_system_and_application(self.system_name, self.app_name)
-
- system_params = [
- f"mac={self.device_info.mac}",
- f"input_file={model_info.model_path.absolute()}",
- ]
+ install_type = DownloadAndInstall(eula_agreement=eula_agreement)
+ self._install(backend_name, install_type, prompt, force)
- return ExecutionParams(
- self.app_name,
- self.system_name,
- [],
- system_params,
- )
+ def show_env_details(self) -> None:
+ """Print current state of the execution environment."""
+ if installed := self.already_installed():
+ self._print_installation_list("Installed backends:", installed)
+
+ if could_be_installed := self.ready_for_installation():
+ self._print_installation_list(
+ "Following backends could be installed:",
+ could_be_installed,
+ new_section=bool(installed),
+ )
+ if not installed and not could_be_installed:
+ logger.info("No backends installed")
-def get_generic_runner(device_info: DeviceInfo, backend: str) -> GenericInferenceRunner:
- """Get generic runner for provided device and backend."""
- backend_runner = get_backend_runner()
- return GenericInferenceRunnerEthosU(backend_runner, device_info, backend)
+ @staticmethod
+ def _print_installation_list(
+ header: str, installations: list[Installation], new_section: bool = False
+ ) -> None:
+ """Print list of the installations."""
+ logger.info("%s%s\n", "\n" if new_section else "", header)
+ for installation in installations:
+ logger.info(" - %s", installation.name)
-def estimate_performance(
- model_info: ModelInfo, device_info: DeviceInfo, backend: str
-) -> PerformanceMetrics:
- """Get performance estimations."""
- output_parser = GenericInferenceOutputParser()
- output_consumers = [output_parser, LogWriter()]
+ def uninstall(self, backend_name: str) -> None:
+ """Uninstall the backend with name backend_name."""
+ installations = self.already_installed(backend_name)
- generic_runner = get_generic_runner(device_info, backend)
- generic_runner.run(model_info, output_consumers)
+ if not installations:
+ raise ConfigurationError(f"Backend '{backend_name}' is not installed")
- if not output_parser.is_ready():
- missed_data = ",".join(output_parser.missed_keys())
- logger.debug("Unable to get performance metrics, missed data %s", missed_data)
- raise Exception("Unable to get performance metrics, insufficient data")
+ if len(installations) != 1:
+ raise InternalError(
+ f"More than one installed backend with name {backend_name} found"
+ )
- return PerformanceMetrics(**output_parser.result)
+ installation = installations[0]
+ installation.uninstall()
+ logger.info("%s successfully uninstalled.", installation.name)
-def get_backend_runner() -> BackendRunner:
- """
- Return BackendRunner instance.
+ def backend_installed(self, backend_name: str) -> bool:
+ """Return true if requested backend installed."""
+ installations = self.already_installed(backend_name)
- Note: This is needed for the unit tests.
- """
- return BackendRunner()
+ return len(installations) == 1
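
For orientation, a minimal usage sketch of the installation manager in its new location, assuming the constructor and methods shown in this patch (the backends list mirrors get_installation_manager() in src/mlia/cli/config.py below; the local path is a placeholder):

from pathlib import Path

from mlia.backend.corstone.install import get_corstone_installations
from mlia.backend.manager import DefaultInstallationManager
from mlia.backend.tosa_checker.install import get_tosa_backend_installation

# Assemble the manager the same way get_installation_manager() does.
backends = get_corstone_installations()
backends.append(get_tosa_backend_installation())
manager = DefaultInstallationManager(backends, noninteractive=True)

manager.show_env_details()  # print installed / installable backends

# Install from a local directory (placeholder path) or download directly.
manager.install_from(Path("/opt/FVP_Corstone_SSE-300"), "Corstone-300")
manager.download_and_install("tosa-checker", eula_agreement=True)

if manager.backend_installed("tosa-checker"):
    manager.uninstall("tosa-checker")
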
diff --git a/src/mlia/backend/tosa_checker/__init__.py b/src/mlia/backend/tosa_checker/__init__.py
new file mode 100644
index 0000000..cec210d
--- /dev/null
+++ b/src/mlia/backend/tosa_checker/__init__.py
@@ -0,0 +1,3 @@
+# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+"""TOSA checker backend module."""
diff --git a/src/mlia/backend/tosa_checker/install.py b/src/mlia/backend/tosa_checker/install.py
new file mode 100644
index 0000000..72454bc
--- /dev/null
+++ b/src/mlia/backend/tosa_checker/install.py
@@ -0,0 +1,19 @@
+# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+"""Module for python package based installations."""
+from __future__ import annotations
+
+from mlia.backend.install import Installation
+from mlia.backend.install import PyPackageBackendInstallation
+
+
+def get_tosa_backend_installation() -> Installation:
+ """Get TOSA backend installation."""
+ return PyPackageBackendInstallation(
+ name="tosa-checker",
+ description="Tool to check if a ML model is compatible "
+ "with the TOSA specification",
+ packages_to_install=["mlia[tosa]"],
+ packages_to_uninstall=["tosa-checker"],
+ expected_packages=["tosa-checker"],
+ )
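
A short sketch of exercising this installation object directly, assuming the Installation interface from the removed tools/metadata/common.py carries over unchanged to mlia.backend.install:

from mlia.backend.install import DownloadAndInstall
from mlia.backend.tosa_checker.install import get_tosa_backend_installation

installation = get_tosa_backend_installation()

# Install the tosa-checker python package if it is not already present.
if installation.could_be_installed and not installation.already_installed:
    installation.install(DownloadAndInstall(eula_agreement=True))

print(installation.name, "installed:", installation.already_installed)
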
diff --git a/src/mlia/backend/vela/__init__.py b/src/mlia/backend/vela/__init__.py
new file mode 100644
index 0000000..6ea0c21
--- /dev/null
+++ b/src/mlia/backend/vela/__init__.py
@@ -0,0 +1,3 @@
+# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+"""Vela backend module."""
diff --git a/src/mlia/backend/vela/compat.py b/src/mlia/backend/vela/compat.py
new file mode 100644
index 0000000..3ec42d1
--- /dev/null
+++ b/src/mlia/backend/vela/compat.py
@@ -0,0 +1,158 @@
+# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+"""Vela operator compatibility module."""
+from __future__ import annotations
+
+import itertools
+import logging
+from dataclasses import dataclass
+from pathlib import Path
+
+from ethosu.vela.operation import Op
+from ethosu.vela.tflite_mapping import optype_to_builtintype
+from ethosu.vela.tflite_model_semantic import TFLiteSemantic
+from ethosu.vela.tflite_supported_operators import TFLiteSupportedOperators
+from ethosu.vela.vela import generate_supported_ops
+
+from mlia.backend.vela.compiler import VelaCompiler
+from mlia.backend.vela.compiler import VelaCompilerOptions
+from mlia.utils.logging import redirect_output
+
+
+logger = logging.getLogger(__name__)
+
+VELA_INTERNAL_OPS = (Op.Placeholder, Op.SubgraphInput, Op.Const)
+
+
+@dataclass
+class NpuSupported:
+ """Operator's npu supported attribute."""
+
+ supported: bool
+ reasons: list[tuple[str, str]]
+
+
+@dataclass
+class Operator:
+ """Model operator."""
+
+ name: str
+ op_type: str
+ run_on_npu: NpuSupported
+
+ @property
+ def cpu_only(self) -> bool:
+ """Return true if operator is CPU only."""
+ cpu_only_reasons = [("CPU only operator", "")]
+ return (
+ not self.run_on_npu.supported
+ and self.run_on_npu.reasons == cpu_only_reasons
+ )
+
+
+@dataclass
+class Operators:
+ """Model's operators."""
+
+ ops: list[Operator]
+
+ @property
+ def npu_supported_ratio(self) -> float:
+ """Return NPU supported ratio."""
+ total = self.total_number
+ npu_supported = self.npu_supported_number
+
+ if total == 0 or npu_supported == 0:
+ return 0
+
+ return npu_supported / total
+
+ @property
+ def npu_unsupported_ratio(self) -> float:
+ """Return NPU unsupported ratio."""
+ return 1 - self.npu_supported_ratio
+
+ @property
+ def total_number(self) -> int:
+ """Return total number of operators."""
+ return len(self.ops)
+
+ @property
+ def npu_supported_number(self) -> int:
+ """Return number of npu supported operators."""
+ return sum(op.run_on_npu.supported for op in self.ops)
+
+
+def supported_operators(
+ model_path: Path, compiler_options: VelaCompilerOptions
+) -> Operators:
+ """Return list of model's operators."""
+ logger.debug("Check supported operators for the model %s", model_path)
+
+ vela_compiler = VelaCompiler(compiler_options)
+ initial_model = vela_compiler.read_model(model_path)
+
+ return Operators(
+ [
+ Operator(op.name, optype_to_builtintype(op.type), run_on_npu(op))
+ for sg in initial_model.nng.subgraphs
+ for op in sg.get_all_ops()
+ if op.type not in VELA_INTERNAL_OPS
+ ]
+ )
+
+
+def run_on_npu(operator: Op) -> NpuSupported:
+ """Return information if operator can run on NPU.
+
+ Vela does a number of checks that can help establish whether
+ a particular operator is supported to run on NPU.
+
+ There are two groups of checks:
+ - general TensorFlow Lite constraints
+ - operator specific constraints
+
+ If an operator is not supported on NPU then this function
+ will return the reason of that.
+
+ The reason is split in two parts:
+ - general description of why the operator cannot be placed on NPU
+ - details on the particular operator
+ """
+ semantic_checker = TFLiteSemantic()
+ semantic_constraints = itertools.chain(
+ semantic_checker.generic_constraints,
+ semantic_checker.specific_constraints[operator.type],
+ )
+
+ for constraint in semantic_constraints:
+ op_valid, op_reason = constraint(operator)
+ if not op_valid:
+ return NpuSupported(False, [(constraint.__doc__, op_reason)])
+
+ if operator.type not in TFLiteSupportedOperators.supported_operators:
+ reasons = (
+ [("CPU only operator", "")]
+ if operator.type not in VELA_INTERNAL_OPS
+ else []
+ )
+
+ return NpuSupported(False, reasons)
+
+ tflite_supported_operators = TFLiteSupportedOperators()
+ operation_constraints = itertools.chain(
+ tflite_supported_operators.generic_constraints,
+ tflite_supported_operators.specific_constraints[operator.type],
+ )
+ for constraint in operation_constraints:
+ op_valid, op_reason = constraint(operator)
+ if not op_valid:
+ return NpuSupported(False, [(constraint.__doc__, op_reason)])
+
+ return NpuSupported(True, [])
+
+
+def generate_supported_operators_report() -> None:
+ """Generate supported operators report in current working directory."""
+ with redirect_output(logger):
+ generate_supported_ops()
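
A minimal sketch of calling the relocated compatibility helpers; the accelerator_config value and the model path are assumed examples, and in MLIA the options would normally come from the target profile via resolve_compiler_config():

from pathlib import Path

from mlia.backend.vela.compat import supported_operators
from mlia.backend.vela.compiler import VelaCompilerOptions

# Assumed example configuration; real values are resolved from the profile.
options = VelaCompilerOptions(accelerator_config="ethos-u55-256")

operators = supported_operators(Path("model.tflite"), options)
print(f"NPU supported ratio: {operators.npu_supported_ratio:.0%}")

for operator in operators.ops:
    if not operator.run_on_npu.supported:
        print(operator.name, operator.op_type, operator.run_on_npu.reasons)
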
diff --git a/src/mlia/tools/vela_wrapper.py b/src/mlia/backend/vela/compiler.py
index 00d2f2c..3d3847a 100644
--- a/src/mlia/tools/vela_wrapper.py
+++ b/src/mlia/backend/vela/compiler.py
@@ -1,9 +1,8 @@
# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
-"""Vela wrapper module."""
+"""Vela compiler wrapper module."""
from __future__ import annotations
-import itertools
import logging
import sys
from dataclasses import dataclass
@@ -11,7 +10,6 @@ from pathlib import Path
from typing import Any
from typing import Literal
-import numpy as np
from ethosu.vela.architecture_features import ArchitectureFeatures
from ethosu.vela.compiler_driver import compiler_driver
from ethosu.vela.compiler_driver import CompilerOptions
@@ -20,106 +18,18 @@ from ethosu.vela.model_reader import ModelReaderOptions
from ethosu.vela.model_reader import read_model
from ethosu.vela.nn_graph import Graph
from ethosu.vela.nn_graph import NetworkType
-from ethosu.vela.npu_performance import PassCycles
from ethosu.vela.operation import CustomType
-from ethosu.vela.operation import Op
from ethosu.vela.scheduler import OptimizationStrategy
from ethosu.vela.scheduler import SchedulerOptions
from ethosu.vela.tensor import BandwidthDirection
from ethosu.vela.tensor import MemArea
from ethosu.vela.tensor import Tensor
-from ethosu.vela.tflite_mapping import optype_to_builtintype
-from ethosu.vela.tflite_model_semantic import TFLiteSemantic
-from ethosu.vela.tflite_supported_operators import TFLiteSupportedOperators
from ethosu.vela.tflite_writer import write_tflite
-from ethosu.vela.vela import generate_supported_ops
from mlia.utils.logging import redirect_output
-
logger = logging.getLogger(__name__)
-VELA_INTERNAL_OPS = (Op.Placeholder, Op.SubgraphInput, Op.Const)
-
-
-@dataclass
-class PerformanceMetrics: # pylint: disable=too-many-instance-attributes
- """Contains all the performance metrics Vela generates in a run."""
-
- npu_cycles: int
- sram_access_cycles: int
- dram_access_cycles: int
- on_chip_flash_access_cycles: int
- off_chip_flash_access_cycles: int
- total_cycles: int
- batch_inference_time: float
- inferences_per_second: float
- batch_size: int
- unknown_memory_area_size: int
- sram_memory_area_size: int
- dram_memory_area_size: int
- on_chip_flash_memory_area_size: int
- off_chip_flash_memory_area_size: int
-
-
-@dataclass
-class NpuSupported:
- """Operator's npu supported attribute."""
-
- supported: bool
- reasons: list[tuple[str, str]]
-
-
-@dataclass
-class Operator:
- """Model operator."""
-
- name: str
- op_type: str
- run_on_npu: NpuSupported
-
- @property
- def cpu_only(self) -> bool:
- """Return true if operator is CPU only."""
- cpu_only_reasons = [("CPU only operator", "")]
- return (
- not self.run_on_npu.supported
- and self.run_on_npu.reasons == cpu_only_reasons
- )
-
-
-@dataclass
-class Operators:
- """Model's operators."""
-
- ops: list[Operator]
-
- @property
- def npu_supported_ratio(self) -> float:
- """Return NPU supported ratio."""
- total = self.total_number
- npu_supported = self.npu_supported_number
-
- if total == 0 or npu_supported == 0:
- return 0
-
- return npu_supported / total
-
- @property
- def npu_unsupported_ratio(self) -> float:
- """Return NPU unsupported ratio."""
- return 1 - self.npu_supported_ratio
-
- @property
- def total_number(self) -> int:
- """Return total number of operators."""
- return len(self.ops)
-
- @property
- def npu_supported_number(self) -> int:
- """Return number of npu supported operators."""
- return sum(op.run_on_npu.supported for op in self.ops)
-
@dataclass
class Model:
@@ -347,30 +257,6 @@ def resolve_compiler_config(
return vela_compiler.get_config()
-def estimate_performance(
- model_path: Path, compiler_options: VelaCompilerOptions
-) -> PerformanceMetrics:
- """Return performance estimations for the model/device.
-
- Logic for this function comes from Vela module stats_writer.py
- """
- logger.debug(
- "Estimate performance for the model %s on %s",
- model_path,
- compiler_options.accelerator_config,
- )
-
- vela_compiler = VelaCompiler(compiler_options)
-
- initial_model = vela_compiler.read_model(model_path)
- if initial_model.optimized:
- raise Exception("Unable to estimate performance for the given optimized model")
-
- optimized_model = vela_compiler.compile_model(initial_model)
-
- return _performance_metrics(optimized_model)
-
-
def optimize_model(
model_path: Path, compiler_options: VelaCompilerOptions, output_model_path: Path
) -> None:
@@ -386,112 +272,3 @@ def optimize_model(
logger.debug("Save optimized model into %s", output_model_path)
optimized_model.save(output_model_path)
-
-
-def _performance_metrics(optimized_model: OptimizedModel) -> PerformanceMetrics:
- """Return performance metrics for optimized model."""
- cycles = optimized_model.nng.cycles
-
- def memory_usage(mem_area: MemArea) -> int:
- """Get memory usage for the proviced memory area type."""
- memory_used: dict[MemArea, int] = optimized_model.nng.memory_used
- bandwidths = optimized_model.nng.bandwidths
-
- return memory_used.get(mem_area, 0) if np.sum(bandwidths[mem_area]) > 0 else 0
-
- midpoint_fps = np.nan
- midpoint_inference_time = cycles[PassCycles.Total] / optimized_model.arch.core_clock
- if midpoint_inference_time > 0:
- midpoint_fps = 1 / midpoint_inference_time
-
- return PerformanceMetrics(
- npu_cycles=int(cycles[PassCycles.Npu]),
- sram_access_cycles=int(cycles[PassCycles.SramAccess]),
- dram_access_cycles=int(cycles[PassCycles.DramAccess]),
- on_chip_flash_access_cycles=int(cycles[PassCycles.OnChipFlashAccess]),
- off_chip_flash_access_cycles=int(cycles[PassCycles.OffChipFlashAccess]),
- total_cycles=int(cycles[PassCycles.Total]),
- batch_inference_time=midpoint_inference_time * 1000,
- inferences_per_second=midpoint_fps,
- batch_size=optimized_model.nng.batch_size,
- unknown_memory_area_size=memory_usage(MemArea.Unknown),
- sram_memory_area_size=memory_usage(MemArea.Sram),
- dram_memory_area_size=memory_usage(MemArea.Dram),
- on_chip_flash_memory_area_size=memory_usage(MemArea.OnChipFlash),
- off_chip_flash_memory_area_size=memory_usage(MemArea.OffChipFlash),
- )
-
-
-def supported_operators(
- model_path: Path, compiler_options: VelaCompilerOptions
-) -> Operators:
- """Return list of model's operators."""
- logger.debug("Check supported operators for the model %s", model_path)
-
- vela_compiler = VelaCompiler(compiler_options)
- initial_model = vela_compiler.read_model(model_path)
-
- return Operators(
- [
- Operator(op.name, optype_to_builtintype(op.type), run_on_npu(op))
- for sg in initial_model.nng.subgraphs
- for op in sg.get_all_ops()
- if op.type not in VELA_INTERNAL_OPS
- ]
- )
-
-
-def run_on_npu(operator: Op) -> NpuSupported:
- """Return information if operator can run on NPU.
-
- Vela does a number of checks that can help establish whether
- a particular operator is supported to run on NPU.
-
- There are two groups of checks:
- - general TensorFlow Lite constraints
- - operator specific constraints
-
- If an operator is not supported on NPU then this function
- will return the reason of that.
-
- The reason is split in two parts:
- - general description of why the operator cannot be placed on NPU
- - details on the particular operator
- """
- semantic_checker = TFLiteSemantic()
- semantic_constraints = itertools.chain(
- semantic_checker.generic_constraints,
- semantic_checker.specific_constraints[operator.type],
- )
-
- for constraint in semantic_constraints:
- op_valid, op_reason = constraint(operator)
- if not op_valid:
- return NpuSupported(False, [(constraint.__doc__, op_reason)])
-
- if operator.type not in TFLiteSupportedOperators.supported_operators:
- reasons = (
- [("CPU only operator", "")]
- if operator.type not in VELA_INTERNAL_OPS
- else []
- )
-
- return NpuSupported(False, reasons)
-
- tflite_supported_operators = TFLiteSupportedOperators()
- operation_constraints = itertools.chain(
- tflite_supported_operators.generic_constraints,
- tflite_supported_operators.specific_constraints[operator.type],
- )
- for constraint in operation_constraints:
- op_valid, op_reason = constraint(operator)
- if not op_valid:
- return NpuSupported(False, [(constraint.__doc__, op_reason)])
-
- return NpuSupported(True, [])
-
-
-def generate_supported_operators_report() -> None:
- """Generate supported operators report in current working directory."""
- with redirect_output(logger):
- generate_supported_ops()
diff --git a/src/mlia/backend/vela/performance.py b/src/mlia/backend/vela/performance.py
new file mode 100644
index 0000000..ccd2f6f
--- /dev/null
+++ b/src/mlia/backend/vela/performance.py
@@ -0,0 +1,97 @@
+# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+"""Vela performance module."""
+from __future__ import annotations
+
+import logging
+from dataclasses import dataclass
+from pathlib import Path
+
+import numpy as np
+from ethosu.vela.npu_performance import PassCycles
+from ethosu.vela.tensor import MemArea
+
+from mlia.backend.vela.compiler import OptimizedModel
+from mlia.backend.vela.compiler import VelaCompiler
+from mlia.backend.vela.compiler import VelaCompilerOptions
+
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class PerformanceMetrics: # pylint: disable=too-many-instance-attributes
+ """Contains all the performance metrics Vela generates in a run."""
+
+ npu_cycles: int
+ sram_access_cycles: int
+ dram_access_cycles: int
+ on_chip_flash_access_cycles: int
+ off_chip_flash_access_cycles: int
+ total_cycles: int
+ batch_inference_time: float
+ inferences_per_second: float
+ batch_size: int
+ unknown_memory_area_size: int
+ sram_memory_area_size: int
+ dram_memory_area_size: int
+ on_chip_flash_memory_area_size: int
+ off_chip_flash_memory_area_size: int
+
+
+def estimate_performance(
+ model_path: Path, compiler_options: VelaCompilerOptions
+) -> PerformanceMetrics:
+ """Return performance estimations for the model/device.
+
+ Logic for this function comes from Vela module stats_writer.py
+ """
+ logger.debug(
+ "Estimate performance for the model %s on %s",
+ model_path,
+ compiler_options.accelerator_config,
+ )
+
+ vela_compiler = VelaCompiler(compiler_options)
+
+ initial_model = vela_compiler.read_model(model_path)
+ if initial_model.optimized:
+ raise Exception("Unable to estimate performance for the given optimized model")
+
+ optimized_model = vela_compiler.compile_model(initial_model)
+
+ return _performance_metrics(optimized_model)
+
+
+def _performance_metrics(optimized_model: OptimizedModel) -> PerformanceMetrics:
+ """Return performance metrics for optimized model."""
+ cycles = optimized_model.nng.cycles
+
+ def memory_usage(mem_area: MemArea) -> int:
+ """Get memory usage for the proviced memory area type."""
+ memory_used: dict[MemArea, int] = optimized_model.nng.memory_used
+ bandwidths = optimized_model.nng.bandwidths
+
+ return memory_used.get(mem_area, 0) if np.sum(bandwidths[mem_area]) > 0 else 0
+
+ midpoint_fps = np.nan
+ midpoint_inference_time = cycles[PassCycles.Total] / optimized_model.arch.core_clock
+ if midpoint_inference_time > 0:
+ midpoint_fps = 1 / midpoint_inference_time
+
+ return PerformanceMetrics(
+ npu_cycles=int(cycles[PassCycles.Npu]),
+ sram_access_cycles=int(cycles[PassCycles.SramAccess]),
+ dram_access_cycles=int(cycles[PassCycles.DramAccess]),
+ on_chip_flash_access_cycles=int(cycles[PassCycles.OnChipFlashAccess]),
+ off_chip_flash_access_cycles=int(cycles[PassCycles.OffChipFlashAccess]),
+ total_cycles=int(cycles[PassCycles.Total]),
+ batch_inference_time=midpoint_inference_time * 1000,
+ inferences_per_second=midpoint_fps,
+ batch_size=optimized_model.nng.batch_size,
+ unknown_memory_area_size=memory_usage(MemArea.Unknown),
+ sram_memory_area_size=memory_usage(MemArea.Sram),
+ dram_memory_area_size=memory_usage(MemArea.Dram),
+ on_chip_flash_memory_area_size=memory_usage(MemArea.OnChipFlash),
+ off_chip_flash_memory_area_size=memory_usage(MemArea.OffChipFlash),
+ )
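
And the matching sketch for the split-out performance estimator, under the same assumptions about VelaCompilerOptions and with a placeholder model path:

from pathlib import Path

from mlia.backend.vela.compiler import VelaCompilerOptions
from mlia.backend.vela.performance import estimate_performance

options = VelaCompilerOptions(accelerator_config="ethos-u55-256")  # assumed example
metrics = estimate_performance(Path("model.tflite"), options)

print("NPU cycles:       ", metrics.npu_cycles)
print("Total cycles:     ", metrics.total_cycles)
print("Inferences/second:", metrics.inferences_per_second)
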
diff --git a/src/mlia/cli/config.py b/src/mlia/cli/config.py
index 6ea9bb4..2d694dc 100644
--- a/src/mlia/cli/config.py
+++ b/src/mlia/cli/config.py
@@ -6,18 +6,19 @@ from __future__ import annotations
import logging
from functools import lru_cache
-import mlia.backend.manager as backend_manager
-from mlia.tools.metadata.common import DefaultInstallationManager
-from mlia.tools.metadata.common import InstallationManager
-from mlia.tools.metadata.corstone import get_corstone_installations
-from mlia.tools.metadata.py_package import get_pypackage_backend_installations
+from mlia.backend.corstone.install import get_corstone_installations
+from mlia.backend.install import supported_backends
+from mlia.backend.manager import DefaultInstallationManager
+from mlia.backend.manager import InstallationManager
+from mlia.backend.tosa_checker.install import get_tosa_backend_installation
logger = logging.getLogger(__name__)
def get_installation_manager(noninteractive: bool = False) -> InstallationManager:
"""Return installation manager."""
- backends = get_corstone_installations() + get_pypackage_backend_installations()
+ backends = get_corstone_installations()
+ backends.append(get_tosa_backend_installation())
return DefaultInstallationManager(backends, noninteractive=noninteractive)
@@ -31,7 +32,7 @@ def get_available_backends() -> list[str]:
manager = get_installation_manager()
available_backends.extend(
backend
- for backend in backend_manager.supported_backends()
+ for backend in supported_backends()
if manager.backend_installed(backend)
)
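
With that rewiring, the CLI-facing helpers can be used as before; a brief sketch using only the functions shown in this hunk:

from mlia.cli.config import get_available_backends
from mlia.cli.config import get_installation_manager

manager = get_installation_manager(noninteractive=True)
manager.show_env_details()

# Backends from supported_backends() that are actually installed.
print(get_available_backends())
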
diff --git a/src/mlia/devices/ethosu/config.py b/src/mlia/devices/ethosu/config.py
index e44dcdc..f2e867e 100644
--- a/src/mlia/devices/ethosu/config.py
+++ b/src/mlia/devices/ethosu/config.py
@@ -6,9 +6,9 @@ from __future__ import annotations
import logging
from typing import Any
+from mlia.backend.vela.compiler import resolve_compiler_config
+from mlia.backend.vela.compiler import VelaCompilerOptions
from mlia.devices.config import IPConfiguration
-from mlia.tools.vela_wrapper import resolve_compiler_config
-from mlia.tools.vela_wrapper import VelaCompilerOptions
from mlia.utils.filesystem import get_profile
from mlia.utils.filesystem import get_vela_config
diff --git a/src/mlia/devices/ethosu/data_analysis.py b/src/mlia/devices/ethosu/data_analysis.py
index 70b6f65..db89a5f 100644
--- a/src/mlia/devices/ethosu/data_analysis.py
+++ b/src/mlia/devices/ethosu/data_analysis.py
@@ -6,12 +6,12 @@ from __future__ import annotations
from dataclasses import dataclass
from functools import singledispatchmethod
+from mlia.backend.vela.compat import Operators
from mlia.core.common import DataItem
from mlia.core.data_analysis import Fact
from mlia.core.data_analysis import FactExtractor
from mlia.devices.ethosu.performance import OptimizationPerformanceMetrics
from mlia.nn.tensorflow.optimizations.select import OptimizationSettings
-from mlia.tools.vela_wrapper import Operators
@dataclass
diff --git a/src/mlia/devices/ethosu/data_collection.py b/src/mlia/devices/ethosu/data_collection.py
index c8d5293..d68eadb 100644
--- a/src/mlia/devices/ethosu/data_collection.py
+++ b/src/mlia/devices/ethosu/data_collection.py
@@ -6,6 +6,8 @@ from __future__ import annotations
import logging
from pathlib import Path
+from mlia.backend.vela.compat import Operators
+from mlia.backend.vela.compat import supported_operators
from mlia.core.context import Context
from mlia.core.data_collection import ContextAwareDataCollector
from mlia.core.errors import FunctionalityNotSupportedError
@@ -20,8 +22,6 @@ from mlia.nn.tensorflow.config import KerasModel
from mlia.nn.tensorflow.optimizations.select import get_optimizer
from mlia.nn.tensorflow.optimizations.select import OptimizationSettings
from mlia.nn.tensorflow.utils import save_keras_model
-from mlia.tools.vela_wrapper import Operators
-from mlia.tools.vela_wrapper import supported_operators
from mlia.utils.logging import log_action
from mlia.utils.types import is_list_of
diff --git a/src/mlia/devices/ethosu/handlers.py b/src/mlia/devices/ethosu/handlers.py
index 48f9a2e..f010bdb 100644
--- a/src/mlia/devices/ethosu/handlers.py
+++ b/src/mlia/devices/ethosu/handlers.py
@@ -5,6 +5,7 @@ from __future__ import annotations
import logging
+from mlia.backend.vela.compat import Operators
from mlia.core.events import CollectedDataEvent
from mlia.core.handlers import WorkflowEventsHandler
from mlia.core.typing import PathOrFileLike
@@ -13,7 +14,6 @@ from mlia.devices.ethosu.events import EthosUAdvisorStartedEvent
from mlia.devices.ethosu.performance import OptimizationPerformanceMetrics
from mlia.devices.ethosu.performance import PerformanceMetrics
from mlia.devices.ethosu.reporters import ethos_u_formatters
-from mlia.tools.vela_wrapper import Operators
logger = logging.getLogger(__name__)
diff --git a/src/mlia/devices/ethosu/operators.py b/src/mlia/devices/ethosu/operators.py
index 1a4ce8d..97c2b17 100644
--- a/src/mlia/devices/ethosu/operators.py
+++ b/src/mlia/devices/ethosu/operators.py
@@ -3,7 +3,7 @@
"""Operators module."""
import logging
-from mlia.tools import vela_wrapper
+from mlia.backend.vela.compat import generate_supported_operators_report
logger = logging.getLogger(__name__)
@@ -11,4 +11,4 @@ logger = logging.getLogger(__name__)
def report() -> None:
"""Generate supported operators report."""
- vela_wrapper.generate_supported_operators_report()
+ generate_supported_operators_report()
diff --git a/src/mlia/devices/ethosu/performance.py b/src/mlia/devices/ethosu/performance.py
index 431dd89..8051d6e 100644
--- a/src/mlia/devices/ethosu/performance.py
+++ b/src/mlia/devices/ethosu/performance.py
@@ -9,8 +9,13 @@ from enum import Enum
from pathlib import Path
from typing import Union
-import mlia.backend.manager as backend_manager
-import mlia.tools.vela_wrapper as vela
+import mlia.backend.vela.compiler as vela_comp
+import mlia.backend.vela.performance as vela_perf
+from mlia.backend.corstone.performance import DeviceInfo
+from mlia.backend.corstone.performance import estimate_performance
+from mlia.backend.corstone.performance import ModelInfo
+from mlia.backend.install import is_supported
+from mlia.backend.install import supported_backends
from mlia.core.context import Context
from mlia.core.performance import PerformanceEstimator
from mlia.devices.ethosu.config import EthosUConfiguration
@@ -133,7 +138,7 @@ class VelaPerformanceEstimator(
else model
)
- vela_perf_metrics = vela.estimate_performance(
+ vela_perf_metrics = vela_perf.estimate_performance(
model_path, self.device.compiler_options
)
@@ -177,17 +182,17 @@ class CorstonePerformanceEstimator(
f"{model_path.stem}_vela.tflite"
)
- vela.optimize_model(
+ vela_comp.optimize_model(
model_path, self.device.compiler_options, optimized_model_path
)
- model_info = backend_manager.ModelInfo(model_path=optimized_model_path)
- device_info = backend_manager.DeviceInfo(
+ model_info = ModelInfo(model_path=optimized_model_path)
+ device_info = DeviceInfo(
device_type=self.device.target, # type: ignore
mac=self.device.mac,
)
- corstone_perf_metrics = backend_manager.estimate_performance(
+ corstone_perf_metrics = estimate_performance(
model_info, device_info, self.backend
)
@@ -218,10 +223,10 @@ class EthosUPerformanceEstimator(
if backends is None:
backends = ["Vela"] # Only Vela is always available as default
for backend in backends:
- if backend != "Vela" and not backend_manager.is_supported(backend):
+ if backend != "Vela" and not is_supported(backend):
raise ValueError(
f"Unsupported backend '{backend}'. "
- f"Only 'Vela' and {backend_manager.supported_backends()} "
+ f"Only 'Vela' and {supported_backends()} "
"are supported."
)
self.backends = set(backends)
@@ -241,7 +246,7 @@ class EthosUPerformanceEstimator(
if backend == "Vela":
vela_estimator = VelaPerformanceEstimator(self.context, self.device)
memory_usage = vela_estimator.estimate(tflite_model)
- elif backend in backend_manager.supported_backends():
+ elif backend in supported_backends():
corstone_estimator = CorstonePerformanceEstimator(
self.context, self.device, backend
)
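
The CorstonePerformanceEstimator changes above reduce to the following call pattern; a sketch assuming a Corstone backend is installed and using placeholder paths:

from pathlib import Path

from mlia.backend.corstone.performance import DeviceInfo
from mlia.backend.corstone.performance import estimate_performance
from mlia.backend.corstone.performance import ModelInfo

model_info = ModelInfo(model_path=Path("model_vela.tflite"))  # Vela-optimized model
device_info = DeviceInfo(device_type="ethos-u55", mac=256)

# Requires e.g. the Corstone-300 backend to be installed beforehand.
metrics = estimate_performance(model_info, device_info, "Corstone-300")
print(metrics.npu_total_cycles, metrics.npu_active_cycles)
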
diff --git a/src/mlia/devices/ethosu/reporters.py b/src/mlia/devices/ethosu/reporters.py
index f0fcb39..7ecaab1 100644
--- a/src/mlia/devices/ethosu/reporters.py
+++ b/src/mlia/devices/ethosu/reporters.py
@@ -7,6 +7,8 @@ from collections import defaultdict
from typing import Any
from typing import Callable
+from mlia.backend.vela.compat import Operator
+from mlia.backend.vela.compat import Operators
from mlia.core.advice_generation import Advice
from mlia.core.reporters import report_advice
from mlia.core.reporting import BytesCell
@@ -23,8 +25,6 @@ from mlia.core.reporting import SingleRow
from mlia.core.reporting import Table
from mlia.devices.ethosu.config import EthosUConfiguration
from mlia.devices.ethosu.performance import PerformanceMetrics
-from mlia.tools.vela_wrapper import Operator
-from mlia.tools.vela_wrapper import Operators
from mlia.utils.console import style_improvement
from mlia.utils.types import is_list_of
diff --git a/src/mlia/tools/metadata/common.py b/src/mlia/tools/metadata/common.py
deleted file mode 100644
index 5019da9..0000000
--- a/src/mlia/tools/metadata/common.py
+++ /dev/null
@@ -1,322 +0,0 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-# SPDX-License-Identifier: Apache-2.0
-"""Module for installation process."""
-from __future__ import annotations
-
-import logging
-from abc import ABC
-from abc import abstractmethod
-from dataclasses import dataclass
-from pathlib import Path
-from typing import Callable
-from typing import Union
-
-from mlia.core.errors import ConfigurationError
-from mlia.core.errors import InternalError
-from mlia.utils.misc import yes
-
-logger = logging.getLogger(__name__)
-
-
-@dataclass
-class InstallFromPath:
- """Installation from the local path."""
-
- backend_path: Path
-
-
-@dataclass
-class DownloadAndInstall:
- """Download and install."""
-
- eula_agreement: bool = True
-
-
-InstallationType = Union[InstallFromPath, DownloadAndInstall]
-
-
-class Installation(ABC):
- """Base class for the installation process of the backends."""
-
- @property
- @abstractmethod
- def name(self) -> str:
- """Return name of the backend."""
-
- @property
- @abstractmethod
- def description(self) -> str:
- """Return description of the backend."""
-
- @property
- @abstractmethod
- def could_be_installed(self) -> bool:
- """Return true if backend could be installed in current environment."""
-
- @property
- @abstractmethod
- def already_installed(self) -> bool:
- """Return true if backend is already installed."""
-
- @abstractmethod
- def supports(self, install_type: InstallationType) -> bool:
- """Return true if installation supports requested installation type."""
-
- @abstractmethod
- def install(self, install_type: InstallationType) -> None:
- """Install the backend."""
-
- @abstractmethod
- def uninstall(self) -> None:
- """Uninstall the backend."""
-
-
-InstallationFilter = Callable[[Installation], bool]
-
-
-class AlreadyInstalledFilter:
- """Filter for already installed backends."""
-
- def __call__(self, installation: Installation) -> bool:
- """Installation filter."""
- return installation.already_installed
-
-
-class ReadyForInstallationFilter:
- """Filter for ready to be installed backends."""
-
- def __call__(self, installation: Installation) -> bool:
- """Installation filter."""
- return installation.could_be_installed and not installation.already_installed
-
-
-class SupportsInstallTypeFilter:
- """Filter backends that support certain type of the installation."""
-
- def __init__(self, installation_type: InstallationType) -> None:
- """Init filter."""
- self.installation_type = installation_type
-
- def __call__(self, installation: Installation) -> bool:
- """Installation filter."""
- return installation.supports(self.installation_type)
-
-
-class SearchByNameFilter:
- """Filter installation by name."""
-
- def __init__(self, backend_name: str | None) -> None:
- """Init filter."""
- self.backend_name = backend_name
-
- def __call__(self, installation: Installation) -> bool:
- """Installation filter."""
- return (
- not self.backend_name
- or installation.name.casefold() == self.backend_name.casefold()
- )
-
-
-class InstallationManager(ABC):
- """Helper class for managing installations."""
-
- @abstractmethod
- def install_from(self, backend_path: Path, backend_name: str, force: bool) -> None:
- """Install backend from the local directory."""
-
- @abstractmethod
- def download_and_install(
- self, backend_name: str, eula_agreement: bool, force: bool
- ) -> None:
- """Download and install backends."""
-
- @abstractmethod
- def show_env_details(self) -> None:
- """Show environment details."""
-
- @abstractmethod
- def backend_installed(self, backend_name: str) -> bool:
- """Return true if requested backend installed."""
-
- @abstractmethod
- def uninstall(self, backend_name: str) -> None:
- """Delete the existing installation."""
-
-
-class InstallationFiltersMixin:
- """Mixin for filtering installation based on different conditions."""
-
- installations: list[Installation]
-
- def filter_by(self, *filters: InstallationFilter) -> list[Installation]:
- """Filter installations."""
- return [
- installation
- for installation in self.installations
- if all(filter_(installation) for filter_ in filters)
- ]
-
- def find_by_name(self, backend_name: str) -> list[Installation]:
- """Return list of the backends filtered by name."""
- return self.filter_by(SearchByNameFilter(backend_name))
-
- def already_installed(self, backend_name: str = None) -> list[Installation]:
- """Return list of backends that are already installed."""
- return self.filter_by(
- AlreadyInstalledFilter(),
- SearchByNameFilter(backend_name),
- )
-
- def ready_for_installation(self) -> list[Installation]:
- """Return list of the backends that could be installed."""
- return self.filter_by(ReadyForInstallationFilter())
-
-
-class DefaultInstallationManager(InstallationManager, InstallationFiltersMixin):
- """Interactive installation manager."""
-
- def __init__(
- self, installations: list[Installation], noninteractive: bool = False
- ) -> None:
- """Init the manager."""
- self.installations = installations
- self.noninteractive = noninteractive
-
- def _install(
- self,
- backend_name: str,
- install_type: InstallationType,
- prompt: Callable[[Installation], str],
- force: bool,
- ) -> None:
- """Check metadata and install backend."""
- installs = self.find_by_name(backend_name)
-
- if not installs:
- logger.info("Unknown backend '%s'.", backend_name)
- logger.info(
- "Please run command 'mlia-backend list' to get list of "
- "supported backend names."
- )
-
- return
-
- if len(installs) > 1:
- raise InternalError(f"More than one backend with name {backend_name} found")
-
- installation = installs[0]
- if not installation.supports(install_type):
- if isinstance(install_type, InstallFromPath):
- logger.info(
- "Backend '%s' could not be installed using path '%s'.",
- installation.name,
- install_type.backend_path,
- )
- logger.info(
- "Please check that '%s' is a valid path to the installed backend.",
- install_type.backend_path,
- )
- else:
- logger.info(
- "Backend '%s' could not be downloaded and installed",
- installation.name,
- )
- logger.info(
- "Please refer to the project's documentation for more details."
- )
-
- return
-
- if installation.already_installed and not force:
- logger.info("Backend '%s' is already installed.", installation.name)
- logger.info("Please, consider using --force option.")
- return
-
- proceed = self.noninteractive or yes(prompt(installation))
- if not proceed:
- logger.info("%s installation canceled.", installation.name)
- return
-
- if installation.already_installed and force:
- logger.info(
- "Force installing %s, so delete the existing "
- "installed backend first.",
- installation.name,
- )
- installation.uninstall()
-
- installation.install(install_type)
- logger.info("%s successfully installed.", installation.name)
-
- def install_from(
- self, backend_path: Path, backend_name: str, force: bool = False
- ) -> None:
- """Install from the provided directory."""
-
- def prompt(install: Installation) -> str:
- return (
- f"{install.name} was found in {backend_path}. "
- "Would you like to install it?"
- )
-
- install_type = InstallFromPath(backend_path)
- self._install(backend_name, install_type, prompt, force)
-
- def download_and_install(
- self, backend_name: str, eula_agreement: bool = True, force: bool = False
- ) -> None:
- """Download and install available backends."""
-
- def prompt(install: Installation) -> str:
- return f"Would you like to download and install {install.name}?"
-
- install_type = DownloadAndInstall(eula_agreement=eula_agreement)
- self._install(backend_name, install_type, prompt, force)
-
- def show_env_details(self) -> None:
- """Print current state of the execution environment."""
- if installed := self.already_installed():
- self._print_installation_list("Installed backends:", installed)
-
- if could_be_installed := self.ready_for_installation():
- self._print_installation_list(
- "Following backends could be installed:",
- could_be_installed,
- new_section=bool(installed),
- )
-
- if not installed and not could_be_installed:
- logger.info("No backends installed")
-
- @staticmethod
- def _print_installation_list(
- header: str, installations: list[Installation], new_section: bool = False
- ) -> None:
- """Print list of the installations."""
- logger.info("%s%s\n", "\n" if new_section else "", header)
-
- for installation in installations:
- logger.info(" - %s", installation.name)
-
- def uninstall(self, backend_name: str) -> None:
- """Uninstall the backend with name backend_name."""
- installations = self.already_installed(backend_name)
-
- if not installations:
- raise ConfigurationError(f"Backend '{backend_name}' is not installed")
-
- if len(installations) != 1:
- raise InternalError(
- f"More than one installed backend with name {backend_name} found"
- )
-
- installation = installations[0]
- installation.uninstall()
-
- logger.info("%s successfully uninstalled.", installation.name)
-
- def backend_installed(self, backend_name: str) -> bool:
- """Return true if requested backend installed."""
- installations = self.already_installed(backend_name)
-
- return len(installations) == 1
diff --git a/src/mlia/tools/metadata/py_package.py b/src/mlia/tools/metadata/py_package.py
deleted file mode 100644
index 716b62a..0000000
--- a/src/mlia/tools/metadata/py_package.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-# SPDX-License-Identifier: Apache-2.0
-"""Module for python package based installations."""
-from __future__ import annotations
-
-from mlia.tools.metadata.common import DownloadAndInstall
-from mlia.tools.metadata.common import Installation
-from mlia.tools.metadata.common import InstallationType
-from mlia.utils.py_manager import get_package_manager
-
-
-class PyPackageBackendInstallation(Installation):
- """Backend based on the python package."""
-
- def __init__(
- self,
- name: str,
- description: str,
- packages_to_install: list[str],
- packages_to_uninstall: list[str],
- expected_packages: list[str],
- ) -> None:
- """Init the backend installation."""
- self._name = name
- self._description = description
- self._packages_to_install = packages_to_install
- self._packages_to_uninstall = packages_to_uninstall
- self._expected_packages = expected_packages
-
- self.package_manager = get_package_manager()
-
- @property
- def name(self) -> str:
- """Return name of the backend."""
- return self._name
-
- @property
- def description(self) -> str:
- """Return description of the backend."""
- return self._description
-
- @property
- def could_be_installed(self) -> bool:
- """Check if backend could be installed."""
- return True
-
- @property
- def already_installed(self) -> bool:
- """Check if backend already installed."""
- return self.package_manager.packages_installed(self._expected_packages)
-
- def supports(self, install_type: InstallationType) -> bool:
- """Return true if installation supports requested installation type."""
- return isinstance(install_type, DownloadAndInstall)
-
- def install(self, install_type: InstallationType) -> None:
- """Install the backend."""
- if not self.supports(install_type):
- raise Exception(f"Unsupported installation type {install_type}")
-
- self.package_manager.install(self._packages_to_install)
-
- def uninstall(self) -> None:
- """Uninstall the backend."""
- self.package_manager.uninstall(self._packages_to_uninstall)
-
-
-def get_tosa_backend_installation() -> Installation:
- """Get TOSA backend installation."""
- return PyPackageBackendInstallation(
- name="tosa-checker",
- description="Tool to check if a ML model is compatible "
- "with the TOSA specification",
- packages_to_install=["mlia[tosa]"],
- packages_to_uninstall=["tosa-checker"],
- expected_packages=["tosa-checker"],
- )
-
-
-def get_pypackage_backend_installations() -> list[Installation]:
- """Return list of the backend installations based on python packages."""
- return [
- get_tosa_backend_installation(),
- ]
diff --git a/tests/conftest.py b/tests/conftest.py
index b1f32dc..feb2aa0 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -10,12 +10,12 @@ from typing import Generator
import pytest
import tensorflow as tf
+from mlia.backend.vela.compiler import optimize_model
from mlia.core.context import ExecutionContext
from mlia.devices.ethosu.config import EthosUConfiguration
from mlia.nn.tensorflow.utils import convert_to_tflite
from mlia.nn.tensorflow.utils import save_keras_model
from mlia.nn.tensorflow.utils import save_tflite_model
-from mlia.tools.vela_wrapper import optimize_model
@pytest.fixture(scope="session", name="test_resources_path")
@@ -68,7 +68,9 @@ def test_resources(monkeypatch: pytest.MonkeyPatch, test_resources_path: Path) -
"""Return path to the test resources."""
return test_resources_path / "backends"
- monkeypatch.setattr("mlia.backend.fs.get_backend_resources", get_test_resources)
+ monkeypatch.setattr(
+ "mlia.backend.executor.fs.get_backend_resources", get_test_resources
+ )
yield
diff --git a/tests/test_api.py b/tests/test_api.py
index 6fa15b3..b9ab8ea 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -118,12 +118,12 @@ def test_get_advisor(
[
[
"ethos-u55-128",
- "mlia.tools.vela_wrapper.generate_supported_operators_report",
+ "mlia.devices.ethosu.operators.generate_supported_operators_report",
None,
],
[
"ethos-u65-256",
- "mlia.tools.vela_wrapper.generate_supported_operators_report",
+ "mlia.devices.ethosu.operators.generate_supported_operators_report",
None,
],
[
diff --git a/tests/test_tools_metadata_corstone.py b/tests/test_backend_corstone_install.py
index a7d81f2..3b05a49 100644
--- a/tests/test_tools_metadata_corstone.py
+++ b/tests/test_backend_corstone_install.py
@@ -10,21 +10,21 @@ from unittest.mock import MagicMock
import pytest
-from mlia.backend.manager import BackendRunner
-from mlia.tools.metadata.common import DownloadAndInstall
-from mlia.tools.metadata.common import InstallFromPath
-from mlia.tools.metadata.corstone import BackendInfo
-from mlia.tools.metadata.corstone import BackendInstallation
-from mlia.tools.metadata.corstone import BackendInstaller
-from mlia.tools.metadata.corstone import BackendMetadata
-from mlia.tools.metadata.corstone import CompoundPathChecker
-from mlia.tools.metadata.corstone import Corstone300Installer
-from mlia.tools.metadata.corstone import get_corstone_300_installation
-from mlia.tools.metadata.corstone import get_corstone_310_installation
-from mlia.tools.metadata.corstone import get_corstone_installations
-from mlia.tools.metadata.corstone import PackagePathChecker
-from mlia.tools.metadata.corstone import PathChecker
-from mlia.tools.metadata.corstone import StaticPathChecker
+from mlia.backend.corstone.install import Corstone300Installer
+from mlia.backend.corstone.install import get_corstone_300_installation
+from mlia.backend.corstone.install import get_corstone_310_installation
+from mlia.backend.corstone.install import get_corstone_installations
+from mlia.backend.corstone.install import PackagePathChecker
+from mlia.backend.corstone.install import StaticPathChecker
+from mlia.backend.executor.runner import BackendRunner
+from mlia.backend.install import BackendInfo
+from mlia.backend.install import BackendInstallation
+from mlia.backend.install import BackendInstaller
+from mlia.backend.install import BackendMetadata
+from mlia.backend.install import CompoundPathChecker
+from mlia.backend.install import DownloadAndInstall
+from mlia.backend.install import InstallFromPath
+from mlia.backend.install import PathChecker
@pytest.fixture(name="test_mlia_resources")
@@ -36,7 +36,7 @@ def fixture_test_mlia_resources(
mlia_resources.mkdir()
monkeypatch.setattr(
- "mlia.tools.metadata.corstone.get_mlia_resources",
+ "mlia.backend.install.get_mlia_resources",
MagicMock(return_value=mlia_resources),
)
@@ -88,10 +88,12 @@ def test_could_be_installed_depends_on_platform(
) -> None:
"""Test that installation could not be installed on unsupported platform."""
monkeypatch.setattr(
- "mlia.tools.metadata.corstone.platform.system", MagicMock(return_value=platform)
+ "mlia.backend.install.platform.system",
+ MagicMock(return_value=platform),
)
monkeypatch.setattr(
- "mlia.tools.metadata.corstone.all_paths_valid", MagicMock(return_value=True)
+ "mlia.backend.install.all_paths_valid",
+ MagicMock(return_value=True),
)
backend_runner_mock = MagicMock(spec=BackendRunner)
@@ -413,7 +415,7 @@ def test_corstone_300_installer(
command_mock = MagicMock()
monkeypatch.setattr(
- "mlia.tools.metadata.corstone.subprocess.check_call", command_mock
+ "mlia.backend.corstone.install.subprocess.check_call", command_mock
)
installer = Corstone300Installer()
result = installer(eula_agreement, tmp_path)
@@ -455,14 +457,14 @@ def test_corstone_vht_install(
create_destination_and_install_mock = MagicMock()
+ monkeypatch.setattr("mlia.backend.install.all_files_exist", _all_files_exist)
+
monkeypatch.setattr(
- "mlia.tools.metadata.corstone.all_files_exist", _all_files_exist
+ "mlia.backend.executor.system.get_available_systems", lambda: []
)
- monkeypatch.setattr("mlia.backend.system.get_available_systems", lambda: [])
-
monkeypatch.setattr(
- "mlia.backend.system.create_destination_and_install",
+ "mlia.backend.executor.system.create_destination_and_install",
create_destination_and_install_mock,
)
@@ -478,7 +480,7 @@ def test_corstone_uninstall(
remove_system_mock = MagicMock()
monkeypatch.setattr(
- "mlia.tools.metadata.corstone.remove_system",
+ "mlia.backend.install.remove_system",
remove_system_mock,
)
diff --git a/tests/test_backend_corstone_performance.py b/tests/test_backend_corstone_performance.py
new file mode 100644
index 0000000..1734eb9
--- /dev/null
+++ b/tests/test_backend_corstone_performance.py
@@ -0,0 +1,519 @@
+# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+"""Tests for module backend/manager."""
+from __future__ import annotations
+
+import base64
+import json
+from contextlib import ExitStack as does_not_raise
+from pathlib import Path
+from typing import Any
+from unittest.mock import MagicMock
+from unittest.mock import PropertyMock
+
+import pytest
+
+from mlia.backend.corstone.performance import BackendRunner
+from mlia.backend.corstone.performance import DeviceInfo
+from mlia.backend.corstone.performance import estimate_performance
+from mlia.backend.corstone.performance import GenericInferenceOutputParser
+from mlia.backend.corstone.performance import GenericInferenceRunnerEthosU
+from mlia.backend.corstone.performance import get_generic_runner
+from mlia.backend.corstone.performance import ModelInfo
+from mlia.backend.corstone.performance import PerformanceMetrics
+from mlia.backend.executor.application import get_application
+from mlia.backend.executor.execution import ExecutionContext
+from mlia.backend.executor.output_consumer import Base64OutputConsumer
+from mlia.backend.executor.system import get_system
+from mlia.backend.install import get_system_name
+from mlia.backend.install import is_supported
+from mlia.backend.install import supported_backends
+
+
+def _mock_encode_b64(data: dict[str, int]) -> str:
+ """
+ Encode the given data into a mock base64-encoded string of JSON.
+
+ This reproduces the base64 encoding done in the Corstone applications.
+
+ JSON example:
+
+ ```json
+ [{'count': 1,
+ 'profiling_group': 'Inference',
+ 'samples': [{'name': 'NPU IDLE', 'value': [612]},
+ {'name': 'NPU AXI0_RD_DATA_BEAT_RECEIVED', 'value': [165872]},
+ {'name': 'NPU AXI0_WR_DATA_BEAT_WRITTEN', 'value': [88712]},
+ {'name': 'NPU AXI1_RD_DATA_BEAT_RECEIVED', 'value': [57540]},
+ {'name': 'NPU ACTIVE', 'value': [520489]},
+ {'name': 'NPU TOTAL', 'value': [521101]}]}]
+ ```
+ """
+ wrapped_data = [
+ {
+ "count": 1,
+ "profiling_group": "Inference",
+ "samples": [
+ {"name": name, "value": [value]} for name, value in data.items()
+ ],
+ }
+ ]
+ json_str = json.dumps(wrapped_data)
+ json_bytes = bytearray(json_str, encoding="utf-8")
+ json_b64 = base64.b64encode(json_bytes).decode("utf-8")
+ tag = Base64OutputConsumer.TAG_NAME
+ return f"<{tag}>{json_b64}</{tag}>"
+
+
+@pytest.mark.parametrize(
+ "data, is_ready, result, missed_keys",
+ [
+ (
+ [],
+ False,
+ {},
+ {
+ "npu_active_cycles",
+ "npu_axi0_rd_data_beat_received",
+ "npu_axi0_wr_data_beat_written",
+ "npu_axi1_rd_data_beat_received",
+ "npu_idle_cycles",
+ "npu_total_cycles",
+ },
+ ),
+ (
+ ["sample text"],
+ False,
+ {},
+ {
+ "npu_active_cycles",
+ "npu_axi0_rd_data_beat_received",
+ "npu_axi0_wr_data_beat_written",
+ "npu_axi1_rd_data_beat_received",
+ "npu_idle_cycles",
+ "npu_total_cycles",
+ },
+ ),
+ (
+ [_mock_encode_b64({"NPU AXI0_RD_DATA_BEAT_RECEIVED": 123})],
+ False,
+ {"npu_axi0_rd_data_beat_received": 123},
+ {
+ "npu_active_cycles",
+ "npu_axi0_wr_data_beat_written",
+ "npu_axi1_rd_data_beat_received",
+ "npu_idle_cycles",
+ "npu_total_cycles",
+ },
+ ),
+ (
+ [
+ _mock_encode_b64(
+ {
+ "NPU AXI0_RD_DATA_BEAT_RECEIVED": 1,
+ "NPU AXI0_WR_DATA_BEAT_WRITTEN": 2,
+ "NPU AXI1_RD_DATA_BEAT_RECEIVED": 3,
+ "NPU ACTIVE": 4,
+ "NPU IDLE": 5,
+ "NPU TOTAL": 6,
+ }
+ )
+ ],
+ True,
+ {
+ "npu_axi0_rd_data_beat_received": 1,
+ "npu_axi0_wr_data_beat_written": 2,
+ "npu_axi1_rd_data_beat_received": 3,
+ "npu_active_cycles": 4,
+ "npu_idle_cycles": 5,
+ "npu_total_cycles": 6,
+ },
+ set(),
+ ),
+ ],
+)
+def test_generic_inference_output_parser(
+ data: list[str], is_ready: bool, result: dict, missed_keys: set[str]
+) -> None:
+ """Test generic runner output parser."""
+ parser = GenericInferenceOutputParser()
+
+ for line in data:
+ parser.feed(line)
+
+ assert parser.is_ready() == is_ready
+ assert parser.result == result
+ assert parser.missed_keys() == missed_keys
+
+
+@pytest.mark.parametrize(
+ "device, system, application, backend, expected_error",
+ [
+ (
+ DeviceInfo(device_type="ethos-u55", mac=32),
+ ("Corstone-300: Cortex-M55+Ethos-U55", True),
+ ("Generic Inference Runner: Ethos-U55", True),
+ "Corstone-300",
+ does_not_raise(),
+ ),
+ (
+ DeviceInfo(device_type="ethos-u55", mac=32),
+ ("Corstone-300: Cortex-M55+Ethos-U55", False),
+ ("Generic Inference Runner: Ethos-U55", False),
+ "Corstone-300",
+ pytest.raises(
+ Exception,
+ match=r"System Corstone-300: Cortex-M55\+Ethos-U55 is not installed",
+ ),
+ ),
+ (
+ DeviceInfo(device_type="ethos-u55", mac=32),
+ ("Corstone-300: Cortex-M55+Ethos-U55", True),
+ ("Generic Inference Runner: Ethos-U55", False),
+ "Corstone-300",
+ pytest.raises(
+ Exception,
+ match=r"Application Generic Inference Runner: Ethos-U55 "
+ r"for the system Corstone-300: Cortex-M55\+Ethos-U55 is not installed",
+ ),
+ ),
+ (
+ DeviceInfo(device_type="ethos-u55", mac=32),
+ ("Corstone-310: Cortex-M85+Ethos-U55", True),
+ ("Generic Inference Runner: Ethos-U55", True),
+ "Corstone-310",
+ does_not_raise(),
+ ),
+ (
+ DeviceInfo(device_type="ethos-u55", mac=32),
+ ("Corstone-310: Cortex-M85+Ethos-U55", False),
+ ("Generic Inference Runner: Ethos-U55", False),
+ "Corstone-310",
+ pytest.raises(
+ Exception,
+ match=r"System Corstone-310: Cortex-M85\+Ethos-U55 is not installed",
+ ),
+ ),
+ (
+ DeviceInfo(device_type="ethos-u55", mac=32),
+ ("Corstone-310: Cortex-M85+Ethos-U55", True),
+ ("Generic Inference Runner: Ethos-U55", False),
+ "Corstone-310",
+ pytest.raises(
+ Exception,
+ match=r"Application Generic Inference Runner: Ethos-U55 "
+ r"for the system Corstone-310: Cortex-M85\+Ethos-U55 is not installed",
+ ),
+ ),
+ (
+ DeviceInfo(device_type="ethos-u65", mac=512),
+ ("Corstone-300: Cortex-M55+Ethos-U65", True),
+ ("Generic Inference Runner: Ethos-U65", True),
+ "Corstone-300",
+ does_not_raise(),
+ ),
+ (
+ DeviceInfo(device_type="ethos-u65", mac=512),
+ ("Corstone-300: Cortex-M55+Ethos-U65", False),
+ ("Generic Inference Runner: Ethos-U65", False),
+ "Corstone-300",
+ pytest.raises(
+ Exception,
+ match=r"System Corstone-300: Cortex-M55\+Ethos-U65 is not installed",
+ ),
+ ),
+ (
+ DeviceInfo(device_type="ethos-u65", mac=512),
+ ("Corstone-300: Cortex-M55+Ethos-U65", True),
+ ("Generic Inference Runner: Ethos-U65", False),
+ "Corstone-300",
+ pytest.raises(
+ Exception,
+ match=r"Application Generic Inference Runner: Ethos-U65 "
+ r"for the system Corstone-300: Cortex-M55\+Ethos-U65 is not installed",
+ ),
+ ),
+ (
+ DeviceInfo(device_type="ethos-u65", mac=512),
+ ("Corstone-310: Cortex-M85+Ethos-U65", True),
+ ("Generic Inference Runner: Ethos-U65", True),
+ "Corstone-310",
+ does_not_raise(),
+ ),
+ (
+ DeviceInfo(device_type="ethos-u65", mac=512),
+ ("Corstone-310: Cortex-M85+Ethos-U65", False),
+ ("Generic Inference Runner: Ethos-U65", False),
+ "Corstone-310",
+ pytest.raises(
+ Exception,
+ match=r"System Corstone-310: Cortex-M85\+Ethos-U65 is not installed",
+ ),
+ ),
+ (
+ DeviceInfo(device_type="ethos-u65", mac=512),
+ ("Corstone-310: Cortex-M85+Ethos-U65", True),
+ ("Generic Inference Runner: Ethos-U65", False),
+ "Corstone-310",
+ pytest.raises(
+ Exception,
+ match=r"Application Generic Inference Runner: Ethos-U65 "
+ r"for the system Corstone-310: Cortex-M85\+Ethos-U65 is not installed",
+ ),
+ ),
+ (
+ DeviceInfo(
+ device_type="unknown_device", # type: ignore
+ mac=None, # type: ignore
+ ),
+ ("some_system", False),
+ ("some_application", False),
+ "some backend",
+ pytest.raises(Exception, match="Unsupported device unknown_device"),
+ ),
+ ],
+)
+def test_estimate_performance(
+ device: DeviceInfo,
+ system: tuple[str, bool],
+ application: tuple[str, bool],
+ backend: str,
+ expected_error: Any,
+ test_tflite_model: Path,
+ backend_runner: MagicMock,
+) -> None:
+ """Test getting performance estimations."""
+ system_name, system_installed = system
+ application_name, application_installed = application
+
+ backend_runner.is_system_installed.return_value = system_installed
+ backend_runner.is_application_installed.return_value = application_installed
+
+ mock_context = create_mock_context(
+ [
+ _mock_encode_b64(
+ {
+ "NPU AXI0_RD_DATA_BEAT_RECEIVED": 1,
+ "NPU AXI0_WR_DATA_BEAT_WRITTEN": 2,
+ "NPU AXI1_RD_DATA_BEAT_RECEIVED": 3,
+ "NPU ACTIVE": 4,
+ "NPU IDLE": 5,
+ "NPU TOTAL": 6,
+ }
+ )
+ ]
+ )
+
+ backend_runner.run_application.return_value = mock_context
+
+ with expected_error:
+ perf_metrics = estimate_performance(
+ ModelInfo(test_tflite_model), device, backend
+ )
+
+ assert isinstance(perf_metrics, PerformanceMetrics)
+ assert perf_metrics == PerformanceMetrics(
+ npu_axi0_rd_data_beat_received=1,
+ npu_axi0_wr_data_beat_written=2,
+ npu_axi1_rd_data_beat_received=3,
+ npu_active_cycles=4,
+ npu_idle_cycles=5,
+ npu_total_cycles=6,
+ )
+
+ assert backend_runner.is_system_installed.called_once_with(system_name)
+ assert backend_runner.is_application_installed.called_once_with(
+ application_name, system_name
+ )
+
+
+@pytest.mark.parametrize("backend", ("Corstone-300", "Corstone-310"))
+def test_estimate_performance_insufficient_data(
+ backend_runner: MagicMock, test_tflite_model: Path, backend: str
+) -> None:
+ """Test that performance could not be estimated when not all data presented."""
+ backend_runner.is_system_installed.return_value = True
+ backend_runner.is_application_installed.return_value = True
+
+ no_total_cycles_output = {
+ "NPU AXI0_RD_DATA_BEAT_RECEIVED": 1,
+ "NPU AXI0_WR_DATA_BEAT_WRITTEN": 2,
+ "NPU AXI1_RD_DATA_BEAT_RECEIVED": 3,
+ "NPU ACTIVE": 4,
+ "NPU IDLE": 5,
+ }
+ mock_context = create_mock_context([_mock_encode_b64(no_total_cycles_output)])
+
+ backend_runner.run_application.return_value = mock_context
+
+ with pytest.raises(
+ Exception, match="Unable to get performance metrics, insufficient data"
+ ):
+ device = DeviceInfo(device_type="ethos-u55", mac=32)
+ estimate_performance(ModelInfo(test_tflite_model), device, backend)
+
+
+def create_mock_process(stdout: list[str], stderr: list[str]) -> MagicMock:
+ """Mock underlying process."""
+ mock_process = MagicMock()
+ mock_process.poll.return_value = 0
+ type(mock_process).stdout = PropertyMock(return_value=iter(stdout))
+ type(mock_process).stderr = PropertyMock(return_value=iter(stderr))
+ return mock_process
+
+
+def create_mock_context(stdout: list[str]) -> ExecutionContext:
+ """Mock ExecutionContext."""
+ ctx = ExecutionContext(
+ app=get_application("application_1")[0],
+ app_params=[],
+ system=get_system("System 1"),
+ system_params=[],
+ )
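+    # Feed the given lines back as the captured stdout of the mocked execution.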
+ ctx.stdout = bytearray("\n".join(stdout).encode("utf-8"))
+ return ctx
+
+
+@pytest.mark.parametrize("backend", ("Corstone-300", "Corstone-310"))
+def test_estimate_performance_invalid_output(
+ test_tflite_model: Path, backend_runner: MagicMock, backend: str
+) -> None:
+ """Test estimation could not be done if inference produces unexpected output."""
+ backend_runner.is_system_installed.return_value = True
+ backend_runner.is_application_installed.return_value = True
+
+ mock_context = create_mock_context(["Something", "is", "wrong"])
+ backend_runner.run_application.return_value = mock_context
+
+ with pytest.raises(Exception, match="Unable to get performance metrics"):
+ estimate_performance(
+ ModelInfo(test_tflite_model),
+ DeviceInfo(device_type="ethos-u55", mac=256),
+ backend=backend,
+ )
+
+
+@pytest.mark.parametrize("backend", ("Corstone-300", "Corstone-310"))
+def test_get_generic_runner(backend: str) -> None:
+ """Test function get_generic_runner()."""
+ device_info = DeviceInfo("ethos-u55", 256)
+
+ runner = get_generic_runner(device_info=device_info, backend=backend)
+ assert isinstance(runner, GenericInferenceRunnerEthosU)
+
+ with pytest.raises(RuntimeError):
+ get_generic_runner(device_info=device_info, backend="UNKNOWN_BACKEND")
+
+
+@pytest.mark.parametrize(
+ ("backend", "device_type"),
+ (
+ ("Corstone-300", "ethos-u55"),
+ ("Corstone-300", "ethos-u65"),
+ ("Corstone-310", "ethos-u55"),
+ ),
+)
+def test_backend_support(backend: str, device_type: str) -> None:
+ """Test backend & device support."""
+ assert is_supported(backend)
+ assert is_supported(backend, device_type)
+
+ assert get_system_name(backend, device_type)
+
+ assert backend in supported_backends()
+
+
+class TestGenericInferenceRunnerEthosU:
+ """Test for the class GenericInferenceRunnerEthosU."""
+
+ @staticmethod
+ @pytest.mark.parametrize(
+ "device, backend, expected_system, expected_app",
+ [
+ [
+ DeviceInfo("ethos-u55", 256),
+ "Corstone-300",
+ "Corstone-300: Cortex-M55+Ethos-U55",
+ "Generic Inference Runner: Ethos-U55",
+ ],
+ [
+ DeviceInfo("ethos-u65", 256),
+ "Corstone-300",
+ "Corstone-300: Cortex-M55+Ethos-U65",
+ "Generic Inference Runner: Ethos-U65",
+ ],
+ [
+ DeviceInfo("ethos-u55", 256),
+ "Corstone-310",
+ "Corstone-310: Cortex-M85+Ethos-U55",
+ "Generic Inference Runner: Ethos-U55",
+ ],
+ [
+ DeviceInfo("ethos-u65", 256),
+ "Corstone-310",
+ "Corstone-310: Cortex-M85+Ethos-U65",
+ "Generic Inference Runner: Ethos-U65",
+ ],
+ ],
+ )
+ def test_artifact_resolver(
+ device: DeviceInfo, backend: str, expected_system: str, expected_app: str
+ ) -> None:
+ """Test artifact resolving based on the provided parameters."""
+ generic_runner = get_generic_runner(device, backend)
+ assert isinstance(generic_runner, GenericInferenceRunnerEthosU)
+
+ assert generic_runner.system_name == expected_system
+ assert generic_runner.app_name == expected_app
+
+ @staticmethod
+ def test_artifact_resolver_unsupported_backend() -> None:
+ """Test that it should be not possible to use unsupported backends."""
+ with pytest.raises(
+ RuntimeError, match="Unsupported device ethos-u65 for backend test_backend"
+ ):
+ get_generic_runner(DeviceInfo("ethos-u65", 256), "test_backend")
+
+ @staticmethod
+ @pytest.mark.parametrize("backend", ("Corstone-300", "Corstone-310"))
+ def test_inference_should_fail_if_system_not_installed(
+ backend_runner: MagicMock, test_tflite_model: Path, backend: str
+ ) -> None:
+ """Test that inference should fail if system is not installed."""
+ backend_runner.is_system_installed.return_value = False
+
+ generic_runner = get_generic_runner(DeviceInfo("ethos-u55", 256), backend)
+ with pytest.raises(
+ Exception,
+ match=r"System Corstone-3[01]0: Cortex-M[58]5\+Ethos-U55 is not installed",
+ ):
+ generic_runner.run(ModelInfo(test_tflite_model), [])
+
+ @staticmethod
+ @pytest.mark.parametrize("backend", ("Corstone-300", "Corstone-310"))
+ def test_inference_should_fail_is_apps_not_installed(
+ backend_runner: MagicMock, test_tflite_model: Path, backend: str
+ ) -> None:
+ """Test that inference should fail if apps are not installed."""
+ backend_runner.is_system_installed.return_value = True
+ backend_runner.is_application_installed.return_value = False
+
+ generic_runner = get_generic_runner(DeviceInfo("ethos-u55", 256), backend)
+ with pytest.raises(
+ Exception,
+ match="Application Generic Inference Runner: Ethos-U55"
+ r" for the system Corstone-3[01]0: Cortex-M[58]5\+Ethos-U55 is not "
+ r"installed",
+ ):
+ generic_runner.run(ModelInfo(test_tflite_model), [])
+
+
+@pytest.fixture(name="backend_runner")
+def fixture_backend_runner(monkeypatch: pytest.MonkeyPatch) -> MagicMock:
+ """Mock backend runner."""
+ backend_runner_mock = MagicMock(spec=BackendRunner)
+ monkeypatch.setattr(
+ "mlia.backend.corstone.performance.get_backend_runner",
+ MagicMock(return_value=backend_runner_mock),
+ )
+ return backend_runner_mock
diff --git a/tests/test_backend_application.py b/tests/test_backend_executor_application.py
index 478658b..8962a0a 100644
--- a/tests/test_backend_application.py
+++ b/tests/test_backend_executor_application.py
@@ -11,20 +11,22 @@ from unittest.mock import MagicMock
import pytest
-from mlia.backend.application import Application
-from mlia.backend.application import get_application
-from mlia.backend.application import get_available_application_directory_names
-from mlia.backend.application import get_available_applications
-from mlia.backend.application import get_unique_application_names
-from mlia.backend.application import install_application
-from mlia.backend.application import load_applications
-from mlia.backend.application import remove_application
-from mlia.backend.common import Command
-from mlia.backend.common import Param
-from mlia.backend.common import UserParamConfig
-from mlia.backend.config import ApplicationConfig
-from mlia.backend.config import ExtendedApplicationConfig
-from mlia.backend.config import NamedExecutionConfig
+from mlia.backend.executor.application import Application
+from mlia.backend.executor.application import get_application
+from mlia.backend.executor.application import (
+ get_available_application_directory_names,
+)
+from mlia.backend.executor.application import get_available_applications
+from mlia.backend.executor.application import get_unique_application_names
+from mlia.backend.executor.application import install_application
+from mlia.backend.executor.application import load_applications
+from mlia.backend.executor.application import remove_application
+from mlia.backend.executor.common import Command
+from mlia.backend.executor.common import Param
+from mlia.backend.executor.common import UserParamConfig
+from mlia.backend.executor.config import ApplicationConfig
+from mlia.backend.executor.config import ExtendedApplicationConfig
+from mlia.backend.executor.config import NamedExecutionConfig
def test_get_available_application_directory_names() -> None:
@@ -151,7 +153,7 @@ def test_install_application(
"""Test application install from archive."""
mock_create_destination_and_install = MagicMock()
monkeypatch.setattr(
- "mlia.backend.application.create_destination_and_install",
+ "mlia.backend.executor.application.create_destination_and_install",
mock_create_destination_and_install,
)
@@ -163,7 +165,9 @@ def test_install_application(
def test_remove_application(monkeypatch: Any) -> None:
"""Test application removal."""
mock_remove_backend = MagicMock()
- monkeypatch.setattr("mlia.backend.application.remove_backend", mock_remove_backend)
+ monkeypatch.setattr(
+ "mlia.backend.executor.application.remove_backend", mock_remove_backend
+ )
remove_application("some_application_directory")
mock_remove_backend.assert_called_once()
diff --git a/tests/test_backend_common.py b/tests/test_backend_executor_common.py
index 4f4853e..e881462 100644
--- a/tests/test_backend_common.py
+++ b/tests/test_backend_executor_common.py
@@ -14,20 +14,20 @@ from unittest.mock import MagicMock
import pytest
-from mlia.backend.application import Application
-from mlia.backend.common import Backend
-from mlia.backend.common import BaseBackendConfig
-from mlia.backend.common import Command
-from mlia.backend.common import ConfigurationException
-from mlia.backend.common import load_config
-from mlia.backend.common import Param
-from mlia.backend.common import parse_raw_parameter
-from mlia.backend.common import remove_backend
-from mlia.backend.config import ApplicationConfig
-from mlia.backend.config import UserParamConfig
-from mlia.backend.execution import ExecutionContext
-from mlia.backend.execution import ParamResolver
-from mlia.backend.system import System
+from mlia.backend.executor.application import Application
+from mlia.backend.executor.common import Backend
+from mlia.backend.executor.common import BaseBackendConfig
+from mlia.backend.executor.common import Command
+from mlia.backend.executor.common import ConfigurationException
+from mlia.backend.executor.common import load_config
+from mlia.backend.executor.common import Param
+from mlia.backend.executor.common import parse_raw_parameter
+from mlia.backend.executor.common import remove_backend
+from mlia.backend.executor.config import ApplicationConfig
+from mlia.backend.executor.config import UserParamConfig
+from mlia.backend.executor.execution import ExecutionContext
+from mlia.backend.executor.execution import ParamResolver
+from mlia.backend.executor.system import System
@pytest.mark.parametrize(
@@ -42,7 +42,9 @@ def test_remove_backend(
) -> None:
"""Test remove_backend function."""
mock_remove_resource = MagicMock()
- monkeypatch.setattr("mlia.backend.common.remove_resource", mock_remove_resource)
+ monkeypatch.setattr(
+ "mlia.backend.executor.common.remove_resource", mock_remove_resource
+ )
with expected_exception:
remove_backend(directory_name, "applications")
@@ -73,7 +75,7 @@ def test_load_config(
)
for config in configs:
json_mock = MagicMock()
- monkeypatch.setattr("mlia.backend.common.json.load", json_mock)
+ monkeypatch.setattr("mlia.backend.executor.common.json.load", json_mock)
load_config(config)
json_mock.assert_called_once()
diff --git a/tests/test_backend_execution.py b/tests/test_backend_executor_execution.py
index e56a1b0..6a6ea08 100644
--- a/tests/test_backend_execution.py
+++ b/tests/test_backend_executor_execution.py
@@ -7,16 +7,16 @@ from unittest.mock import MagicMock
import pytest
-from mlia.backend.application import Application
-from mlia.backend.common import UserParamConfig
-from mlia.backend.config import ApplicationConfig
-from mlia.backend.config import SystemConfig
-from mlia.backend.execution import ExecutionContext
-from mlia.backend.execution import get_application_and_system
-from mlia.backend.execution import get_application_by_name_and_system
-from mlia.backend.execution import ParamResolver
-from mlia.backend.execution import run_application
-from mlia.backend.system import load_system
+from mlia.backend.executor.application import Application
+from mlia.backend.executor.common import UserParamConfig
+from mlia.backend.executor.config import ApplicationConfig
+from mlia.backend.executor.config import SystemConfig
+from mlia.backend.executor.execution import ExecutionContext
+from mlia.backend.executor.execution import get_application_and_system
+from mlia.backend.executor.execution import get_application_by_name_and_system
+from mlia.backend.executor.execution import ParamResolver
+from mlia.backend.executor.execution import run_application
+from mlia.backend.executor.system import load_system
def test_context_param_resolver(tmpdir: Any) -> None:
@@ -181,7 +181,7 @@ def test_context_param_resolver(tmpdir: Any) -> None:
def test_get_application_by_name_and_system(monkeypatch: Any) -> None:
"""Test exceptional case for get_application_by_name_and_system."""
monkeypatch.setattr(
- "mlia.backend.execution.get_application",
+ "mlia.backend.executor.execution.get_application",
MagicMock(return_value=[MagicMock(), MagicMock()]),
)
@@ -196,7 +196,7 @@ def test_get_application_by_name_and_system(monkeypatch: Any) -> None:
def test_get_application_and_system(monkeypatch: Any) -> None:
"""Test exceptional case for get_application_and_system."""
monkeypatch.setattr(
- "mlia.backend.execution.get_system", MagicMock(return_value=None)
+ "mlia.backend.executor.execution.get_system", MagicMock(return_value=None)
)
with pytest.raises(ValueError, match="System test_system is not found"):
diff --git a/tests/test_backend_fs.py b/tests/test_backend_executor_fs.py
index 292a7cc..298b8db 100644
--- a/tests/test_backend_fs.py
+++ b/tests/test_backend_executor_fs.py
@@ -10,12 +10,12 @@ from unittest.mock import MagicMock
import pytest
-from mlia.backend.fs import get_backends_path
-from mlia.backend.fs import recreate_directory
-from mlia.backend.fs import remove_directory
-from mlia.backend.fs import remove_resource
-from mlia.backend.fs import ResourceType
-from mlia.backend.fs import valid_for_filename
+from mlia.backend.executor.fs import get_backends_path
+from mlia.backend.executor.fs import recreate_directory
+from mlia.backend.executor.fs import remove_directory
+from mlia.backend.executor.fs import remove_resource
+from mlia.backend.executor.fs import ResourceType
+from mlia.backend.executor.fs import valid_for_filename
@pytest.mark.parametrize(
@@ -39,10 +39,12 @@ def test_remove_resource_wrong_directory(
) -> None:
"""Test removing resource with wrong directory."""
mock_get_resources = MagicMock(return_value=test_applications_path)
- monkeypatch.setattr("mlia.backend.fs.get_backends_path", mock_get_resources)
+ monkeypatch.setattr(
+ "mlia.backend.executor.fs.get_backends_path", mock_get_resources
+ )
mock_shutil_rmtree = MagicMock()
- monkeypatch.setattr("mlia.backend.fs.shutil.rmtree", mock_shutil_rmtree)
+ monkeypatch.setattr("mlia.backend.executor.fs.shutil.rmtree", mock_shutil_rmtree)
with pytest.raises(Exception, match="Resource .* does not exist"):
remove_resource("unknown", "applications")
@@ -56,10 +58,12 @@ def test_remove_resource_wrong_directory(
def test_remove_resource(monkeypatch: Any, test_applications_path: Path) -> None:
"""Test removing resource data."""
mock_get_resources = MagicMock(return_value=test_applications_path)
- monkeypatch.setattr("mlia.backend.fs.get_backends_path", mock_get_resources)
+ monkeypatch.setattr(
+ "mlia.backend.executor.fs.get_backends_path", mock_get_resources
+ )
mock_shutil_rmtree = MagicMock()
- monkeypatch.setattr("mlia.backend.fs.shutil.rmtree", mock_shutil_rmtree)
+ monkeypatch.setattr("mlia.backend.executor.fs.shutil.rmtree", mock_shutil_rmtree)
remove_resource("application1", "applications")
mock_shutil_rmtree.assert_called_once()
diff --git a/tests/test_backend_output_consumer.py b/tests/test_backend_executor_output_consumer.py
index 2a46787..537084f 100644
--- a/tests/test_backend_output_consumer.py
+++ b/tests/test_backend_executor_output_consumer.py
@@ -9,8 +9,8 @@ from typing import Any
import pytest
-from mlia.backend.output_consumer import Base64OutputConsumer
-from mlia.backend.output_consumer import OutputConsumer
+from mlia.backend.executor.output_consumer import Base64OutputConsumer
+from mlia.backend.executor.output_consumer import OutputConsumer
OUTPUT_MATCH_ALL = bytearray(
diff --git a/tests/test_backend_proc.py b/tests/test_backend_executor_proc.py
index d2c2cd4..e8caf8a 100644
--- a/tests/test_backend_proc.py
+++ b/tests/test_backend_executor_proc.py
@@ -9,14 +9,14 @@ from unittest import mock
import pytest
from sh import ErrorReturnCode
-from mlia.backend.proc import Command
-from mlia.backend.proc import CommandFailedException
-from mlia.backend.proc import CommandNotFound
-from mlia.backend.proc import parse_command
-from mlia.backend.proc import print_command_stdout
-from mlia.backend.proc import run_and_wait
-from mlia.backend.proc import ShellCommand
-from mlia.backend.proc import terminate_command
+from mlia.backend.executor.proc import Command
+from mlia.backend.executor.proc import CommandFailedException
+from mlia.backend.executor.proc import CommandNotFound
+from mlia.backend.executor.proc import parse_command
+from mlia.backend.executor.proc import print_command_stdout
+from mlia.backend.executor.proc import run_and_wait
+from mlia.backend.executor.proc import ShellCommand
+from mlia.backend.executor.proc import terminate_command
class TestShellCommand:
@@ -136,12 +136,13 @@ class TestRunAndWait:
"""Init test method."""
self.execute_command_mock = mock.MagicMock()
monkeypatch.setattr(
- "mlia.backend.proc.execute_command", self.execute_command_mock
+ "mlia.backend.executor.proc.execute_command", self.execute_command_mock
)
self.terminate_command_mock = mock.MagicMock()
monkeypatch.setattr(
- "mlia.backend.proc.terminate_command", self.terminate_command_mock
+ "mlia.backend.executor.proc.terminate_command",
+ self.terminate_command_mock,
)
def test_if_execute_command_raises_exception(self) -> None:
diff --git a/tests/test_backend_executor_runner.py b/tests/test_backend_executor_runner.py
new file mode 100644
index 0000000..36c6e5e
--- /dev/null
+++ b/tests/test_backend_executor_runner.py
@@ -0,0 +1,254 @@
+# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+"""Tests for module backend/manager."""
+from __future__ import annotations
+
+from pathlib import Path
+from unittest.mock import MagicMock
+from unittest.mock import PropertyMock
+
+import pytest
+
+from mlia.backend.corstone.performance import BackendRunner
+from mlia.backend.corstone.performance import ExecutionParams
+
+
+class TestBackendRunner:
+ """Tests for BackendRunner class."""
+
+ @staticmethod
+ def _setup_backends(
+ monkeypatch: pytest.MonkeyPatch,
+ available_systems: list[str] | None = None,
+ available_apps: list[str] | None = None,
+ ) -> None:
+ """Set up backend metadata."""
+
+ def mock_system(system: str) -> MagicMock:
+ """Mock the System instance."""
+ mock = MagicMock()
+ type(mock).name = PropertyMock(return_value=system)
+ return mock
+
+ def mock_app(app: str) -> MagicMock:
+ """Mock the Application instance."""
+ mock = MagicMock()
+ type(mock).name = PropertyMock(return_value=app)
+ mock.can_run_on.return_value = True
+ return mock
+
+ system_mocks = [mock_system(name) for name in (available_systems or [])]
+ monkeypatch.setattr(
+ "mlia.backend.executor.runner.get_available_systems",
+ MagicMock(return_value=system_mocks),
+ )
+
+ apps_mock = [mock_app(name) for name in (available_apps or [])]
+ monkeypatch.setattr(
+ "mlia.backend.executor.runner.get_available_applications",
+ MagicMock(return_value=apps_mock),
+ )
+
+ @pytest.mark.parametrize(
+ "available_systems, system, installed",
+ [
+ ([], "system1", False),
+ (["system1", "system2"], "system1", True),
+ ],
+ )
+ def test_is_system_installed(
+ self,
+ available_systems: list,
+ system: str,
+ installed: bool,
+ monkeypatch: pytest.MonkeyPatch,
+ ) -> None:
+ """Test method is_system_installed."""
+ backend_runner = BackendRunner()
+
+ self._setup_backends(monkeypatch, available_systems)
+
+ assert backend_runner.is_system_installed(system) == installed
+
+ @pytest.mark.parametrize(
+ "available_systems, systems",
+ [
+ ([], []),
+ (["system1"], ["system1"]),
+ ],
+ )
+ def test_installed_systems(
+ self,
+ available_systems: list[str],
+ systems: list[str],
+ monkeypatch: pytest.MonkeyPatch,
+ ) -> None:
+ """Test method installed_systems."""
+ backend_runner = BackendRunner()
+
+ self._setup_backends(monkeypatch, available_systems)
+ assert backend_runner.get_installed_systems() == systems
+
+ @staticmethod
+ def test_install_system(monkeypatch: pytest.MonkeyPatch) -> None:
+ """Test system installation."""
+ install_system_mock = MagicMock()
+ monkeypatch.setattr(
+ "mlia.backend.executor.runner.install_system", install_system_mock
+ )
+
+ backend_runner = BackendRunner()
+ backend_runner.install_system(Path("test_system_path"))
+
+ install_system_mock.assert_called_once_with(Path("test_system_path"))
+
+ @pytest.mark.parametrize(
+ "available_systems, systems, expected_result",
+ [
+ ([], [], False),
+ (["system1"], [], False),
+ (["system1"], ["system1"], True),
+ (["system1", "system2"], ["system1", "system3"], False),
+ (["system1", "system2"], ["system1", "system2"], True),
+ ],
+ )
+ def test_systems_installed(
+ self,
+ available_systems: list[str],
+ systems: list[str],
+ expected_result: bool,
+ monkeypatch: pytest.MonkeyPatch,
+ ) -> None:
+ """Test method systems_installed."""
+ self._setup_backends(monkeypatch, available_systems)
+
+ backend_runner = BackendRunner()
+
+ assert backend_runner.systems_installed(systems) is expected_result
+
+ @pytest.mark.parametrize(
+ "available_apps, applications, expected_result",
+ [
+ ([], [], False),
+ (["app1"], [], False),
+ (["app1"], ["app1"], True),
+ (["app1", "app2"], ["app1", "app3"], False),
+ (["app1", "app2"], ["app1", "app2"], True),
+ ],
+ )
+ def test_applications_installed(
+ self,
+ available_apps: list[str],
+ applications: list[str],
+ expected_result: bool,
+ monkeypatch: pytest.MonkeyPatch,
+ ) -> None:
+ """Test method applications_installed."""
+ self._setup_backends(monkeypatch, [], available_apps)
+ backend_runner = BackendRunner()
+
+ assert backend_runner.applications_installed(applications) is expected_result
+
+ @pytest.mark.parametrize(
+ "available_apps, applications",
+ [
+ ([], []),
+ (
+ ["application1", "application2"],
+ ["application1", "application2"],
+ ),
+ ],
+ )
+ def test_get_installed_applications(
+ self,
+ available_apps: list[str],
+ applications: list[str],
+ monkeypatch: pytest.MonkeyPatch,
+ ) -> None:
+ """Test method get_installed_applications."""
+ self._setup_backends(monkeypatch, [], available_apps)
+
+ backend_runner = BackendRunner()
+ assert applications == backend_runner.get_installed_applications()
+
+ @staticmethod
+ def test_install_application(monkeypatch: pytest.MonkeyPatch) -> None:
+ """Test application installation."""
+ mock_install_application = MagicMock()
+ monkeypatch.setattr(
+ "mlia.backend.executor.runner.install_application",
+ mock_install_application,
+ )
+
+ backend_runner = BackendRunner()
+ backend_runner.install_application(Path("test_application_path"))
+ mock_install_application.assert_called_once_with(Path("test_application_path"))
+
+ @pytest.mark.parametrize(
+ "available_apps, application, installed",
+ [
+ ([], "system1", False),
+ (
+ ["application1", "application2"],
+ "application1",
+ True,
+ ),
+ (
+ [],
+ "application1",
+ False,
+ ),
+ ],
+ )
+ def test_is_application_installed(
+ self,
+ available_apps: list[str],
+ application: str,
+ installed: bool,
+ monkeypatch: pytest.MonkeyPatch,
+ ) -> None:
+ """Test method is_application_installed."""
+ self._setup_backends(monkeypatch, [], available_apps)
+
+ backend_runner = BackendRunner()
+ assert installed == backend_runner.is_application_installed(
+ application, "system1"
+ )
+
+ @staticmethod
+ @pytest.mark.parametrize(
+ "execution_params, expected_command",
+ [
+ (
+ ExecutionParams("application_4", "System 4", [], []),
+ ["application_4", [], "System 4", []],
+ ),
+ (
+ ExecutionParams(
+ "application_6",
+ "System 6",
+ ["param1=value2"],
+ ["sys-param1=value2"],
+ ),
+ [
+ "application_6",
+ ["param1=value2"],
+ "System 6",
+ ["sys-param1=value2"],
+ ],
+ ),
+ ],
+ )
+ def test_run_application_local(
+ monkeypatch: pytest.MonkeyPatch,
+ execution_params: ExecutionParams,
+ expected_command: list[str],
+ ) -> None:
+ """Test method run_application with local systems."""
+ run_app = MagicMock()
+ monkeypatch.setattr("mlia.backend.executor.runner.run_application", run_app)
+
+ backend_runner = BackendRunner()
+ backend_runner.run_application(execution_params)
+
+ run_app.assert_called_once_with(*expected_command)
diff --git a/tests/test_backend_source.py b/tests/test_backend_executor_source.py
index c6ef26f..3aa336e 100644
--- a/tests/test_backend_source.py
+++ b/tests/test_backend_executor_source.py
@@ -10,11 +10,11 @@ from unittest.mock import patch
import pytest
-from mlia.backend.common import ConfigurationException
-from mlia.backend.source import create_destination_and_install
-from mlia.backend.source import DirectorySource
-from mlia.backend.source import get_source
-from mlia.backend.source import TarArchiveSource
+from mlia.backend.executor.common import ConfigurationException
+from mlia.backend.executor.source import create_destination_and_install
+from mlia.backend.executor.source import DirectorySource
+from mlia.backend.executor.source import get_source
+from mlia.backend.executor.source import TarArchiveSource
def test_create_destination_and_install(test_systems_path: Path, tmpdir: Any) -> None:
@@ -27,7 +27,10 @@ def test_create_destination_and_install(test_systems_path: Path, tmpdir: Any) ->
assert (resources / "system1").is_dir()
-@patch("mlia.backend.source.DirectorySource.create_destination", return_value=False)
+@patch(
+ "mlia.backend.executor.source.DirectorySource.create_destination",
+ return_value=False,
+)
def test_create_destination_and_install_if_dest_creation_not_required(
mock_ds_create_destination: Any, tmpdir: Any
) -> None:
diff --git a/tests/test_backend_system.py b/tests/test_backend_executor_system.py
index ecc149d..c94ef30 100644
--- a/tests/test_backend_system.py
+++ b/tests/test_backend_executor_system.py
@@ -10,17 +10,17 @@ from unittest.mock import MagicMock
import pytest
-from mlia.backend.common import Command
-from mlia.backend.common import ConfigurationException
-from mlia.backend.common import Param
-from mlia.backend.common import UserParamConfig
-from mlia.backend.config import SystemConfig
-from mlia.backend.system import get_available_systems
-from mlia.backend.system import get_system
-from mlia.backend.system import install_system
-from mlia.backend.system import load_system
-from mlia.backend.system import remove_system
-from mlia.backend.system import System
+from mlia.backend.executor.common import Command
+from mlia.backend.executor.common import ConfigurationException
+from mlia.backend.executor.common import Param
+from mlia.backend.executor.common import UserParamConfig
+from mlia.backend.executor.config import SystemConfig
+from mlia.backend.executor.system import get_available_systems
+from mlia.backend.executor.system import get_system
+from mlia.backend.executor.system import install_system
+from mlia.backend.executor.system import load_system
+from mlia.backend.executor.system import remove_system
+from mlia.backend.executor.system import System
def test_get_available_systems() -> None:
@@ -95,7 +95,7 @@ def test_install_system(
"""Test system installation from archive."""
mock_create_destination_and_install = MagicMock()
monkeypatch.setattr(
- "mlia.backend.system.create_destination_and_install",
+ "mlia.backend.executor.system.create_destination_and_install",
mock_create_destination_and_install,
)
@@ -108,7 +108,9 @@ def test_install_system(
def test_remove_system(monkeypatch: Any) -> None:
"""Test system removal."""
mock_remove_backend = MagicMock()
- monkeypatch.setattr("mlia.backend.system.remove_backend", mock_remove_backend)
+ monkeypatch.setattr(
+ "mlia.backend.executor.system.remove_backend", mock_remove_backend
+ )
remove_system("some_system_dir")
mock_remove_backend.assert_called_once()
diff --git a/tests/test_backend_install.py b/tests/test_backend_install.py
new file mode 100644
index 0000000..024a833
--- /dev/null
+++ b/tests/test_backend_install.py
@@ -0,0 +1,124 @@
+# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+"""Tests for common management functionality."""
+from __future__ import annotations
+
+from pathlib import Path
+
+import pytest
+
+from mlia.backend.install import BackendInfo
+from mlia.backend.install import get_all_application_names
+from mlia.backend.install import get_all_system_names
+from mlia.backend.install import get_system_name
+from mlia.backend.install import is_supported
+from mlia.backend.install import StaticPathChecker
+from mlia.backend.install import supported_backends
+
+
+@pytest.mark.parametrize(
+ "copy_source, system_config",
+ [
+ (True, "system_config.json"),
+ (True, None),
+ (False, "system_config.json"),
+ (False, None),
+ ],
+)
+def test_static_path_checker(
+ tmp_path: Path, copy_source: bool, system_config: str
+) -> None:
+ """Test static path checker."""
+ checker = StaticPathChecker(tmp_path, ["file1.txt"], copy_source, system_config)
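+    # Create the expected file so the checker accepts tmp_path.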
+ tmp_path.joinpath("file1.txt").touch()
+
+ result = checker(tmp_path)
+ assert result == BackendInfo(tmp_path, copy_source, system_config)
+
+
+def test_static_path_checker_invalid_path(tmp_path: Path) -> None:
+ """Test static path checker with invalid path."""
+ checker = StaticPathChecker(tmp_path, ["file1.txt"])
+
+ result = checker(tmp_path)
+ assert result is None
+
+ result = checker(tmp_path / "unknown_directory")
+ assert result is None
+
+
+def test_supported_backends() -> None:
+ """Test function supported backends."""
+ assert supported_backends() == ["Corstone-300", "Corstone-310"]
+
+
+@pytest.mark.parametrize(
+ "backend, expected_result",
+ [
+ ["unknown_backend", False],
+ ["Corstone-300", True],
+ ["Corstone-310", True],
+ ],
+)
+def test_is_supported(backend: str, expected_result: bool) -> None:
+ """Test function is_supported."""
+ assert is_supported(backend) == expected_result
+
+
+@pytest.mark.parametrize(
+ "backend, expected_result",
+ [
+ [
+ "Corstone-300",
+ [
+ "Corstone-300: Cortex-M55+Ethos-U55",
+ "Corstone-300: Cortex-M55+Ethos-U65",
+ ],
+ ],
+ [
+ "Corstone-310",
+ [
+ "Corstone-310: Cortex-M85+Ethos-U55",
+ "Corstone-310: Cortex-M85+Ethos-U65",
+ ],
+ ],
+ ],
+)
+def test_get_all_system_names(backend: str, expected_result: list[str]) -> None:
+ """Test function get_all_system_names."""
+ assert sorted(get_all_system_names(backend)) == expected_result
+
+
+@pytest.mark.parametrize(
+ "backend, expected_result",
+ [
+ [
+ "Corstone-300",
+ [
+ "Generic Inference Runner: Ethos-U55",
+ "Generic Inference Runner: Ethos-U65",
+ ],
+ ],
+ [
+ "Corstone-310",
+ [
+ "Generic Inference Runner: Ethos-U55",
+ "Generic Inference Runner: Ethos-U65",
+ ],
+ ],
+ ],
+)
+def test_get_all_application_names(backend: str, expected_result: list[str]) -> None:
+ """Test function get_all_application_names."""
+ assert sorted(get_all_application_names(backend)) == expected_result
+
+
+def test_get_system_name() -> None:
+ """Test function get_system_name."""
+ assert (
+ get_system_name("Corstone-300", "ethos-u55")
+ == "Corstone-300: Cortex-M55+Ethos-U55"
+ )
+
+ with pytest.raises(KeyError):
+ get_system_name("some_backend", "some_type")
diff --git a/tests/test_backend_manager.py b/tests/test_backend_manager.py
index dfbcdaa..19cb357 100644
--- a/tests/test_backend_manager.py
+++ b/tests/test_backend_manager.py
@@ -1,758 +1,282 @@
# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
-"""Tests for module backend/manager."""
+"""Tests for installation manager."""
from __future__ import annotations
-import base64
-import json
-from contextlib import ExitStack as does_not_raise
from pathlib import Path
from typing import Any
+from unittest.mock import call
from unittest.mock import MagicMock
from unittest.mock import PropertyMock
import pytest
-from mlia.backend.application import get_application
-from mlia.backend.execution import ExecutionContext
-from mlia.backend.manager import BackendRunner
-from mlia.backend.manager import DeviceInfo
-from mlia.backend.manager import estimate_performance
-from mlia.backend.manager import ExecutionParams
-from mlia.backend.manager import GenericInferenceOutputParser
-from mlia.backend.manager import GenericInferenceRunnerEthosU
-from mlia.backend.manager import get_generic_runner
-from mlia.backend.manager import get_system_name
-from mlia.backend.manager import is_supported
-from mlia.backend.manager import ModelInfo
-from mlia.backend.manager import PerformanceMetrics
-from mlia.backend.manager import supported_backends
-from mlia.backend.output_consumer import Base64OutputConsumer
-from mlia.backend.system import get_system
-
-
-def _mock_encode_b64(data: dict[str, int]) -> str:
- """
- Encode the given data into a mock base64-encoded string of JSON.
-
- This reproduces the base64 encoding done in the Corstone applications.
-
- JSON example:
-
- ```json
- [{'count': 1,
- 'profiling_group': 'Inference',
- 'samples': [{'name': 'NPU IDLE', 'value': [612]},
- {'name': 'NPU AXI0_RD_DATA_BEAT_RECEIVED', 'value': [165872]},
- {'name': 'NPU AXI0_WR_DATA_BEAT_WRITTEN', 'value': [88712]},
- {'name': 'NPU AXI1_RD_DATA_BEAT_RECEIVED', 'value': [57540]},
- {'name': 'NPU ACTIVE', 'value': [520489]},
- {'name': 'NPU TOTAL', 'value': [521101]}]}]
- ```
- """
- wrapped_data = [
- {
- "count": 1,
- "profiling_group": "Inference",
- "samples": [
- {"name": name, "value": [value]} for name, value in data.items()
- ],
- }
- ]
- json_str = json.dumps(wrapped_data)
- json_bytes = bytearray(json_str, encoding="utf-8")
- json_b64 = base64.b64encode(json_bytes).decode("utf-8")
- tag = Base64OutputConsumer.TAG_NAME
- return f"<{tag}>{json_b64}</{tag}>"
+from mlia.backend.install import DownloadAndInstall
+from mlia.backend.install import Installation
+from mlia.backend.install import InstallationType
+from mlia.backend.install import InstallFromPath
+from mlia.backend.manager import DefaultInstallationManager
-@pytest.mark.parametrize(
- "data, is_ready, result, missed_keys",
- [
- (
- [],
- False,
- {},
- {
- "npu_active_cycles",
- "npu_axi0_rd_data_beat_received",
- "npu_axi0_wr_data_beat_written",
- "npu_axi1_rd_data_beat_received",
- "npu_idle_cycles",
- "npu_total_cycles",
- },
- ),
- (
- ["sample text"],
- False,
- {},
- {
- "npu_active_cycles",
- "npu_axi0_rd_data_beat_received",
- "npu_axi0_wr_data_beat_written",
- "npu_axi1_rd_data_beat_received",
- "npu_idle_cycles",
- "npu_total_cycles",
- },
- ),
- (
- [_mock_encode_b64({"NPU AXI0_RD_DATA_BEAT_RECEIVED": 123})],
- False,
- {"npu_axi0_rd_data_beat_received": 123},
- {
- "npu_active_cycles",
- "npu_axi0_wr_data_beat_written",
- "npu_axi1_rd_data_beat_received",
- "npu_idle_cycles",
- "npu_total_cycles",
- },
- ),
- (
- [
- _mock_encode_b64(
- {
- "NPU AXI0_RD_DATA_BEAT_RECEIVED": 1,
- "NPU AXI0_WR_DATA_BEAT_WRITTEN": 2,
- "NPU AXI1_RD_DATA_BEAT_RECEIVED": 3,
- "NPU ACTIVE": 4,
- "NPU IDLE": 5,
- "NPU TOTAL": 6,
- }
- )
- ],
- True,
- {
- "npu_axi0_rd_data_beat_received": 1,
- "npu_axi0_wr_data_beat_written": 2,
- "npu_axi1_rd_data_beat_received": 3,
- "npu_active_cycles": 4,
- "npu_idle_cycles": 5,
- "npu_total_cycles": 6,
- },
- set(),
- ),
- ],
-)
-def test_generic_inference_output_parser(
- data: dict[str, int], is_ready: bool, result: dict, missed_keys: set[str]
-) -> None:
- """Test generic runner output parser."""
- parser = GenericInferenceOutputParser()
-
- for line in data:
- parser.feed(line)
-
- assert parser.is_ready() == is_ready
- assert parser.result == result
- assert parser.missed_keys() == missed_keys
-
-
-class TestBackendRunner:
- """Tests for BackendRunner class."""
-
- @staticmethod
- def _setup_backends(
- monkeypatch: pytest.MonkeyPatch,
- available_systems: list[str] | None = None,
- available_apps: list[str] | None = None,
- ) -> None:
- """Set up backend metadata."""
-
- def mock_system(system: str) -> MagicMock:
- """Mock the System instance."""
- mock = MagicMock()
- type(mock).name = PropertyMock(return_value=system)
- return mock
-
- def mock_app(app: str) -> MagicMock:
- """Mock the Application instance."""
- mock = MagicMock()
- type(mock).name = PropertyMock(return_value=app)
- mock.can_run_on.return_value = True
- return mock
-
- system_mocks = [mock_system(name) for name in (available_systems or [])]
- monkeypatch.setattr(
- "mlia.backend.manager.get_available_systems",
- MagicMock(return_value=system_mocks),
- )
+def get_default_installation_manager_mock(
+ name: str,
+ already_installed: bool = False,
+) -> MagicMock:
+ """Get mock instance for DefaultInstallationManager."""
+ mock = MagicMock(spec=DefaultInstallationManager)
- apps_mock = [mock_app(name) for name in (available_apps or [])]
- monkeypatch.setattr(
- "mlia.backend.manager.get_available_applications",
- MagicMock(return_value=apps_mock),
- )
+ props = {
+ "name": name,
+ "already_installed": already_installed,
+ }
+ for prop, value in props.items():
+ setattr(type(mock), prop, PropertyMock(return_value=value))
- @pytest.mark.parametrize(
- "available_systems, system, installed",
- [
- ([], "system1", False),
- (["system1", "system2"], "system1", True),
- ],
- )
- def test_is_system_installed(
- self,
- available_systems: list,
- system: str,
- installed: bool,
- monkeypatch: pytest.MonkeyPatch,
- ) -> None:
- """Test method is_system_installed."""
- backend_runner = BackendRunner()
-
- self._setup_backends(monkeypatch, available_systems)
-
- assert backend_runner.is_system_installed(system) == installed
-
- @pytest.mark.parametrize(
- "available_systems, systems",
- [
- ([], []),
- (["system1"], ["system1"]),
- ],
- )
- def test_installed_systems(
- self,
- available_systems: list[str],
- systems: list[str],
- monkeypatch: pytest.MonkeyPatch,
- ) -> None:
- """Test method installed_systems."""
- backend_runner = BackendRunner()
-
- self._setup_backends(monkeypatch, available_systems)
- assert backend_runner.get_installed_systems() == systems
-
- @staticmethod
- def test_install_system(monkeypatch: pytest.MonkeyPatch) -> None:
- """Test system installation."""
- install_system_mock = MagicMock()
- monkeypatch.setattr("mlia.backend.manager.install_system", install_system_mock)
-
- backend_runner = BackendRunner()
- backend_runner.install_system(Path("test_system_path"))
-
- install_system_mock.assert_called_once_with(Path("test_system_path"))
-
- @pytest.mark.parametrize(
- "available_systems, systems, expected_result",
- [
- ([], [], False),
- (["system1"], [], False),
- (["system1"], ["system1"], True),
- (["system1", "system2"], ["system1", "system3"], False),
- (["system1", "system2"], ["system1", "system2"], True),
- ],
- )
- def test_systems_installed(
- self,
- available_systems: list[str],
- systems: list[str],
- expected_result: bool,
- monkeypatch: pytest.MonkeyPatch,
- ) -> None:
- """Test method systems_installed."""
- self._setup_backends(monkeypatch, available_systems)
-
- backend_runner = BackendRunner()
-
- assert backend_runner.systems_installed(systems) is expected_result
-
- @pytest.mark.parametrize(
- "available_apps, applications, expected_result",
- [
- ([], [], False),
- (["app1"], [], False),
- (["app1"], ["app1"], True),
- (["app1", "app2"], ["app1", "app3"], False),
- (["app1", "app2"], ["app1", "app2"], True),
- ],
- )
- def test_applications_installed(
- self,
- available_apps: list[str],
- applications: list[str],
- expected_result: bool,
- monkeypatch: pytest.MonkeyPatch,
- ) -> None:
- """Test method applications_installed."""
- self._setup_backends(monkeypatch, [], available_apps)
- backend_runner = BackendRunner()
-
- assert backend_runner.applications_installed(applications) is expected_result
-
- @pytest.mark.parametrize(
- "available_apps, applications",
- [
- ([], []),
- (
- ["application1", "application2"],
- ["application1", "application2"],
- ),
- ],
- )
- def test_get_installed_applications(
- self,
- available_apps: list[str],
- applications: list[str],
- monkeypatch: pytest.MonkeyPatch,
- ) -> None:
- """Test method get_installed_applications."""
- self._setup_backends(monkeypatch, [], available_apps)
-
- backend_runner = BackendRunner()
- assert applications == backend_runner.get_installed_applications()
-
- @staticmethod
- def test_install_application(monkeypatch: pytest.MonkeyPatch) -> None:
- """Test application installation."""
- mock_install_application = MagicMock()
- monkeypatch.setattr(
- "mlia.backend.manager.install_application", mock_install_application
- )
+ return mock
- backend_runner = BackendRunner()
- backend_runner.install_application(Path("test_application_path"))
- mock_install_application.assert_called_once_with(Path("test_application_path"))
- @pytest.mark.parametrize(
- "available_apps, application, installed",
- [
- ([], "system1", False),
- (
- ["application1", "application2"],
- "application1",
- True,
- ),
- (
- [],
- "application1",
- False,
- ),
- ],
+def _ready_for_uninstall_mock() -> MagicMock:
+ return get_default_installation_manager_mock(
+ name="already_installed",
+ already_installed=True,
)
- def test_is_application_installed(
- self,
- available_apps: list[str],
- application: str,
- installed: bool,
- monkeypatch: pytest.MonkeyPatch,
- ) -> None:
- """Test method is_application_installed."""
- self._setup_backends(monkeypatch, [], available_apps)
-
- backend_runner = BackendRunner()
- assert installed == backend_runner.is_application_installed(
- application, "system1"
- )
- @staticmethod
- @pytest.mark.parametrize(
- "execution_params, expected_command",
- [
- (
- ExecutionParams("application_4", "System 4", [], []),
- ["application_4", [], "System 4", []],
- ),
- (
- ExecutionParams(
- "application_6",
- "System 6",
- ["param1=value2"],
- ["sys-param1=value2"],
- ),
- [
- "application_6",
- ["param1=value2"],
- "System 6",
- ["sys-param1=value2"],
- ],
- ),
- ],
- )
- def test_run_application_local(
- monkeypatch: pytest.MonkeyPatch,
- execution_params: ExecutionParams,
- expected_command: list[str],
- ) -> None:
- """Test method run_application with local systems."""
- run_app = MagicMock()
- monkeypatch.setattr("mlia.backend.manager.run_application", run_app)
- backend_runner = BackendRunner()
- backend_runner.run_application(execution_params)
+def get_installation_mock(
+ name: str,
+ already_installed: bool = False,
+ could_be_installed: bool = False,
+ supported_install_type: type | tuple | None = None,
+) -> MagicMock:
+ """Get mock instance for the installation."""
+ mock = MagicMock(spec=Installation)
- run_app.assert_called_once_with(*expected_command)
+ def supports(install_type: InstallationType) -> bool:
+ if supported_install_type is None:
+ return False
+ return isinstance(install_type, supported_install_type)
-@pytest.mark.parametrize(
- "device, system, application, backend, expected_error",
- [
- (
- DeviceInfo(device_type="ethos-u55", mac=32),
- ("Corstone-300: Cortex-M55+Ethos-U55", True),
- ("Generic Inference Runner: Ethos-U55", True),
- "Corstone-300",
- does_not_raise(),
- ),
- (
- DeviceInfo(device_type="ethos-u55", mac=32),
- ("Corstone-300: Cortex-M55+Ethos-U55", False),
- ("Generic Inference Runner: Ethos-U55", False),
- "Corstone-300",
- pytest.raises(
- Exception,
- match=r"System Corstone-300: Cortex-M55\+Ethos-U55 is not installed",
- ),
- ),
- (
- DeviceInfo(device_type="ethos-u55", mac=32),
- ("Corstone-300: Cortex-M55+Ethos-U55", True),
- ("Generic Inference Runner: Ethos-U55", False),
- "Corstone-300",
- pytest.raises(
- Exception,
- match=r"Application Generic Inference Runner: Ethos-U55 "
- r"for the system Corstone-300: Cortex-M55\+Ethos-U55 is not installed",
- ),
- ),
- (
- DeviceInfo(device_type="ethos-u55", mac=32),
- ("Corstone-310: Cortex-M85+Ethos-U55", True),
- ("Generic Inference Runner: Ethos-U55", True),
- "Corstone-310",
- does_not_raise(),
- ),
- (
- DeviceInfo(device_type="ethos-u55", mac=32),
- ("Corstone-310: Cortex-M85+Ethos-U55", False),
- ("Generic Inference Runner: Ethos-U55", False),
- "Corstone-310",
- pytest.raises(
- Exception,
- match=r"System Corstone-310: Cortex-M85\+Ethos-U55 is not installed",
- ),
- ),
- (
- DeviceInfo(device_type="ethos-u55", mac=32),
- ("Corstone-310: Cortex-M85+Ethos-U55", True),
- ("Generic Inference Runner: Ethos-U55", False),
- "Corstone-310",
- pytest.raises(
- Exception,
- match=r"Application Generic Inference Runner: Ethos-U55 "
- r"for the system Corstone-310: Cortex-M85\+Ethos-U55 is not installed",
- ),
- ),
- (
- DeviceInfo(device_type="ethos-u65", mac=512),
- ("Corstone-300: Cortex-M55+Ethos-U65", True),
- ("Generic Inference Runner: Ethos-U65", True),
- "Corstone-300",
- does_not_raise(),
- ),
- (
- DeviceInfo(device_type="ethos-u65", mac=512),
- ("Corstone-300: Cortex-M55+Ethos-U65", False),
- ("Generic Inference Runner: Ethos-U65", False),
- "Corstone-300",
- pytest.raises(
- Exception,
- match=r"System Corstone-300: Cortex-M55\+Ethos-U65 is not installed",
- ),
- ),
- (
- DeviceInfo(device_type="ethos-u65", mac=512),
- ("Corstone-300: Cortex-M55+Ethos-U65", True),
- ("Generic Inference Runner: Ethos-U65", False),
- "Corstone-300",
- pytest.raises(
- Exception,
- match=r"Application Generic Inference Runner: Ethos-U65 "
- r"for the system Corstone-300: Cortex-M55\+Ethos-U65 is not installed",
- ),
- ),
- (
- DeviceInfo(device_type="ethos-u65", mac=512),
- ("Corstone-310: Cortex-M85+Ethos-U65", True),
- ("Generic Inference Runner: Ethos-U65", True),
- "Corstone-310",
- does_not_raise(),
- ),
- (
- DeviceInfo(device_type="ethos-u65", mac=512),
- ("Corstone-310: Cortex-M85+Ethos-U65", False),
- ("Generic Inference Runner: Ethos-U65", False),
- "Corstone-310",
- pytest.raises(
- Exception,
- match=r"System Corstone-310: Cortex-M85\+Ethos-U65 is not installed",
- ),
- ),
- (
- DeviceInfo(device_type="ethos-u65", mac=512),
- ("Corstone-310: Cortex-M85+Ethos-U65", True),
- ("Generic Inference Runner: Ethos-U65", False),
- "Corstone-310",
- pytest.raises(
- Exception,
- match=r"Application Generic Inference Runner: Ethos-U65 "
- r"for the system Corstone-310: Cortex-M85\+Ethos-U65 is not installed",
- ),
- ),
- (
- DeviceInfo(
- device_type="unknown_device", # type: ignore
- mac=None, # type: ignore
- ),
- ("some_system", False),
- ("some_application", False),
- "some backend",
- pytest.raises(Exception, match="Unsupported device unknown_device"),
- ),
- ],
-)
-def test_estimate_performance(
- device: DeviceInfo,
- system: tuple[str, bool],
- application: tuple[str, bool],
- backend: str,
- expected_error: Any,
- test_tflite_model: Path,
- backend_runner: MagicMock,
-) -> None:
- """Test getting performance estimations."""
- system_name, system_installed = system
- application_name, application_installed = application
+ mock.supports.side_effect = supports
- backend_runner.is_system_installed.return_value = system_installed
- backend_runner.is_application_installed.return_value = application_installed
+ props = {
+ "name": name,
+ "already_installed": already_installed,
+ "could_be_installed": could_be_installed,
+ }
+ for prop, value in props.items():
+ setattr(type(mock), prop, PropertyMock(return_value=value))
- mock_context = create_mock_context(
- [
- _mock_encode_b64(
- {
- "NPU AXI0_RD_DATA_BEAT_RECEIVED": 1,
- "NPU AXI0_WR_DATA_BEAT_WRITTEN": 2,
- "NPU AXI1_RD_DATA_BEAT_RECEIVED": 3,
- "NPU ACTIVE": 4,
- "NPU IDLE": 5,
- "NPU TOTAL": 6,
- }
- )
- ]
- )
+ return mock
- backend_runner.run_application.return_value = mock_context
- with expected_error:
- perf_metrics = estimate_performance(
- ModelInfo(test_tflite_model), device, backend
- )
+def _already_installed_mock() -> MagicMock:
+ return get_installation_mock(
+ name="already_installed",
+ already_installed=True,
+ supported_install_type=(DownloadAndInstall, InstallFromPath),
+ )
- assert isinstance(perf_metrics, PerformanceMetrics)
- assert perf_metrics == PerformanceMetrics(
- npu_axi0_rd_data_beat_received=1,
- npu_axi0_wr_data_beat_written=2,
- npu_axi1_rd_data_beat_received=3,
- npu_active_cycles=4,
- npu_idle_cycles=5,
- npu_total_cycles=6,
- )
- assert backend_runner.is_system_installed.called_once_with(system_name)
- assert backend_runner.is_application_installed.called_once_with(
- application_name, system_name
- )
+def _ready_for_installation_mock() -> MagicMock:
+ return get_installation_mock(
+ name="ready_for_installation",
+ already_installed=False,
+ could_be_installed=True,
+ )
-@pytest.mark.parametrize("backend", ("Corstone-300", "Corstone-310"))
-def test_estimate_performance_insufficient_data(
- backend_runner: MagicMock, test_tflite_model: Path, backend: str
-) -> None:
- """Test that performance could not be estimated when not all data presented."""
- backend_runner.is_system_installed.return_value = True
- backend_runner.is_application_installed.return_value = True
-
- no_total_cycles_output = {
- "NPU AXI0_RD_DATA_BEAT_RECEIVED": 1,
- "NPU AXI0_WR_DATA_BEAT_WRITTEN": 2,
- "NPU AXI1_RD_DATA_BEAT_RECEIVED": 3,
- "NPU ACTIVE": 4,
- "NPU IDLE": 5,
- }
- mock_context = create_mock_context([_mock_encode_b64(no_total_cycles_output)])
+def _could_be_downloaded_and_installed_mock() -> MagicMock:
+ return get_installation_mock(
+ name="could_be_downloaded_and_installed",
+ already_installed=False,
+ could_be_installed=True,
+ supported_install_type=DownloadAndInstall,
+ )
- backend_runner.run_application.return_value = mock_context
- with pytest.raises(
- Exception, match="Unable to get performance metrics, insufficient data"
- ):
- device = DeviceInfo(device_type="ethos-u55", mac=32)
- estimate_performance(ModelInfo(test_tflite_model), device, backend)
+def _could_be_installed_from_mock() -> MagicMock:
+ return get_installation_mock(
+ name="could_be_installed_from",
+ already_installed=False,
+ could_be_installed=True,
+ supported_install_type=InstallFromPath,
+ )
-@pytest.mark.parametrize("backend", ("Corstone-300", "Corstone-310"))
-def test_estimate_performance_invalid_output(
- test_tflite_model: Path, backend_runner: MagicMock, backend: str
-) -> None:
- """Test estimation could not be done if inference produces unexpected output."""
- backend_runner.is_system_installed.return_value = True
- backend_runner.is_application_installed.return_value = True
-
- mock_context = create_mock_context(["Something", "is", "wrong"])
- backend_runner.run_application.return_value = mock_context
-
- with pytest.raises(Exception, match="Unable to get performance metrics"):
- estimate_performance(
- ModelInfo(test_tflite_model),
- DeviceInfo(device_type="ethos-u55", mac=256),
- backend=backend,
+def get_installation_manager(
+ noninteractive: bool,
+ installations: list[Any],
+ monkeypatch: pytest.MonkeyPatch,
+ yes_response: bool = True,
+) -> DefaultInstallationManager:
+ """Get installation manager instance."""
+ if not noninteractive:
+ monkeypatch.setattr(
+ "mlia.backend.manager.yes", MagicMock(return_value=yes_response)
)
+ return DefaultInstallationManager(installations, noninteractive=noninteractive)
-def create_mock_process(stdout: list[str], stderr: list[str]) -> MagicMock:
- """Mock underlying process."""
- mock_process = MagicMock()
- mock_process.poll.return_value = 0
- type(mock_process).stdout = PropertyMock(return_value=iter(stdout))
- type(mock_process).stderr = PropertyMock(return_value=iter(stderr))
- return mock_process
+def test_installation_manager_filtering() -> None:
+ """Test default installation manager."""
+ already_installed = _already_installed_mock()
+ ready_for_installation = _ready_for_installation_mock()
+ could_be_downloaded_and_installed = _could_be_downloaded_and_installed_mock()
-def create_mock_context(stdout: list[str]) -> ExecutionContext:
- """Mock ExecutionContext."""
- ctx = ExecutionContext(
- app=get_application("application_1")[0],
- app_params=[],
- system=get_system("System 1"),
- system_params=[],
+ manager = DefaultInstallationManager(
+ [
+ already_installed,
+ ready_for_installation,
+ could_be_downloaded_and_installed,
+ ]
)
- ctx.stdout = bytearray("\n".join(stdout).encode("utf-8"))
- return ctx
+ assert manager.already_installed("already_installed") == [already_installed]
+ assert manager.ready_for_installation() == [
+ ready_for_installation,
+ could_be_downloaded_and_installed,
+ ]
-@pytest.mark.parametrize("backend", ("Corstone-300", "Corstone-310"))
-def test_get_generic_runner(backend: str) -> None:
- """Test function get_generic_runner()."""
- device_info = DeviceInfo("ethos-u55", 256)
+@pytest.mark.parametrize("noninteractive", [True, False])
+@pytest.mark.parametrize(
+ "install_mock, eula_agreement, backend_name, force, expected_call",
+ [
+ [
+ _could_be_downloaded_and_installed_mock(),
+ True,
+ "could_be_downloaded_and_installed",
+ False,
+ [call(DownloadAndInstall(eula_agreement=True))],
+ ],
+ [
+ _could_be_downloaded_and_installed_mock(),
+ False,
+ "could_be_downloaded_and_installed",
+ True,
+ [call(DownloadAndInstall(eula_agreement=False))],
+ ],
+ [
+ _already_installed_mock(),
+ False,
+ "already_installed",
+ True,
+ [call(DownloadAndInstall(eula_agreement=False))],
+ ],
+ [
+ _could_be_downloaded_and_installed_mock(),
+ False,
+ "unknown",
+ True,
+ [],
+ ],
+ ],
+)
+def test_installation_manager_download_and_install(
+ install_mock: MagicMock,
+ noninteractive: bool,
+ eula_agreement: bool,
+ backend_name: str,
+ force: bool,
+ expected_call: Any,
+ monkeypatch: pytest.MonkeyPatch,
+) -> None:
+ """Test installation process."""
+ install_mock.reset_mock()
- runner = get_generic_runner(device_info=device_info, backend=backend)
- assert isinstance(runner, GenericInferenceRunnerEthosU)
+ manager = get_installation_manager(noninteractive, [install_mock], monkeypatch)
- with pytest.raises(RuntimeError):
- get_generic_runner(device_info=device_info, backend="UNKNOWN_BACKEND")
+ manager.download_and_install(
+ backend_name, eula_agreement=eula_agreement, force=force
+ )
+ assert install_mock.install.mock_calls == expected_call
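+    # A forced reinstall should uninstall the already installed backend first.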
+ if force and install_mock.already_installed:
+ install_mock.uninstall.assert_called_once()
+ else:
+ install_mock.uninstall.assert_not_called()
+
+@pytest.mark.parametrize("noninteractive", [True, False])
@pytest.mark.parametrize(
- ("backend", "device_type"),
- (
- ("Corstone-300", "ethos-u55"),
- ("Corstone-300", "ethos-u65"),
- ("Corstone-310", "ethos-u55"),
- ),
+ "install_mock, backend_name, force, expected_call",
+ [
+ [
+ _could_be_installed_from_mock(),
+ "could_be_installed_from",
+ False,
+ [call(InstallFromPath(Path("some_path")))],
+ ],
+ [
+ _could_be_installed_from_mock(),
+ "unknown",
+ False,
+ [],
+ ],
+ [
+ _could_be_installed_from_mock(),
+ "unknown",
+ True,
+ [],
+ ],
+ [
+ _already_installed_mock(),
+ "already_installed",
+ False,
+ [],
+ ],
+ [
+ _already_installed_mock(),
+ "already_installed",
+ True,
+ [call(InstallFromPath(Path("some_path")))],
+ ],
+ ],
)
-def test_backend_support(backend: str, device_type: str) -> None:
- """Test backend & device support."""
- assert is_supported(backend)
- assert is_supported(backend, device_type)
-
- assert get_system_name(backend, device_type)
+def test_installation_manager_install_from(
+ install_mock: MagicMock,
+ noninteractive: bool,
+ backend_name: str,
+ force: bool,
+ expected_call: Any,
+ monkeypatch: pytest.MonkeyPatch,
+) -> None:
+ """Test installation process."""
+ install_mock.reset_mock()
- assert backend in supported_backends()
+ manager = get_installation_manager(noninteractive, [install_mock], monkeypatch)
+ manager.install_from(Path("some_path"), backend_name, force=force)
+ assert install_mock.install.mock_calls == expected_call
+ if force and install_mock.already_installed:
+ install_mock.uninstall.assert_called_once()
+ else:
+ install_mock.uninstall.assert_not_called()
-class TestGenericInferenceRunnerEthosU:
- """Test for the class GenericInferenceRunnerEthosU."""
- @staticmethod
- @pytest.mark.parametrize(
- "device, backend, expected_system, expected_app",
+@pytest.mark.parametrize("noninteractive", [True, False])
+@pytest.mark.parametrize(
+ "install_mock, backend_name, expected_call",
+ [
[
- [
- DeviceInfo("ethos-u55", 256),
- "Corstone-300",
- "Corstone-300: Cortex-M55+Ethos-U55",
- "Generic Inference Runner: Ethos-U55",
- ],
- [
- DeviceInfo("ethos-u65", 256),
- "Corstone-300",
- "Corstone-300: Cortex-M55+Ethos-U65",
- "Generic Inference Runner: Ethos-U65",
- ],
- [
- DeviceInfo("ethos-u55", 256),
- "Corstone-310",
- "Corstone-310: Cortex-M85+Ethos-U55",
- "Generic Inference Runner: Ethos-U55",
- ],
- [
- DeviceInfo("ethos-u65", 256),
- "Corstone-310",
- "Corstone-310: Cortex-M85+Ethos-U65",
- "Generic Inference Runner: Ethos-U65",
- ],
+ _ready_for_uninstall_mock(),
+ "already_installed",
+ [call()],
],
- )
- def test_artifact_resolver(
- device: DeviceInfo, backend: str, expected_system: str, expected_app: str
- ) -> None:
- """Test artifact resolving based on the provided parameters."""
- generic_runner = get_generic_runner(device, backend)
- assert isinstance(generic_runner, GenericInferenceRunnerEthosU)
-
- assert generic_runner.system_name == expected_system
- assert generic_runner.app_name == expected_app
-
- @staticmethod
- def test_artifact_resolver_unsupported_backend() -> None:
- """Test that it should be not possible to use unsupported backends."""
- with pytest.raises(
- RuntimeError, match="Unsupported device ethos-u65 for backend test_backend"
- ):
- get_generic_runner(DeviceInfo("ethos-u65", 256), "test_backend")
-
- @staticmethod
- @pytest.mark.parametrize("backend", ("Corstone-300", "Corstone-310"))
- def test_inference_should_fail_if_system_not_installed(
- backend_runner: MagicMock, test_tflite_model: Path, backend: str
- ) -> None:
- """Test that inference should fail if system is not installed."""
- backend_runner.is_system_installed.return_value = False
-
- generic_runner = get_generic_runner(DeviceInfo("ethos-u55", 256), backend)
- with pytest.raises(
- Exception,
- match=r"System Corstone-3[01]0: Cortex-M[58]5\+Ethos-U55 is not installed",
- ):
- generic_runner.run(ModelInfo(test_tflite_model), [])
-
- @staticmethod
- @pytest.mark.parametrize("backend", ("Corstone-300", "Corstone-310"))
- def test_inference_should_fail_is_apps_not_installed(
- backend_runner: MagicMock, test_tflite_model: Path, backend: str
- ) -> None:
- """Test that inference should fail if apps are not installed."""
- backend_runner.is_system_installed.return_value = True
- backend_runner.is_application_installed.return_value = False
-
- generic_runner = get_generic_runner(DeviceInfo("ethos-u55", 256), backend)
- with pytest.raises(
- Exception,
- match="Application Generic Inference Runner: Ethos-U55"
- r" for the system Corstone-3[01]0: Cortex-M[58]5\+Ethos-U55 is not "
- r"installed",
- ):
- generic_runner.run(ModelInfo(test_tflite_model), [])
-
-
-@pytest.fixture(name="backend_runner")
-def fixture_backend_runner(monkeypatch: pytest.MonkeyPatch) -> MagicMock:
- """Mock backend runner."""
- backend_runner_mock = MagicMock(spec=BackendRunner)
- monkeypatch.setattr(
- "mlia.backend.manager.get_backend_runner",
- MagicMock(return_value=backend_runner_mock),
- )
- return backend_runner_mock
+ ],
+)
+def test_installation_manager_uninstall(
+ install_mock: MagicMock,
+ noninteractive: bool,
+ backend_name: str,
+ expected_call: Any,
+ monkeypatch: pytest.MonkeyPatch,
+) -> None:
+ """Test uninstallation."""
+ install_mock.reset_mock()
+
+ manager = get_installation_manager(noninteractive, [install_mock], monkeypatch)
+ manager.uninstall(backend_name)
+
+ assert install_mock.uninstall.mock_calls == expected_call
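
For reference, a minimal sketch of the installation-manager pattern exercised by the relocated tests above, using only the constructor and method signatures visible in this diff; the mock helper mirrors the deleted get_installation_mock from tests/test_tools_metadata_common.py further below, and "example_backend" is a placeholder name:

from unittest.mock import call
from unittest.mock import MagicMock
from unittest.mock import PropertyMock

from mlia.backend.install import DownloadAndInstall
from mlia.backend.manager import DefaultInstallationManager

# Mock a backend that is not installed yet but can be downloaded and installed.
install_mock = MagicMock()
type(install_mock).name = PropertyMock(return_value="example_backend")
type(install_mock).already_installed = PropertyMock(return_value=False)
type(install_mock).could_be_installed = PropertyMock(return_value=True)
install_mock.supports.side_effect = lambda t: isinstance(t, DownloadAndInstall)

# noninteractive=True skips the confirmation prompt that the tests otherwise patch out.
manager = DefaultInstallationManager([install_mock], noninteractive=True)
manager.download_and_install("example_backend", eula_agreement=True, force=False)

# The manager delegates to the matching installation's install() method.
assert install_mock.install.mock_calls == [
    call(DownloadAndInstall(eula_agreement=True))
]
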
diff --git a/tests/test_tools_metadata_py_package.py b/tests/test_backend_tosa_checker_install.py
index 8b93e33..0393f0b 100644
--- a/tests/test_tools_metadata_py_package.py
+++ b/tests/test_backend_tosa_checker_install.py
@@ -6,22 +6,10 @@ from unittest.mock import MagicMock
import pytest
-from mlia.tools.metadata.common import DownloadAndInstall
-from mlia.tools.metadata.common import InstallFromPath
-from mlia.tools.metadata.py_package import get_pypackage_backend_installations
-from mlia.tools.metadata.py_package import get_tosa_backend_installation
-from mlia.tools.metadata.py_package import PyPackageBackendInstallation
-
-
-def test_get_pypackage_backends() -> None:
- """Test function get_pypackage_backends."""
- backend_installs = get_pypackage_backend_installations()
-
- assert isinstance(backend_installs, list)
- assert len(backend_installs) == 1
-
- tosa_installation = backend_installs[0]
- assert isinstance(tosa_installation, PyPackageBackendInstallation)
+from mlia.backend.install import DownloadAndInstall
+from mlia.backend.install import InstallFromPath
+from mlia.backend.install import PyPackageBackendInstallation
+from mlia.backend.tosa_checker.install import get_tosa_backend_installation
def test_get_tosa_backend_installation(
@@ -30,7 +18,7 @@ def test_get_tosa_backend_installation(
"""Test function get_tosa_backend_installation."""
mock_package_manager = MagicMock()
monkeypatch.setattr(
- "mlia.tools.metadata.py_package.get_package_manager",
+ "mlia.backend.install.get_package_manager",
lambda: mock_package_manager,
)
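
A hedged sketch of the renamed TOSA-checker test, based only on the imports and monkeypatch target shown in this hunk; the call signature of get_tosa_backend_installation is not visible here and is assumed to take no arguments:

from unittest.mock import MagicMock

import pytest

from mlia.backend.install import PyPackageBackendInstallation
from mlia.backend.tosa_checker.install import get_tosa_backend_installation


def test_tosa_backend_installation_sketch(monkeypatch: pytest.MonkeyPatch) -> None:
    """Sketch: the TOSA checker backend resolves to a pip-package installation."""
    monkeypatch.setattr(
        "mlia.backend.install.get_package_manager",
        lambda: MagicMock(),
    )

    installation = get_tosa_backend_installation()
    assert isinstance(installation, PyPackageBackendInstallation)
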
diff --git a/tests/test_backend_vela_compat.py b/tests/test_backend_vela_compat.py
new file mode 100644
index 0000000..6f7a41c
--- /dev/null
+++ b/tests/test_backend_vela_compat.py
@@ -0,0 +1,74 @@
+# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+"""Tests for module vela/compat."""
+from pathlib import Path
+
+import pytest
+
+from mlia.backend.vela.compat import generate_supported_operators_report
+from mlia.backend.vela.compat import NpuSupported
+from mlia.backend.vela.compat import Operator
+from mlia.backend.vela.compat import Operators
+from mlia.backend.vela.compat import supported_operators
+from mlia.devices.ethosu.config import EthosUConfiguration
+from mlia.utils.filesystem import working_directory
+
+
+@pytest.mark.parametrize(
+ "model, expected_ops",
+ [
+ (
+ "test_model.tflite",
+ Operators(
+ ops=[
+ Operator(
+ name="sequential/conv1/Relu;sequential/conv1/BiasAdd;"
+ "sequential/conv2/Conv2D;sequential/conv1/Conv2D",
+ op_type="CONV_2D",
+ run_on_npu=NpuSupported(supported=True, reasons=[]),
+ ),
+ Operator(
+ name="sequential/conv2/Relu;sequential/conv2/BiasAdd;"
+ "sequential/conv2/Conv2D",
+ op_type="CONV_2D",
+ run_on_npu=NpuSupported(supported=True, reasons=[]),
+ ),
+ Operator(
+ name="sequential/max_pooling2d/MaxPool",
+ op_type="MAX_POOL_2D",
+ run_on_npu=NpuSupported(supported=True, reasons=[]),
+ ),
+ Operator(
+ name="sequential/flatten/Reshape",
+ op_type="RESHAPE",
+ run_on_npu=NpuSupported(supported=True, reasons=[]),
+ ),
+ Operator(
+ name="Identity",
+ op_type="FULLY_CONNECTED",
+ run_on_npu=NpuSupported(supported=True, reasons=[]),
+ ),
+ ]
+ ),
+ )
+ ],
+)
+def test_operators(test_models_path: Path, model: str, expected_ops: Operators) -> None:
+ """Test operators function."""
+ device = EthosUConfiguration("ethos-u55-256")
+
+ operators = supported_operators(test_models_path / model, device.compiler_options)
+ for expected, actual in zip(expected_ops.ops, operators.ops):
+ # do not compare names as they could be different on each model generation
+ assert expected.op_type == actual.op_type
+ assert expected.run_on_npu == actual.run_on_npu
+
+
+def test_generate_supported_operators_report(tmp_path: Path) -> None:
+ """Test generating supported operators report."""
+ with working_directory(tmp_path):
+ generate_supported_operators_report()
+
+ md_file = tmp_path / "SUPPORTED_OPS.md"
+ assert md_file.is_file()
+ assert md_file.stat().st_size > 0
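
For reference, a minimal usage sketch of the relocated compatibility API, grounded in the test above; "model.tflite" is a placeholder path:

from pathlib import Path

from mlia.backend.vela.compat import supported_operators
from mlia.devices.ethosu.config import EthosUConfiguration

device = EthosUConfiguration("ethos-u55-256")
operators = supported_operators(Path("model.tflite"), device.compiler_options)

for operator in operators.ops:
    # NpuSupported carries the supported flag plus the reasons an operator stays on the CPU.
    print(operator.op_type, operator.run_on_npu.supported, operator.run_on_npu.reasons)
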
diff --git a/tests/test_tools_vela_wrapper.py b/tests/test_backend_vela_compiler.py
index 0efcb0f..40268ae 100644
--- a/tests/test_tools_vela_wrapper.py
+++ b/tests/test_backend_vela_compiler.py
@@ -1,26 +1,16 @@
# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
-"""Tests for module tools/vela_wrapper."""
+"""Tests for module vela/compiler."""
from pathlib import Path
-from unittest.mock import MagicMock
-import pytest
from ethosu.vela.compiler_driver import TensorAllocator
from ethosu.vela.scheduler import OptimizationStrategy
+from mlia.backend.vela.compiler import optimize_model
+from mlia.backend.vela.compiler import OptimizedModel
+from mlia.backend.vela.compiler import VelaCompiler
+from mlia.backend.vela.compiler import VelaCompilerOptions
from mlia.devices.ethosu.config import EthosUConfiguration
-from mlia.tools.vela_wrapper import estimate_performance
-from mlia.tools.vela_wrapper import generate_supported_operators_report
-from mlia.tools.vela_wrapper import NpuSupported
-from mlia.tools.vela_wrapper import Operator
-from mlia.tools.vela_wrapper import Operators
-from mlia.tools.vela_wrapper import optimize_model
-from mlia.tools.vela_wrapper import OptimizedModel
-from mlia.tools.vela_wrapper import PerformanceMetrics
-from mlia.tools.vela_wrapper import supported_operators
-from mlia.tools.vela_wrapper import VelaCompiler
-from mlia.tools.vela_wrapper import VelaCompilerOptions
-from mlia.utils.filesystem import working_directory
def test_default_vela_compiler() -> None:
@@ -171,115 +161,3 @@ def test_optimize_model(tmp_path: Path, test_tflite_model: Path) -> None:
assert tmp_file.is_file()
assert tmp_file.stat().st_size > 0
-
-
-@pytest.mark.parametrize(
- "model, expected_ops",
- [
- (
- "test_model.tflite",
- Operators(
- ops=[
- Operator(
- name="sequential/conv1/Relu;sequential/conv1/BiasAdd;"
- "sequential/conv2/Conv2D;sequential/conv1/Conv2D",
- op_type="CONV_2D",
- run_on_npu=NpuSupported(supported=True, reasons=[]),
- ),
- Operator(
- name="sequential/conv2/Relu;sequential/conv2/BiasAdd;"
- "sequential/conv2/Conv2D",
- op_type="CONV_2D",
- run_on_npu=NpuSupported(supported=True, reasons=[]),
- ),
- Operator(
- name="sequential/max_pooling2d/MaxPool",
- op_type="MAX_POOL_2D",
- run_on_npu=NpuSupported(supported=True, reasons=[]),
- ),
- Operator(
- name="sequential/flatten/Reshape",
- op_type="RESHAPE",
- run_on_npu=NpuSupported(supported=True, reasons=[]),
- ),
- Operator(
- name="Identity",
- op_type="FULLY_CONNECTED",
- run_on_npu=NpuSupported(supported=True, reasons=[]),
- ),
- ]
- ),
- )
- ],
-)
-def test_operators(test_models_path: Path, model: str, expected_ops: Operators) -> None:
- """Test operators function."""
- device = EthosUConfiguration("ethos-u55-256")
-
- operators = supported_operators(test_models_path / model, device.compiler_options)
- for expected, actual in zip(expected_ops.ops, operators.ops):
- # do not compare names as they could be different on each model generation
- assert expected.op_type == actual.op_type
- assert expected.run_on_npu == actual.run_on_npu
-
-
-def test_estimate_performance(test_tflite_model: Path) -> None:
- """Test getting performance estimations."""
- device = EthosUConfiguration("ethos-u55-256")
- perf_metrics = estimate_performance(test_tflite_model, device.compiler_options)
-
- assert isinstance(perf_metrics, PerformanceMetrics)
-
-
-def test_estimate_performance_already_optimized(
- tmp_path: Path, test_tflite_model: Path
-) -> None:
- """Test that performance estimation should fail for already optimized model."""
- device = EthosUConfiguration("ethos-u55-256")
-
- optimized_model_path = tmp_path / "optimized_model.tflite"
-
- optimize_model(test_tflite_model, device.compiler_options, optimized_model_path)
-
- with pytest.raises(
- Exception, match="Unable to estimate performance for the given optimized model"
- ):
- estimate_performance(optimized_model_path, device.compiler_options)
-
-
-def test_generate_supported_operators_report(tmp_path: Path) -> None:
- """Test generating supported operators report."""
- with working_directory(tmp_path):
- generate_supported_operators_report()
-
- md_file = tmp_path / "SUPPORTED_OPS.md"
- assert md_file.is_file()
- assert md_file.stat().st_size > 0
-
-
-def test_read_invalid_model(test_tflite_invalid_model: Path) -> None:
- """Test that reading invalid model should fail with exception."""
- with pytest.raises(
- Exception, match=f"Unable to read model {test_tflite_invalid_model}"
- ):
- device = EthosUConfiguration("ethos-u55-256")
- estimate_performance(test_tflite_invalid_model, device.compiler_options)
-
-
-def test_compile_invalid_model(
- test_tflite_model: Path, monkeypatch: pytest.MonkeyPatch, tmp_path: Path
-) -> None:
- """Test that if model could not be compiled then correct exception raised."""
- mock_compiler = MagicMock()
- mock_compiler.side_effect = Exception("Bad model!")
-
- monkeypatch.setattr("mlia.tools.vela_wrapper.compiler_driver", mock_compiler)
-
- model_path = tmp_path / "optimized_model.tflite"
- with pytest.raises(
- Exception, match="Model could not be optimized with Vela compiler"
- ):
- device = EthosUConfiguration("ethos-u55-256")
- optimize_model(test_tflite_model, device.compiler_options, model_path)
-
- assert not model_path.exists()
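
For reference, a minimal sketch of the relocated optimize_model entry point, mirroring how the tests above and below call it; both paths are placeholders:

from pathlib import Path

from mlia.backend.vela.compiler import optimize_model
from mlia.devices.ethosu.config import EthosUConfiguration

device = EthosUConfiguration("ethos-u55-256")
optimize_model(
    Path("model.tflite"),            # input TensorFlow Lite model
    device.compiler_options,         # Vela compiler options for the selected target
    Path("optimized_model.tflite"),  # where the Vela-optimized model is written
)
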
diff --git a/tests/test_backend_vela_performance.py b/tests/test_backend_vela_performance.py
new file mode 100644
index 0000000..a1c806c
--- /dev/null
+++ b/tests/test_backend_vela_performance.py
@@ -0,0 +1,64 @@
+# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+"""Tests for module vela/performance."""
+from pathlib import Path
+from unittest.mock import MagicMock
+
+import pytest
+
+from mlia.backend.vela.compiler import optimize_model
+from mlia.backend.vela.performance import estimate_performance
+from mlia.backend.vela.performance import PerformanceMetrics
+from mlia.devices.ethosu.config import EthosUConfiguration
+
+
+def test_estimate_performance(test_tflite_model: Path) -> None:
+ """Test getting performance estimations."""
+ device = EthosUConfiguration("ethos-u55-256")
+ perf_metrics = estimate_performance(test_tflite_model, device.compiler_options)
+
+ assert isinstance(perf_metrics, PerformanceMetrics)
+
+
+def test_estimate_performance_already_optimized(
+ tmp_path: Path, test_tflite_model: Path
+) -> None:
+ """Test that performance estimation should fail for already optimized model."""
+ device = EthosUConfiguration("ethos-u55-256")
+
+ optimized_model_path = tmp_path / "optimized_model.tflite"
+
+ optimize_model(test_tflite_model, device.compiler_options, optimized_model_path)
+
+ with pytest.raises(
+ Exception, match="Unable to estimate performance for the given optimized model"
+ ):
+ estimate_performance(optimized_model_path, device.compiler_options)
+
+
+def test_read_invalid_model(test_tflite_invalid_model: Path) -> None:
+ """Test that reading invalid model should fail with exception."""
+ with pytest.raises(
+ Exception, match=f"Unable to read model {test_tflite_invalid_model}"
+ ):
+ device = EthosUConfiguration("ethos-u55-256")
+ estimate_performance(test_tflite_invalid_model, device.compiler_options)
+
+
+def test_compile_invalid_model(
+ test_tflite_model: Path, monkeypatch: pytest.MonkeyPatch, tmp_path: Path
+) -> None:
+ """Test that if model could not be compiled then correct exception raised."""
+ mock_compiler = MagicMock()
+ mock_compiler.side_effect = Exception("Bad model!")
+
+ monkeypatch.setattr("mlia.backend.vela.compiler.compiler_driver", mock_compiler)
+
+ model_path = tmp_path / "optimized_model.tflite"
+ with pytest.raises(
+ Exception, match="Model could not be optimized with Vela compiler"
+ ):
+ device = EthosUConfiguration("ethos-u55-256")
+ optimize_model(test_tflite_model, device.compiler_options, model_path)
+
+ assert not model_path.exists()
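
And a matching sketch for the relocated performance-estimation API, as exercised by the test above; "model.tflite" is a placeholder path:

from pathlib import Path

from mlia.backend.vela.performance import estimate_performance
from mlia.backend.vela.performance import PerformanceMetrics
from mlia.devices.ethosu.config import EthosUConfiguration

device = EthosUConfiguration("ethos-u55-256")
metrics = estimate_performance(Path("model.tflite"), device.compiler_options)

# estimate_performance expects a model that has not already been optimized by Vela.
assert isinstance(metrics, PerformanceMetrics)
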
diff --git a/tests/test_cli_commands.py b/tests/test_cli_commands.py
index 3a01f78..77e1f88 100644
--- a/tests/test_cli_commands.py
+++ b/tests/test_cli_commands.py
@@ -10,6 +10,7 @@ from unittest.mock import MagicMock
import pytest
+from mlia.backend.manager import DefaultInstallationManager
from mlia.cli.commands import backend_install
from mlia.cli.commands import backend_list
from mlia.cli.commands import backend_uninstall
@@ -21,7 +22,6 @@ from mlia.devices.ethosu.config import EthosUConfiguration
from mlia.devices.ethosu.performance import MemoryUsage
from mlia.devices.ethosu.performance import NPUCycles
from mlia.devices.ethosu.performance import PerformanceMetrics
-from mlia.tools.metadata.common import DefaultInstallationManager
def test_operators_expected_parameters(sample_context: ExecutionContext) -> None:
diff --git a/tests/test_devices_ethosu_config.py b/tests/test_devices_ethosu_config.py
index d4e043f..2fec0d5 100644
--- a/tests/test_devices_ethosu_config.py
+++ b/tests/test_devices_ethosu_config.py
@@ -9,9 +9,9 @@ from unittest.mock import MagicMock
import pytest
+from mlia.backend.vela.compiler import VelaCompilerOptions
from mlia.devices.ethosu.config import EthosUConfiguration
from mlia.devices.ethosu.config import get_target
-from mlia.tools.vela_wrapper import VelaCompilerOptions
from mlia.utils.filesystem import get_vela_config
diff --git a/tests/test_devices_ethosu_data_analysis.py b/tests/test_devices_ethosu_data_analysis.py
index 26aae76..8184c70 100644
--- a/tests/test_devices_ethosu_data_analysis.py
+++ b/tests/test_devices_ethosu_data_analysis.py
@@ -5,6 +5,9 @@ from __future__ import annotations
import pytest
+from mlia.backend.vela.compat import NpuSupported
+from mlia.backend.vela.compat import Operator
+from mlia.backend.vela.compat import Operators
from mlia.core.common import DataItem
from mlia.core.data_analysis import Fact
from mlia.devices.ethosu.config import EthosUConfiguration
@@ -20,9 +23,6 @@ from mlia.devices.ethosu.performance import NPUCycles
from mlia.devices.ethosu.performance import OptimizationPerformanceMetrics
from mlia.devices.ethosu.performance import PerformanceMetrics
from mlia.nn.tensorflow.optimizations.select import OptimizationSettings
-from mlia.tools.vela_wrapper import NpuSupported
-from mlia.tools.vela_wrapper import Operator
-from mlia.tools.vela_wrapper import Operators
def test_perf_metrics_diff() -> None:
diff --git a/tests/test_devices_ethosu_data_collection.py b/tests/test_devices_ethosu_data_collection.py
index a4f37aa..84b9424 100644
--- a/tests/test_devices_ethosu_data_collection.py
+++ b/tests/test_devices_ethosu_data_collection.py
@@ -6,6 +6,7 @@ from unittest.mock import MagicMock
import pytest
+from mlia.backend.vela.compat import Operators
from mlia.core.context import Context
from mlia.core.data_collection import DataCollector
from mlia.core.errors import FunctionalityNotSupportedError
@@ -18,7 +19,6 @@ from mlia.devices.ethosu.performance import NPUCycles
from mlia.devices.ethosu.performance import OptimizationPerformanceMetrics
from mlia.devices.ethosu.performance import PerformanceMetrics
from mlia.nn.tensorflow.optimizations.select import OptimizationSettings
-from mlia.tools.vela_wrapper import Operators
@pytest.mark.parametrize(
diff --git a/tests/test_devices_ethosu_performance.py b/tests/test_devices_ethosu_performance.py
index b3e5298..3ff73d8 100644
--- a/tests/test_devices_ethosu_performance.py
+++ b/tests/test_devices_ethosu_performance.py
@@ -23,6 +23,6 @@ def test_memory_usage_conversion() -> None:
def mock_performance_estimation(monkeypatch: pytest.MonkeyPatch) -> None:
"""Mock performance estimation."""
monkeypatch.setattr(
- "mlia.backend.manager.estimate_performance",
+ "mlia.backend.corstone.performance.estimate_performance",
MagicMock(return_value=MagicMock()),
)
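
A sketch of the updated fixture with its new patch target; only the fixture body is visible in the hunk above, so the decorator arguments are an assumption:

from unittest.mock import MagicMock

import pytest


@pytest.fixture  # decorator arguments not shown in the hunk above
def mock_performance_estimation(monkeypatch: pytest.MonkeyPatch) -> None:
    """Mock performance estimation so tests avoid running the Corstone backend."""
    monkeypatch.setattr(
        "mlia.backend.corstone.performance.estimate_performance",
        MagicMock(return_value=MagicMock()),
    )
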
diff --git a/tests/test_devices_ethosu_reporters.py b/tests/test_devices_ethosu_reporters.py
index f04270c..926c4c3 100644
--- a/tests/test_devices_ethosu_reporters.py
+++ b/tests/test_devices_ethosu_reporters.py
@@ -13,6 +13,9 @@ from typing import Literal
import pytest
+from mlia.backend.vela.compat import NpuSupported
+from mlia.backend.vela.compat import Operator
+from mlia.backend.vela.compat import Operators
from mlia.core.reporting import get_reporter
from mlia.core.reporting import produce_report
from mlia.core.reporting import Report
@@ -26,9 +29,6 @@ from mlia.devices.ethosu.reporters import ethos_u_formatters
from mlia.devices.ethosu.reporters import report_device_details
from mlia.devices.ethosu.reporters import report_operators
from mlia.devices.ethosu.reporters import report_perf_metrics
-from mlia.tools.vela_wrapper import NpuSupported
-from mlia.tools.vela_wrapper import Operator
-from mlia.tools.vela_wrapper import Operators
from mlia.utils.console import remove_ascii_codes
diff --git a/tests/test_tools_metadata_common.py b/tests/test_tools_metadata_common.py
deleted file mode 100644
index 9811852..0000000
--- a/tests/test_tools_metadata_common.py
+++ /dev/null
@@ -1,282 +0,0 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-# SPDX-License-Identifier: Apache-2.0
-"""Tests for commmon installation related functions."""
-from __future__ import annotations
-
-from pathlib import Path
-from typing import Any
-from unittest.mock import call
-from unittest.mock import MagicMock
-from unittest.mock import PropertyMock
-
-import pytest
-
-from mlia.tools.metadata.common import DefaultInstallationManager
-from mlia.tools.metadata.common import DownloadAndInstall
-from mlia.tools.metadata.common import Installation
-from mlia.tools.metadata.common import InstallationType
-from mlia.tools.metadata.common import InstallFromPath
-
-
-def get_default_installation_manager_mock(
- name: str,
- already_installed: bool = False,
-) -> MagicMock:
- """Get mock instance for DefaultInstallationManager."""
- mock = MagicMock(spec=DefaultInstallationManager)
-
- props = {
- "name": name,
- "already_installed": already_installed,
- }
- for prop, value in props.items():
- setattr(type(mock), prop, PropertyMock(return_value=value))
-
- return mock
-
-
-def _ready_for_uninstall_mock() -> MagicMock:
- return get_default_installation_manager_mock(
- name="already_installed",
- already_installed=True,
- )
-
-
-def get_installation_mock(
- name: str,
- already_installed: bool = False,
- could_be_installed: bool = False,
- supported_install_type: type | tuple | None = None,
-) -> MagicMock:
- """Get mock instance for the installation."""
- mock = MagicMock(spec=Installation)
-
- def supports(install_type: InstallationType) -> bool:
- if supported_install_type is None:
- return False
-
- return isinstance(install_type, supported_install_type)
-
- mock.supports.side_effect = supports
-
- props = {
- "name": name,
- "already_installed": already_installed,
- "could_be_installed": could_be_installed,
- }
- for prop, value in props.items():
- setattr(type(mock), prop, PropertyMock(return_value=value))
-
- return mock
-
-
-def _already_installed_mock() -> MagicMock:
- return get_installation_mock(
- name="already_installed",
- already_installed=True,
- supported_install_type=(DownloadAndInstall, InstallFromPath),
- )
-
-
-def _ready_for_installation_mock() -> MagicMock:
- return get_installation_mock(
- name="ready_for_installation",
- already_installed=False,
- could_be_installed=True,
- )
-
-
-def _could_be_downloaded_and_installed_mock() -> MagicMock:
- return get_installation_mock(
- name="could_be_downloaded_and_installed",
- already_installed=False,
- could_be_installed=True,
- supported_install_type=DownloadAndInstall,
- )
-
-
-def _could_be_installed_from_mock() -> MagicMock:
- return get_installation_mock(
- name="could_be_installed_from",
- already_installed=False,
- could_be_installed=True,
- supported_install_type=InstallFromPath,
- )
-
-
-def get_installation_manager(
- noninteractive: bool,
- installations: list[Any],
- monkeypatch: pytest.MonkeyPatch,
- yes_response: bool = True,
-) -> DefaultInstallationManager:
- """Get installation manager instance."""
- if not noninteractive:
- monkeypatch.setattr(
- "mlia.tools.metadata.common.yes", MagicMock(return_value=yes_response)
- )
-
- return DefaultInstallationManager(installations, noninteractive=noninteractive)
-
-
-def test_installation_manager_filtering() -> None:
- """Test default installation manager."""
- already_installed = _already_installed_mock()
- ready_for_installation = _ready_for_installation_mock()
- could_be_downloaded_and_installed = _could_be_downloaded_and_installed_mock()
-
- manager = DefaultInstallationManager(
- [
- already_installed,
- ready_for_installation,
- could_be_downloaded_and_installed,
- ]
- )
- assert manager.already_installed("already_installed") == [already_installed]
- assert manager.ready_for_installation() == [
- ready_for_installation,
- could_be_downloaded_and_installed,
- ]
-
-
-@pytest.mark.parametrize("noninteractive", [True, False])
-@pytest.mark.parametrize(
- "install_mock, eula_agreement, backend_name, force, expected_call",
- [
- [
- _could_be_downloaded_and_installed_mock(),
- True,
- "could_be_downloaded_and_installed",
- False,
- [call(DownloadAndInstall(eula_agreement=True))],
- ],
- [
- _could_be_downloaded_and_installed_mock(),
- False,
- "could_be_downloaded_and_installed",
- True,
- [call(DownloadAndInstall(eula_agreement=False))],
- ],
- [
- _already_installed_mock(),
- False,
- "already_installed",
- True,
- [call(DownloadAndInstall(eula_agreement=False))],
- ],
- [
- _could_be_downloaded_and_installed_mock(),
- False,
- "unknown",
- True,
- [],
- ],
- ],
-)
-def test_installation_manager_download_and_install(
- install_mock: MagicMock,
- noninteractive: bool,
- eula_agreement: bool,
- backend_name: str,
- force: bool,
- expected_call: Any,
- monkeypatch: pytest.MonkeyPatch,
-) -> None:
- """Test installation process."""
- install_mock.reset_mock()
-
- manager = get_installation_manager(noninteractive, [install_mock], monkeypatch)
-
- manager.download_and_install(
- backend_name, eula_agreement=eula_agreement, force=force
- )
-
- assert install_mock.install.mock_calls == expected_call
- if force and install_mock.already_installed:
- install_mock.uninstall.assert_called_once()
- else:
- install_mock.uninstall.assert_not_called()
-
-
-@pytest.mark.parametrize("noninteractive", [True, False])
-@pytest.mark.parametrize(
- "install_mock, backend_name, force, expected_call",
- [
- [
- _could_be_installed_from_mock(),
- "could_be_installed_from",
- False,
- [call(InstallFromPath(Path("some_path")))],
- ],
- [
- _could_be_installed_from_mock(),
- "unknown",
- False,
- [],
- ],
- [
- _could_be_installed_from_mock(),
- "unknown",
- True,
- [],
- ],
- [
- _already_installed_mock(),
- "already_installed",
- False,
- [],
- ],
- [
- _already_installed_mock(),
- "already_installed",
- True,
- [call(InstallFromPath(Path("some_path")))],
- ],
- ],
-)
-def test_installation_manager_install_from(
- install_mock: MagicMock,
- noninteractive: bool,
- backend_name: str,
- force: bool,
- expected_call: Any,
- monkeypatch: pytest.MonkeyPatch,
-) -> None:
- """Test installation process."""
- install_mock.reset_mock()
-
- manager = get_installation_manager(noninteractive, [install_mock], monkeypatch)
- manager.install_from(Path("some_path"), backend_name, force=force)
-
- assert install_mock.install.mock_calls == expected_call
- if force and install_mock.already_installed:
- install_mock.uninstall.assert_called_once()
- else:
- install_mock.uninstall.assert_not_called()
-
-
-@pytest.mark.parametrize("noninteractive", [True, False])
-@pytest.mark.parametrize(
- "install_mock, backend_name, expected_call",
- [
- [
- _ready_for_uninstall_mock(),
- "already_installed",
- [call()],
- ],
- ],
-)
-def test_installation_manager_uninstall(
- install_mock: MagicMock,
- noninteractive: bool,
- backend_name: str,
- expected_call: Any,
- monkeypatch: pytest.MonkeyPatch,
-) -> None:
- """Test uninstallation."""
- install_mock.reset_mock()
-
- manager = get_installation_manager(noninteractive, [install_mock], monkeypatch)
- manager.uninstall(backend_name)
-
- assert install_mock.uninstall.mock_calls == expected_call